pytorch .to(device) 和 .cuda() 的区别
原理:.to(device) 可以指定 CPU 或者 GPU,例如 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu"); model.to(device)。如果是多 GPU:if torch.cuda.device_count() > 1: model = nn.DataParallel(model, device_ids=[0,1,2])
文章共653字 · 阅读需要大约3分钟
一键AI生成摘要,助你高效阅读
问答
·
原理
.to(device) 可以指定CPU 或者GPU
# Pick the target device: first CUDA GPU if available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # single GPU or CPU
model.to(device)
# If there are multiple GPUs, wrap the model in DataParallel.
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model,device_ids=[0,1,2])
# Move the (possibly wrapped) model to the selected device.
model.to(device)
.cuda() 只能指定GPU
# Select a specific GPU.
# NOTE: CUDA_VISIBLE_DEVICES must be set before the first CUDA call,
# otherwise it has no effect on this process.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # fixed: env var name needs the trailing 'S'
model.cuda()
# Multi-GPU case.
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'  # fixed: os.environ, not os.environment
device_ids = [0, 1, 2, 3]
net = torch.nn.DataParallel(net, device_ids=device_ids)  # fixed: DataParallel capitalization
net = torch.nn.DataParallel(net)  # alternative: defaults to all visible devices
net = net.cuda()
# Excerpt from torch.nn.DataParallel showing how device defaults are resolved.
# NOTE(review): the __init__ body is truncated by the page scrape — the full
# implementation continues past this excerpt.
class DataParallel(Module):
    def __init__(self, module, device_ids=None, output_device=None, dim=0):
        super(DataParallel, self).__init__()
        # Without CUDA, just hold the module with an empty device list.
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        # Default: replicate across every visible GPU.
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        # Outputs are gathered on the first listed device by default.
        if output_device is None:
            output_device = device_ids[0]
更多推荐
已为社区贡献5条内容
所有评论(0)