pytorch将模型加载到GPU上
下面的示例演示如何判断模型和张量当前所在的设备,以及如何在 CPU 与 GPU 之间移动它们。
# ----------- Check which device the MODEL is on ----------------------
# A freshly constructed module always lives on the CPU; its device is
# read from any of its parameters.
model = nn.LSTM(input_size=10, hidden_size=4, num_layers=1, batch_first=True)
print(next(model.parameters()).device)  # -> cpu
# NOTE: calling .cuda() on a machine without CUDA raises a RuntimeError,
# so guard the GPU round-trip behind torch.cuda.is_available().
if torch.cuda.is_available():
    model = model.cuda()
    print(next(model.parameters()).device)  # -> cuda:0
    model = model.cpu()
print(next(model.parameters()).device)  # -> cpu
# ----------- Check which device a TENSOR is on ----------------------
data = torch.ones([2, 3])
print(data.device)  # -> cpu
if torch.cuda.is_available():
    data = data.cuda()
    print(data.device)  # -> cuda:0
    data = data.cpu()
print(data.device)  # -> cpu
方法一
# Method 1: explicitly call .cuda() on the model and the tensors,
# but only when a CUDA-capable GPU is actually available.
if torch.cuda.is_available():
    model, x, y = model.cuda(), x.cuda(), y.cuda()
方法二
# Method 2: resolve the target device once, then move the model and
# the tensors with .to(device); works the same on CPU-only machines.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# Move the model to the chosen device.
model = model.to(device)
# Move the tensors to the chosen device.
x = x.to(device)
y = y.to(device)