import numpy as np  # scientific computing
import torch  # deep learning
import torch.nn as nn  # neural network layers (RNN)
import torch.optim as optim  # optimizers
from matplotlib import pyplot as plt  # plotting

num_time_steps = 50  # number of time steps per sample
input_size = 1       # input feature size
hidden_size = 16     # hidden state size
output_size = 1      # output size
lr = 0.01            # learning rate

class Net(nn.Module):  # model definition: RNN + linear head
    def __init__(self):
        super(Net, self).__init__()
        # single-layer RNN
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )
        # initialize the RNN parameters with small random values
        for p in self.rnn.parameters():
            nn.init.normal_(p, mean=0.0, std=0.001)
        # linear output layer
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden_prev):
        out, hidden_prev = self.rnn(x, hidden_prev)  # out: [b, seq, h]
        out = out.view(-1, hidden_size)  # flatten to [b*seq, h] for the linear layer
        out = self.linear(out)           # [b*seq, 1]
        out = out.unsqueeze(dim=0)       # restore the batch dimension: [1, seq, 1]
        return out, hidden_prev

model = Net()  # build the model
criterion = nn.MSELoss()  # mean squared error loss
optimizer = optim.Adam(model.parameters(), lr)  # Adam optimizer
hidden_prev = torch.zeros(1, 1, hidden_size)  # initial RNN hidden state

# Training: learn to predict the next point of a cosine wave
for iter in range(10000):
    start = np.random.randint(3, size=1)[0]
    time_steps = np.linspace(start, start + 10, num_time_steps)
    data = np.cos(time_steps)  # sample a cosine wave
    data = data.reshape(num_time_steps, 1)

    x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
    y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

    output, hidden_prev = model(x, hidden_prev)  # forward pass
    hidden_prev = hidden_prev.detach()  # detach the hidden state from the graph (truncated BPTT)

    loss = criterion(output, y)  # compute the loss
    model.zero_grad()
    loss.backward()  # backpropagation
    # Gradient clipping to avoid exploding gradients
    for p in model.parameters():
        print(p.grad.norm())  # monitor per-parameter gradient norms
    torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
    # Exploding gradients are handled by rescaling: (w.grad / ||w.grad||) * threshold

    optimizer.step()  # update the parameters

    if iter % 100 == 0:
        print("Iteration: {} loss {}".format(iter, loss.item()))


start = np.random.randint(3, size=1)[0]
time_steps = np.linspace(start, start + 10, num_time_steps)
data = np.cos(time_steps)  # a fresh cosine wave for evaluation
data = data.reshape(num_time_steps, 1)  # reshape to (num_time_steps, 1)
x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

# Evaluate: feed each prediction back as the next input (closed-loop prediction)
predictions = []
input = x[:, 0, :]
for _ in range(x.shape[1]):
    input = input.view(1, 1, 1)
    (pred, hidden_prev) = model(input, hidden_prev)  # predict one step ahead
    input = pred  # the prediction becomes the next input
    predictions.append(pred.detach().numpy().ravel()[0])  # collect scalar predictions

# Plot the results
x = x.data.numpy().ravel()
y = y.data.numpy()
plt.scatter(time_steps[:-1], x.ravel(), s=90)  # ground-truth points
plt.plot(time_steps[:-1], x.ravel())  # ground-truth curve

plt.scatter(time_steps[1:], predictions)  # model predictions
plt.show()

[Figure: closed-loop predictions (scatter) plotted against the ground-truth cosine curve]
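
For reference, here is a minimal shape check (an illustrative sketch; the names net, h0, and x0 are ours, not part of the original script) showing how forward maps a batched sequence to one prediction per time step:

net = Net()
h0 = torch.zeros(1, 1, hidden_size)         # (num_layers, batch, hidden_size)
x0 = torch.randn(1, num_time_steps - 1, 1)  # (batch, seq, input_size)
out, h1 = net(x0, h0)
print(out.shape)  # torch.Size([1, 49, 1]) -> one prediction per input step
print(h1.shape)   # torch.Size([1, 1, 16]) -> final hidden state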

Gradient Clipping


Summary: clipping gradients prevents gradient explosion. You can loop over each parameter and apply clip_grad_norm_ individually, or pass all of a model's parameters to a single call.
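
As a minimal sketch (assuming model is defined as above and gradients have already been computed by loss.backward()), the per-parameter loop described in the summary and the more common single call look like this:

# Option 1: clip each parameter's gradient individually
for p in model.parameters():
    torch.nn.utils.clip_grad_norm_(p, 10)

# Option 2: clip the global norm of all gradients in one call
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)

Clipping the global norm rescales every gradient by the same factor when their combined norm exceeds the threshold, which preserves the relative direction of the update.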
