Recurrent Neural Networks (RNN) -- based on the PyTorch framework

Implementing an RNN from scratch:

import math
import torch
from torch import nn
from torch.nn import functional as F
import matplotlib.pyplot as plt
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

print(F.one_hot(torch.tensor([0, 2]), len(vocab)))

x = torch.arange(10).reshape((2, 5))
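The toy tensor x above stands for a minibatch of 2 sequences of 5 token indices. Transposing it before one-hot encoding puts the time-step dimension first, which is the layout the rnn function below expects; a quick shape check (the default character-level vocabulary of the time machine dataset has 28 tokens):

print(F.one_hot(x.T, len(vocab)).shape)  # expected: torch.Size([5, 2, 28])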


# Initialize the model parameters

def get_params(vocab_size, num_hidden, device):
    num_inputs = num_outputs = vocab_size  # after one-hot encoding, every input token becomes a vector of length vocab_size

    def normal(shape):  # initializer: Gaussian with standard deviation 0.01
        return torch.randn(size=shape, device=device) * 0.01

    # weights and bias for the current input and the previous hidden state
    w_xh = normal((num_inputs, num_hidden))  # input to hidden
    w_hh = normal((num_hidden, num_hidden))  # hidden to hidden
    b_h = torch.zeros(num_hidden, device=device)

    # weights and bias from the hidden layer to the output layer
    w_hq = normal((num_hidden, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)

    params = [w_xh, w_hh, b_h, w_hq, b_q]
    for param in params:
        param.requires_grad_(True)  # track gradients for training
    return params


# Initialize the hidden state
# At time step 0 there is no previous hidden state, so we supply an all-zero initial state.
# It is returned as a tuple so the interface stays consistent with models (e.g. LSTMs) whose state holds more than one tensor.

def init_rnn_state(batch_size, num_hidden, device):
    return (torch.zeros((batch_size, num_hidden), device=device),)


# The rnn function computes the hidden state and the output within each time step.

def rnn(inputs, state, params):  # inputs -> the time steps, state -> initial hidden state, params -> learnable parameters
    # Shape of inputs: (num_steps, batch_size, vocab_size)
    w_xh, w_hh, b_h, w_hq, b_q = params
    h, = state
    outputs = []
    # Shape of x: (batch_size, vocab_size)
    for x in inputs:  # iterate over the time steps
        h = torch.tanh(torch.mm(x, w_xh) + torch.mm(h, w_hh) + b_h)  # hidden state at the current time step
        y = torch.mm(h, w_hq) + b_q  # output at the current time step
        outputs.append(y)  # collect the outputs

    return torch.cat(outputs, dim=0), (h,)
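For reference, each iteration of the loop above implements the standard RNN update, with $X_t$ the one-hot input at time step $t$ and $H_{t-1}$ the previous hidden state:

$$H_t = \tanh(X_t W_{xh} + H_{t-1} W_{hh} + b_h), \qquad O_t = H_t W_{hq} + b_q$$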


class RNNModelScratch:  # a class that wraps the functions defined above
    """An RNN model implemented from scratch"""

    def __init__(self, vocab_size, num_hidden, device,
                 get_params, init_state, forward_fn):
        self.vocab_size, self.num_hidden = vocab_size, num_hidden
        self.params = get_params(vocab_size, num_hidden, device)
        self.init_state, self.forward_fn = init_state, forward_fn  # forward_fn is the rnn function defined above

    # the forward pass
    def __call__(self, x, state):  # x is a minibatch of token indices from the data iterator
        x = F.one_hot(x.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(x, state, self.params)

    # return the initial state, i.e. call the initializer defined above
    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hidden, device)


# Run a forward pass by calling the functions wrapped in net

num_hidden = 512
net = RNNModelScratch(len(vocab), num_hidden, d2l.try_gpu(), get_params, init_rnn_state, rnn)

state = net.begin_state(x.shape[0], d2l.try_gpu())
y, new_state = net(x.to(d2l.try_gpu()), state)

print(y.shape, len(new_state), new_state[0].shape)
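With the default character-level vocabulary of the time machine dataset (28 tokens), this should print torch.Size([10, 28]) 1 torch.Size([2, 512]): the output stacks num_steps * batch_size = 10 rows of vocab_size scores, while the hidden state keeps the shape (batch_size, num_hidden).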


# First define a prediction function that generates new characters following a prefix.
# Note that the model has not been trained yet, so its predictions are essentially random guesses.
def predict_ch8(prefix, num_preds, net, vocab, device):  # @save
    """Generate new characters following prefix"""
    state = net.begin_state(batch_size=1, device=device)  # we predict for a single sequence, so batch_size is 1
    outputs = [vocab[prefix[0]]]  # store the index of each character

    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    # each step feeds the most recently generated token (index -1) back in as the next input
    # warm-up: run the rest of the prefix through the model to build up the hidden state
    for y in prefix[1:]:
        _, state = net(get_input(), state)  # only the updated state is kept
        outputs.append(vocab[y])

    # start predicting
    for _ in range(num_preds):
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
        # take the index with the highest score, converted to a Python int

    return ''.join([vocab.idx_to_token[i] for i in outputs])  # join the characters into a string and return it
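As a quick sanity check, the untrained model can already be asked to continue a prefix; since the weights are still random, the continuation is gibberish:

print(predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()))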


# Gradient clipping, to guard against exploding gradients

def grad_clipping(net, theta):
    """Clip the gradients"""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm  # rescale so the global gradient norm equals theta
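Written out, grad_clipping rescales the full gradient vector $\mathbf{g}$ whenever its L2 norm exceeds the threshold $\theta$:

$$\mathbf{g} \leftarrow \min\left(1, \frac{\theta}{\lVert \mathbf{g} \rVert}\right) \mathbf{g}$$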


def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train the network for one epoch"""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # sum of the training loss, number of tokens

    for x, y in train_iter:
        if state is None or use_random_iter:  # initialize the state when it is empty or when using random sampling
            state = net.begin_state(batch_size=x.shape[0], device=device)
        else:
            # otherwise detach the state so gradients do not flow back across minibatch boundaries
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                state.detach_()
            else:
                for s in state:
                    s.detach_()

        Y = y.T.reshape(-1)
        x, Y = x.to(device), Y.to(device)
        y_hat, state = net(x, state)
        l = loss(y_hat, Y.long()).mean()

        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()  # compute the gradients
            grad_clipping(net, 1)  # clip the gradients
            updater.step()  # update the parameters
        else:
            l.backward()
            grad_clipping(net, 1)
            # mean() has already been taken, so update with batch_size=1
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())

    # return perplexity and throughput (tokens per second)
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()


def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    """训练模型"""
    loss = nn.CrossEntropyLoss()

    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])

    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)

    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])

    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
    print(predict('time traveller'))
    print(predict('traveller'))

    plt.show()


num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())

Concise implementation:

import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

num_hidden = 256
# Construct a recurrent neural network layer rnn_layer with a single hidden layer of 256 hidden units.
rnn_layer = nn.RNN(len(vocab), num_hidden)

# Initialize the hidden state with a tensor; its shape is (number of hidden layers, batch size, number of hidden units).
state = torch.zeros((1, batch_size, num_hidden))
print(state.shape)

X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
print(Y.shape, state_new.shape)
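With num_steps=35, batch_size=32 and 256 hidden units, this should print torch.Size([35, 32, 256]) torch.Size([1, 32, 256]). Note that rnn_layer only returns the hidden states at every time step and does not include an output layer, which is why the RNNModel class below puts a Linear layer on top.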


class RNNModel(nn.Module):
    """The recurrent neural network model"""

    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size

        # For a bidirectional RNN the hidden states from both directions are concatenated,
        # so the output layer takes twice as many inputs.
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        x = F.one_hot(inputs.T.long(), self.vocab_size)
        x = x.to(torch.float32)
        y, state = self.rnn(x, state)
        # The fully connected layer first reshapes y to (num_steps * batch_size, num_hiddens);
        # its output has shape (num_steps * batch_size, vocab_size).
        output = self.linear(y.reshape((-1, y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            # nn.RNN and nn.GRU take a single tensor as the hidden state
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens),
                               device=device)
        else:
            # nn.LSTM takes a tuple as the hidden state
            return (torch.zeros((
                self.num_directions * self.rnn.num_layers,
                batch_size, self.num_hiddens), device=device),
                    torch.zeros((
                        self.num_directions * self.rnn.num_layers,
                        batch_size, self.num_hiddens), device=device))

device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
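Before training, the randomly initialized model can again be used for prediction, this time via the d2l helper of the same name (the output is nonsense, as before):

print(d2l.predict_ch8('time traveller', 10, net, vocab, device))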

num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)

 
