《PyTorch 深度学习实践》 (PyTorch Deep Learning Practice) by 刘二大人, Lecture 12

'''
Input sequence:  "hello"
Target sequence: "ohlol"
Model: torch.nn.RNNCell
'''
import torch

input_size = 4
hidden_size = 4
batch_size = 1

# Prepare the data
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # hello
y_data = [3, 1, 2, 3, 2]  # ohlol

one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]  # one-hot rows for indices 0, 1, 2, 3
x_one_hot = [one_hot_lookup[x] for x in x_data]  # one-hot encode the sequence
print('x_one_hot:', x_one_hot)
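
# Aside (not part of the lecture code): PyTorch's built-in one-hot helper gives
# the same 4-dimensional encoding directly from the index list; the variable
# name below is only illustrative.
x_one_hot_alt = torch.nn.functional.one_hot(torch.tensor(x_data), num_classes=4).float()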

# Build the input sequence and the labels
inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)  # (seq_len, batch_size, input_size)
labels = torch.LongTensor(y_data).view(-1, 1)                      # (seq_len, 1)

# Design the model
class Model(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size):
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnncell = torch.nn.RNNCell(input_size=self.input_size,
                                        hidden_size=self.hidden_size)

    def forward(self, input, hidden):
        # one step of the cell: (batch_size, input_size) -> (batch_size, hidden_size)
        hidden = self.rnncell(input, hidden)
        return hidden

    # initialize h0 as an all-zero tensor
    def init_hidden(self):
        return torch.zeros(self.batch_size, self.hidden_size)

net = Model(input_size, hidden_size, batch_size)

# Loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

# Training cycle
for epoch in range(20):
    loss = 0
    optimizer.zero_grad()
    hidden = net.init_hidden()
    print('Predicted String:', end='')
    for input, label in zip(inputs, labels):
        hidden = net(input, hidden)
        loss += criterion(hidden, label)  # accumulate the loss over the whole sequence
        _, idx = hidden.max(dim=1)
        print(idx2char[idx.item()], end='')
    loss.backward()
    optimizer.step()
    print(', Epoch [%d/20] loss=%.4f' % (epoch + 1, loss.item()))
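
For reference, the per-step shape contract of torch.nn.RNNCell can be checked in isolation. The snippet below is a standalone sanity check (not part of the lecture code), using a random tensor for a single time step:

import torch

cell = torch.nn.RNNCell(input_size=4, hidden_size=4)
x_t = torch.randn(1, 4)   # one time step: (batch_size, input_size)
h_t = torch.zeros(1, 4)   # hidden state:  (batch_size, hidden_size)
h_next = cell(x_t, h_t)   # next hidden state, also (batch_size, hidden_size)
print(h_next.shape)       # torch.Size([1, 4])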

 

'''
Input sequence:  "hello"
Target sequence: "ohlol"
Model: torch.nn.RNN
'''
import torch

input_size = 4
hidden_size = 4
num_layers = 1
batch_size = 1
seq_len = 5

# Prepare the data
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # hello
y_data = [3, 1, 2, 3, 2]  # ohlol

one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]  # one-hot rows for indices 0, 1, 2, 3
x_one_hot = [one_hot_lookup[x] for x in x_data]  # one-hot encode the sequence
print('x_one_hot:', x_one_hot)

# Build the input sequence and the labels
inputs = torch.Tensor(x_one_hot).view(seq_len, batch_size, input_size)  # (seq_len, batch_size, input_size)
labels = torch.LongTensor(y_data)                                       # (seq_len,)

# Design the model
class Model(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size, num_layers=1):
        super(Model, self).__init__()
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnn = torch.nn.RNN(input_size=self.input_size,
                                hidden_size=self.hidden_size,
                                num_layers=self.num_layers)

    def forward(self, input):
        # h0: (num_layers, batch_size, hidden_size), initialized to zeros
        hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)
        out, _ = self.rnn(input, hidden)
        # flatten to (seq_len * batch_size, hidden_size) for CrossEntropyLoss
        return out.view(-1, self.hidden_size)

net = Model(input_size, hidden_size, batch_size, num_layers)

# Loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)

# Training cycle
for epoch in range(20):
    optimizer.zero_grad()
    outputs = net(inputs)              # (seq_len * batch_size, hidden_size)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    _, idx = outputs.max(dim=1)
    idx = idx.data.numpy()
    print('Predicted: ', ''.join([idx2char[x] for x in idx]), end='')
    print(', Epoch [%d/20] loss=%.3f' % (epoch + 1, loss.item()))
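
Similarly, the shape contract of torch.nn.RNN over a whole sequence can be verified with a standalone sketch (again not part of the lecture code; the tensors here are random placeholders):

import torch

rnn = torch.nn.RNN(input_size=4, hidden_size=4, num_layers=1)
x = torch.randn(5, 1, 4)       # (seq_len, batch_size, input_size)
h0 = torch.zeros(1, 1, 4)      # (num_layers, batch_size, hidden_size)
out, hn = rnn(x, h0)
print(out.shape)               # torch.Size([5, 1, 4]): output at every time step
print(hn.shape)                # torch.Size([1, 1, 4]): final hidden state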

 

posted @ 2022-10-24 22:26  silvan_happy