PyTorch Basics: RNN
The RNN (recurrent neural network) is another very common type of neural network; as before, I won't go over the theory here and will go straight to the code.
This post uses the same MNIST dataset as the previous post on logistic regression.
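The data preparation itself is not repeated below; the tensors featuresTrain, targetsTrain, featuresTest and targetsTest are assumed to come from the previous post. As a reminder, here is a minimal sketch (assuming the Kaggle digit-recognizer train.csv used there; the file path and split ratio are illustrative):

# Minimal data preparation (illustrative; mirrors the logistic regression post)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import torch

data = pd.read_csv("train.csv", dtype=np.float32)
targets_numpy = data.label.values
features_numpy = data.loc[:, data.columns != "label"].values / 255.0  # normalize pixels to [0, 1]

features_train, features_test, targets_train, targets_test = train_test_split(
    features_numpy, targets_numpy, test_size=0.2, random_state=42)

# Convert numpy arrays to torch tensors
featuresTrain = torch.from_numpy(features_train)
targetsTrain = torch.from_numpy(targets_train).type(torch.LongTensor)
featuresTest = torch.from_numpy(features_test)
targetsTest = torch.from_numpy(targets_test).type(torch.LongTensor)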
Part 1: Building the Model
# Import Libraries
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader

# Create RNN Model
class RNNModel(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(RNNModel, self).__init__()
        # Number of hidden dimensions
        self.hidden_dim = hidden_dim
        # Number of hidden layers
        self.layer_dim = layer_dim
        # RNN layer; batch_first=True means input is (batch, seq, feature)
        self.rnn = nn.RNN(input_dim, hidden_dim, layer_dim, batch_first=True, nonlinearity='relu')
        # Readout layer
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Initialize hidden state with zeros: (layer_dim, batch, hidden_dim)
        h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))
        # Run the whole sequence through the RNN in one call
        out, hn = self.rnn(x, h0)
        # Classify using the hidden state of the last time step
        out = self.fc(out[:, -1, :])
        return out
# batch_size, epoch and iteration
batch_size = 100
n_iters = 8000
num_epochs = n_iters / (len(features_train) / batch_size)
num_epochs = int(num_epochs)
# Pytorch train and test sets
train = TensorDataset(featuresTrain,targetsTrain)
test = TensorDataset(featuresTest,targetsTest)
# data loader
train_loader = DataLoader(train, batch_size = batch_size, shuffle = False)
test_loader = DataLoader(test, batch_size = batch_size, shuffle = False)
# Create RNN
input_dim = 28 # input dimension
hidden_dim = 100 # hidden layer dimension
layer_dim = 1 # number of hidden layers
output_dim = 10 # output dimension
model = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)
# Cross Entropy Loss
error = nn.CrossEntropyLoss()
# SGD Optimizer
learning_rate = 0.05
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
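Before training, it can help to push one dummy batch through the model to confirm the output shape. This quick check is my addition, not part of the original post:

# Sanity check (illustrative): a fake batch of 5 images, each a 28-step sequence of 28-dim rows
dummy = Variable(torch.zeros(5, 28, input_dim))
print(model(dummy).shape)  # expected: torch.Size([5, 10])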
Part 2: Training the Model
seq_dim = 28  # each 28x28 MNIST image is treated as a sequence of 28 rows
loss_list = []
iteration_list = []
accuracy_list = []
count = 0
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape each batch to (batch, seq_dim, input_dim)
        images = Variable(images.view(-1, seq_dim, input_dim))
        labels = Variable(labels)
        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        outputs = model(images)
        # Calculate softmax and cross entropy loss
        loss = error(outputs, labels)
        # Calculating gradients
        loss.backward()
        # Update parameters
        optimizer.step()
        count += 1
        if count % 250 == 0:
            # Calculate Accuracy
            correct = 0
            total = 0
            # Iterate through test dataset
            for images, labels in test_loader:
                images = Variable(images.view(-1, seq_dim, input_dim))
                # Forward propagation
                outputs = model(images)
                # Get predictions from the maximum value
                predicted = torch.max(outputs.data, 1)[1]
                # Total number of labels
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            accuracy = 100 * correct / float(total)
            # store loss and iteration
            loss_list.append(loss.item())
            iteration_list.append(count)
            accuracy_list.append(accuracy)
            if count % 500 == 0:
                # Print Loss
                print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.item(), accuracy))
Results:
Iteration: 500 Loss: 1.4726558923721313 Accuracy: 42.726190476190474 %
Iteration: 1000 Loss: 0.7108388543128967 Accuracy: 71.73809523809524 %
Iteration: 1500 Loss: 0.43755194544792175 Accuracy: 85.22619047619048 %
Iteration: 2000 Loss: 0.271086722612381 Accuracy: 90.25 %
Iteration: 2500 Loss: 0.2235582023859024 Accuracy: 89.5 %
Iteration: 3000 Loss: 0.09727417677640915 Accuracy: 92.66666666666667 %
Iteration: 3500 Loss: 0.42934906482696533 Accuracy: 92.6547619047619 %
Iteration: 4000 Loss: 0.09869173169136047 Accuracy: 94.19047619047619 %
Iteration: 4500 Loss: 0.2372802197933197 Accuracy: 95.20238095238095 %
Iteration: 5000 Loss: 0.10717732459306717 Accuracy: 95.19047619047619 %
Iteration: 5500 Loss: 0.23859672248363495 Accuracy: 94.69047619047619 %
Iteration: 6000 Loss: 0.15453924238681793 Accuracy: 96.05952380952381 %
Iteration: 6500 Loss: 0.07914035022258759 Accuracy: 95.97619047619048 %
Iteration: 7000 Loss: 0.12296199798583984 Accuracy: 96.27380952380952 %
Iteration: 7500 Loss: 0.10664860904216766 Accuracy: 96.11904761904762 %
Part 3: Visualization
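The original ends here without the plotting code. A minimal sketch using matplotlib over the loss_list, iteration_list and accuracy_list collected during training would look like this:

# visualization of loss
import matplotlib.pyplot as plt

plt.plot(iteration_list, loss_list)
plt.xlabel("Number of iterations")
plt.ylabel("Loss")
plt.title("RNN: Loss vs Number of iterations")
plt.show()

# visualization of accuracy
plt.plot(iteration_list, accuracy_list, color="red")
plt.xlabel("Number of iterations")
plt.ylabel("Accuracy")
plt.title("RNN: Accuracy vs Number of iterations")
plt.show()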

