每日笔记-LSTM
今天写了一段 LSTM 预测正弦序列的示例代码,但运行结果没有达到预期的效果。
import torch
import torch.nn as nn
import numpy as np
# Fix the RNG seed so results are reproducible across runs.
torch.manual_seed(42)


class ComplexLSTMModel(nn.Module):
    """A stacked (2-layer) LSTM followed by a linear head.

    forward() consumes a 1-D sequence and returns only the prediction
    for the final time step.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(ComplexLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        # num_layers=2: two stacked LSTM layers instead of one.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=2)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        # nn.LSTM expects (seq_len, batch, features); batch is fixed at 1 here.
        seq_features, _ = self.lstm(input.view(len(input), 1, -1))
        projected = self.fc(seq_features.view(len(input), -1))
        # Only the last time step's projection is returned.
        return projected[-1]
# Prepare data and hyperparameters.
input_size = 1
hidden_size = 8
output_size = 1
lr = 0.01
num_epochs = 100

# Generate a toy sine-wave series as sample data.
data = np.sin(np.arange(0, 100, 0.1))

# Model, loss function and optimizer.
model = ComplexLSTMModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# Build the input/target tensors ONCE — they are loop-invariant, so there is
# no reason to reconstruct them on every epoch as the original code did.
inputs = torch.Tensor(data[:-1]).view(-1, 1, 1)
labels = torch.Tensor(data[1:])

# NOTE(review): model(inputs) returns only the LAST time step, shape (1,),
# while `labels` has shape (len(data)-1,). MSELoss silently broadcasts the
# two, so the loss being minimized is almost certainly not the intended
# next-step objective — this is the likely reason the results look wrong.
for epoch in range(num_epochs):
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Autoregressive rollout: predict 100 future steps, feeding each
# prediction back in as the next input (no gradients needed).
with torch.no_grad():
    future = 100
    # Seed the rollout with the final point of the training series.
    pred_data = [data[-1]]
    for _ in range(future):
        last_point = torch.Tensor([pred_data[-1]]).view(-1, 1, 1)
        pred_data.append(model(last_point).item())

# Plot the original series against the rolled-out predictions.
import matplotlib.pyplot as plt
plt.plot(data, label='Original data')
plt.plot(np.arange(len(data) - 1, len(data) + future), pred_data, label='Predictions')
plt.legend()
plt.show()
结果如图:

没有达到想象的效果
-----------------------------------
仔细阅读后,找到了问题症结所在:原代码每一步只拿前一个点去预测下一个点,信息量太少;而且模型只返回最后一个时间步的输出(形状为 (1,)),却与整个标签序列(形状为 (N,))做 MSELoss,发生了隐式广播,优化的根本不是逐点预测目标。改成以长度为 20 的滑动窗口作为输入、窗口后一个点作为标签后,训练目标才正确。
修改后的代码:
import torch
import torch.nn as nn
import numpy as np
torch.manual_seed(42)


class LSTMModel(nn.Module):
    """Two-layer LSTM plus a linear projection.

    forward() maps a 1-D input window to a single next-value prediction
    (the projection of the final time step).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=2)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        # The (h, c) state pair is kept on the instance, mirroring the
        # original code (it is not read back anywhere visible here).
        seq_out, self.hidden = self.lstm(input.view(len(input), 1, -1))
        head_out = self.fc(seq_out.view(len(input), -1))
        return head_out[-1]
# Hyperparameters.
input_size = 1
hidden_size = 16
output_size = 1
lr = 0.001
num_epochs = 100

# Toy sine-wave series.
data = np.sin(np.arange(0, 100, 0.1))

# Sliding-window training pairs: each input is the previous 20 points,
# each label is the point that follows that window.
inputs = [data[i - 20:i] for i in range(20, len(data))]
labels = [data[i] for i in range(20, len(data))]

# Model, loss function and optimizer.
model = LSTMModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Convert every window/target to a tensor ONCE, up front — the original
# rebuilt the same tensors from numpy on every sample of every epoch.
input_tensors = [torch.Tensor(w).view(-1, 1, 1) for w in inputs]
label_tensors = [torch.Tensor([y]) for y in labels]

# Train on all but the final 100 windows (those are held out for prediction).
train_count = len(inputs) - 100
for epoch in range(num_epochs):
    for x, y in zip(input_tensors[:train_count], label_tensors):
        optimizer.zero_grad()
        outputs = model(x)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
    # `loss` at this point is the loss on the epoch's last training sample.
    if (epoch + 1) % 1 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.6f}')
# Predict over the held-out final 100 windows (no gradient tracking).
pred_data = []
with torch.no_grad():
    for i in range(len(inputs) - 100, len(inputs)):
        window_tensor = torch.Tensor(inputs[i]).view(-1, 1, 1)
        pred_data.append(model(window_tensor).item())

# Plot the training portion of the series against the held-out predictions.
import matplotlib.pyplot as plt
plt.plot(data[:-100], label='Original data')
plt.plot(np.arange(len(data) - 100, len(data)), pred_data, label='Predictions')
plt.legend()
plt.show()
显示图像正常 !
浙公网安备 33010602011771号