import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from utils.dl_helper import EarlyStopper
import torch
import torch.nn as nn
import torch.utils.data as Data
import os
from utils.parent_dir import parent_dir
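
# `EarlyStopper` and `parent_dir` come from project utilities that are not
# shown here. From how they are used below, they are assumed to behave
# roughly like this sketch (an assumption, not the actual implementation):
#
#     def parent_dir():
#         # assumed: returns the project root used for the checkpoint dir
#         return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#
#     class EarlyStopper:
#         def __init__(self, num_trials, save_path):
#             self.num_trials, self.trial_counter = num_trials, 0
#             self.best_loss = float('inf')
#             self.save_path = save_path
#         def is_continuable(self, model, loss):
#             if loss < self.best_loss:       # new best: checkpoint, reset patience
#                 self.best_loss, self.trial_counter = loss, 0
#                 torch.save(model, self.save_path)  # whole model, matching torch.load() in predict()
#                 return True
#             if self.trial_counter + 1 < self.num_trials:
#                 self.trial_counter += 1     # still within patience
#                 return True
#             return False                    # patience exhausted: stop training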
# Manually append Graphviz to PATH here, in case the machine-level
# environment variable is not configured (machine-specific path).
os.environ['PATH'] += os.pathsep + r'D:\software\Anaconda3\envs\sao\Graphviz\bin'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fully connected neural network for regression
class MLPregression(nn.Module):
    def __init__(self, dim):
        super(MLPregression, self).__init__()
        # Single hidden layer: dim -> dim/2
        self.hidden1 = nn.Linear(in_features=dim, out_features=int(dim / 2), bias=True)
        # Regression output layer: dim/2 -> 1
        self.predict = nn.Linear(int(dim / 2), 1, bias=True)

    # Forward pass
    def forward(self, x):
        x = torch.sigmoid(self.hidden1(x))  # torch.sigmoid; F.sigmoid is deprecated
        output = self.predict(x)
        # Squeeze the (batch, 1) output into a 1-D vector of shape (batch,)
        return output[:, 0]
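
# Quick shape check (illustrative):
#     net = MLPregression(10)
#     net(torch.randn(4, 10)).shape   # torch.Size([4])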
class MLPHelper:
    def __init__(self, dim):
        self.testnet = MLPregression(dim).to(device)
        self.save_dir = os.path.join(parent_dir(), "checkpoint")
        os.makedirs(self.save_dir, exist_ok=True)  # make sure the checkpoint dir exists
        self.model_name = "MLP"
    def get_data(self, x, y):
        # Split the data, e.g. x: (110, 10), y: (110,)
        self.X_train, self.X_test, self.y_train, self.y_test \
            = train_test_split(x, y, test_size=0.1, random_state=42)
        # Convert to float32 tensors (note: no standardization is applied here)
        self.X_train_t = torch.from_numpy(self.X_train.astype(np.float32))
        self.y_train_t = torch.from_numpy(self.y_train.astype(np.float32))
        self.X_test_t = torch.from_numpy(self.X_test.astype(np.float32))
        self.y_test_t = torch.from_numpy(self.y_test.astype(np.float32))
    def get_dataloader(self):
        # Wrap the training data in a DataLoader; the test set stays a single
        # TensorDataset and is evaluated in one pass in validate()
        train_data = Data.TensorDataset(self.X_train_t, self.y_train_t)
        self.test_data = Data.TensorDataset(self.X_test_t, self.y_test_t)
        self.train_loader = Data.DataLoader(dataset=train_data, batch_size=8,
                                            shuffle=True, num_workers=1)
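
    # Note: num_workers=1 spawns a worker process; on Windows this requires
    # the entry point to live under `if __name__ == '__main__'` (as it does
    # below). num_workers=0 is a safe single-process fallback.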
    def validate(self):
        # Evaluate MSE on the held-out test set in a single forward pass
        self.testnet.eval()
        with torch.no_grad():
            y_pre = self.testnet(self.X_test_t.to(device))
        self.y_pre = y_pre.cpu().numpy()
        mse = mean_squared_error(self.y_test, self.y_pre)
        return mse
    def train(self):
        # Optimizer and loss
        optimizer = torch.optim.SGD(self.testnet.parameters(), lr=0.00006)
        loss_func = nn.MSELoss().to(device)  # mean squared error loss
        early_stopper = EarlyStopper(num_trials=3, save_path=f'{self.save_dir}/{self.model_name}.pt')
        # Train for at most 1000 epochs with early stopping on validation loss
        for epoch in range(1000):
            # validate() switches the net to eval mode, so switch back each epoch
            self.testnet.train()
            avg_loss = []
            # Iterate over the training DataLoader
            for step, (b_x, b_y) in enumerate(self.train_loader):
                b_x, b_y = b_x.to(device), b_y.to(device)
                output = self.testnet(b_x)      # MLP output on this batch
                loss = loss_func(output, b_y)   # MSE on this batch
                optimizer.zero_grad()           # reset gradients each iteration
                loss.backward()                 # backpropagate
                optimizer.step()                # gradient step
                avg_loss.append(loss.item())
            avg_loss = np.array(avg_loss).mean()
            validate_loss = self.validate()
            print("Epoch {}, train loss:{}, val loss:{}".format(epoch, avg_loss, validate_loss))
            if not early_stopper.is_continuable(self.testnet, validate_loss):
                print(f'validation: best loss: {early_stopper.best_loss}')
                break
    def predict(self, x):
        # Reload the best checkpoint saved by the early stopper
        self.testnet = torch.load(f'{self.save_dir}/{self.model_name}.pt', map_location=device)
        self.testnet.eval()
        with torch.no_grad():
            y_pre = self.testnet(x.to(device))
        return y_pre.cpu().numpy()
if __name__ == '__main__':
from common.sampling import latin
import problem.test_function as fun
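    # Assumed interfaces of the sampling/test-function helpers (a sketch,
    # not the actual common.sampling / problem.test_function code):
    #   latin(n, dim, lb, ub) -> (n, dim) Latin hypercube sample in [lb, ub]^dim
    #   fun.ellipsoid(x)      -> (n,) function values, one per row of x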
dimension = 10
    func = fun.ellipsoid  # bind the test function without shadowing the module name
lower_bound = -5.12
upper_bound = 5.12
datanum = 11 * dimension
x = latin(datanum, dimension, lower_bound, upper_bound)
    y = func(x)
print(x.shape, y.shape)
m = MLPHelper(dimension)
m.get_data(x, y)
m.get_dataloader()
m.train()
    x = torch.tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], dtype=torch.float32)
print(m.predict(x))
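    # Illustrative sanity check: compare the surrogate's prediction with the
    # true test-function value at the same point (assumes `func` accepts any
    # (n, dim) array, as it did for the training sample above).
    print(func(x.numpy()))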