Training a neural network in PyTorch with L1 and L2 regularization, and displaying the network weights

Three copies of the same small network are trained below: one without regularization, one with an explicit L1 penalty added to the loss, and one with L2 regularization applied through the optimizer's weight_decay. The first block reloads the three models saved by the training script further down and prints their parameters, so the effect of each penalty on the weights can be compared.
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F

class LinearNet(nn.Module):
    def __init__(self):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(2, 50)   # input layer: 2 features -> 50 hidden units
        self.linear1 = nn.Linear(50, 1)  # output layer: 50 hidden units -> 1 value
        self.sig = nn.Sigmoid()

    # forward defines the forward pass
    def forward(self, x):
        y = self.sig(self.linear(x))
        y = self.linear1(y)
        return y
# Reload the trained models saved at the end of the training script below
# and print their parameters.
net = torch.load('net.pkl')
netL1 = torch.load('netL1.pkl')
netL2 = torch.load('netL2.pkl')
for param_name, param in net.named_parameters():
    print(param_name, param)
for param_name, param in netL1.named_parameters():
    print(param_name, param)
for param_name, param in netL2.named_parameters():
    print(param_name, param)
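As an aside, torch.save(model, ...) pickles the whole module, which only loads back if the LinearNet class definition is available. A minimal sketch of the state_dict-based alternative recommended by the PyTorch docs (the file name net_state.pkl is our own):

# Save only the parameter tensors rather than the whole pickled module.
torch.save(net.state_dict(), 'net_state.pkl')
# To reload, rebuild the architecture and copy the parameters in.
net = LinearNet()
net.load_state_dict(torch.load('net_state.pkl'))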
     
     



# Training script: generate the data, train the three networks, and save them.
num_inputs = 2
num_examples = 10
true_w = [2, -3.4]
true_b = 4.2
# np.random.normal returns float64, so features is a double tensor
# (hence the .double() calls on the models below).
features = torch.from_numpy(np.random.normal(0, 1, (num_examples, num_inputs)))
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
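The labels here are an exact linear function of the features. If a more realistic regression target is wanted, a small Gaussian noise term can be added (the standard deviation 0.01 is an arbitrary choice of ours):

# Optional: perturb the labels with N(0, 0.01) noise.
labels += torch.from_numpy(np.random.normal(0, 0.01, size=labels.size()))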

import torch.utils.data as Data
batch_size = 10
features = features.cuda()
labels = labels.cuda()
# Combine the training features and labels into one dataset.
dataset = Data.TensorDataset(features, labels)
# Read random mini-batches from it.
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
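A quick sanity check of what the loader yields (with num_examples = 10 and batch_size = 10 there is a single batch per epoch):

for X, y in data_iter:
    print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10])
    break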
net = LinearNet()
netL1 = LinearNet()
netL2 = LinearNet()
# The features are float64, so convert the model parameters to match.
net = net.double()
netL1 = netL1.double()
netL2 = netL2.double()
loss = nn.MSELoss()
import torch.optim as optim
net = net.cuda()
netL1 = netL1.cuda()
netL2 = netL2.cuda()
optimizer = optim.SGD(net.parameters(), lr=0.00003)
optimizer1 = optim.SGD(netL1.parameters(), lr=0.00003)
# weight_decay=1 applies L2 regularization inside the optimizer update.
optimizerL2 = optim.SGD(netL2.parameters(), lr=0.00003, weight_decay=1)
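For reference, SGD's weight_decay=wd adds wd * p to the gradient of every parameter p, which is exactly the gradient of an explicit penalty wd/2 * Σ p² added to the loss. A sketch of that explicit form (the helper name l2_penalty is ours):

def l2_penalty(model, wd=1.0):
    # Equivalent loss term to optim.SGD(..., weight_decay=wd):
    # d/dp [ wd/2 * sum(p**2) ] = wd * p
    return wd * 0.5 * sum(torch.sum(p ** 2) for p in model.parameters())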


for epoch in range(1, 70000):
    for X, y in data_iter:
        output = netL2(X)
        l = loss(output, y.view(-1, 1))
        optimizerL2.zero_grad()  # clear gradients; equivalent to netL2.zero_grad()
        l.backward()
        optimizerL2.step()  # weight_decay applies the L2 penalty here
    # print('epoch %d, loss: %f' % (epoch, l.item()))
    if epoch == 69999:
        # With weight_decay the penalty is applied inside the optimizer
        # update, so l is the plain data loss with no penalty term included.
        print('L2-regularized network, epoch %d, data loss: %f' % (epoch, l.item()))

for epoch in range(1, 70000):
    for X, y in data_iter:
        output = netL1(X)
        l = loss(output, y.view(-1, 1))
        # Add the L1 penalty (lambda = 1) to the data loss by hand.
        regular = 0
        for param in netL1.parameters():
            regular += torch.sum(torch.abs(param))
        l += 1 * regular
        optimizer1.zero_grad()  # clear gradients; equivalent to netL1.zero_grad()
        l.backward()
        optimizer1.step()
    # print('epoch %d, loss: %f' % (epoch, l.item()))
    if epoch == 69999:
        print('L1-regularized network, epoch %d, loss including L1 penalty: %f' % (epoch, l.item()))
        l = loss(output, y.view(-1, 1))
        print('L1-regularized network, epoch %d, data loss without penalty: %f' % (epoch, l.item()))
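L1 regularization tends to drive individual weights toward exactly zero; a quick way to see this in the printed parameters is to count small entries (the 1e-3 threshold is an arbitrary choice of ours):

with torch.no_grad():
    total = sum(p.numel() for p in netL1.parameters())
    near_zero = sum((p.abs() < 1e-3).sum().item() for p in netL1.parameters())
    print('%d of %d parameters are near zero' % (near_zero, total))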



for epoch in range(1, 70000):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # clear gradients; equivalent to net.zero_grad()
        l.backward()
        optimizer.step()
    # print('epoch %d, loss: %f' % (epoch, l.item()))
    if epoch == 69999:
        print('Unregularized network, epoch %d, loss: %f' % (epoch, l.item()))
torch.save(net, 'net.pkl')
torch.save(netL1, 'netL1.pkl')
torch.save(netL2, 'netL2.pkl')
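These models were saved from GPU memory; if they are later reloaded on a machine without CUDA, torch.load's map_location argument maps the tensors back to the CPU:

# Map GPU-saved tensors onto the CPU when no CUDA device is available.
net = torch.load('net.pkl', map_location='cpu')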