# 目录

## 参考资料

 基础概念

 自定义一个网络为例

x和卷积核K做卷积运算，得到σ，对σ的每个元素做一个求和聚合，得到S（这里没有把σ拉直然后连接全连接的原因是，简化计算，让演示更加方便），然后S与GT计算MSE损失。

 初始化模型参数

这里重点演示卷积核上参数的更新，忽略了偏置项。

 计算卷积核上的梯度

 梯度更新

 PyTorch实战

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn

class Net(nn.Module):
    """Minimal demo network: a single 1->1 channel 2x2 convolution whose
    output is summed to a scalar.

    Summing instead of flattening into a fully-connected layer keeps the
    backprop example easy to compute by hand.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Bias is disabled so only the 4 kernel weights carry gradients.
        # Default padding=0, i.e. a "valid" convolution: (3x3 in -> 2x2 out).
        self.conv1 = nn.Conv2d(1, 1, 2, bias=False)

    def forward(self, x):
        """Convolve x and reduce to a scalar.

        Args:
            x: input tensor of shape (N, 1, H, W) — the demo uses (1, 1, 3, 3).

        Returns:
            A 0-dim tensor: the sum of all convolution outputs.
        """
        # NOTE: the original comment here said "Max pooling over a (2, 2)
        # window" (copied from the PyTorch tutorial) — this is a convolution,
        # not pooling.
        x = self.conv1(x)
        return x.sum()

if __name__ == '__main__':
    net = Net()
    print("网络结构为：")
    print(net)
    print()

    # Replace the randomly-initialized kernel with fixed values so every
    # number below is reproducible by hand.
    weight1 = torch.tensor([3., 2., 1., 5.])
    weight1 = weight1.view(1, 1, 2, 2)
    # FIX: access the parameter through the public attribute instead of the
    # private `_parameters` dict — same tensor, supported API.
    net.conv1.weight.data = weight1  # custom kernel

    input = torch.tensor([[1., 2., 3.],  # custom input
                          [4., 5., 6.],
                          [7., 8., 9.]])
    input = input.view(1, 1, 3, 3)
    output = net(input)
    print("前向传播输出：")
    print(output)  # conv-sum S = 232
    print()

    # Loss Function: MSE between the scalar output S and the ground truth.
    target = torch.tensor(230.)
    criterion = nn.MSELoss()
    loss = criterion(output, target)
    print("MSE loss：", loss)
    print()

    # Backprop
    loss.backward()
    print("卷积核的梯度：")
    # FIX: the label above was printed with no value after it — show the
    # kernel gradient (dL/dW = 2*(S-target) * per-weight input sums).
    print(net.conv1.weight.grad)
    print()

    use_module = True
    if not use_module:
        # Manual SGD: weight = weight - learning_rate * gradient
        learning_rate = 0.01
        for f in net.parameters():
            # FIX: the update step was missing — the loop printed the
            # parameters but never modified them.
            f.data.sub_(f.grad.data * learning_rate)
        print("手动更新")
        print(list(net.parameters()))
        # expected: tensor([[[[2.5200, 1.3600], ...
    else:
        # However, as you use neural networks, you want to use various
        # different update rules such as SGD, Nesterov-SGD, Adam, RMSProp,
        # etc. torch.optim implements all of these.
        import torch.optim as optim

        optimizer = optim.SGD(net.parameters(), lr=0.01)

        # FIX: zero the gradients accumulated by the backward() call above;
        # without this the second backward() doubles them and the update no
        # longer matches the expected output (2.5200, 1.3600, ...).
        optimizer.zero_grad()
        output = net(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()  # Does the update
        print("optim更新")
        print(list(net.parameters()))
        # expected: tensor([[[[2.5200, 1.3600], ...
View Code

 参考资料

《图解深度学习与神经网络：从张量到TensorFlow实现》_张平

https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html#sphx-glr-beginner-blitz-neural-networks-tutorial-py

posted @ 2020-06-30 22:58  黎明程序员  阅读(1287)  评论(0)  编辑  收藏  举报