# 目录

 平均池化

## 对应代码

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    """Tiny demo network: one bias-free 2x2 convolution followed by
    2x2 average pooling.

    The convolution uses the Conv2d default padding=0, i.e. a "valid"
    convolution, and has no bias so a hand-set kernel fully determines
    the output.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel -> 1 output channel, 2x2 kernel, no bias.
        self.conv1 = nn.Conv2d(1, 1, 2, bias=False)

    def forward(self, x):
        """Convolve x, then average-pool over a (2, 2) window."""
        feature = self.conv1(x)
        pooled = F.avg_pool2d(feature, 2)
        return pooled

if __name__ == '__main__':
    net = Net()
    print("网络结构为：")
    print(net)
    print()

    # Install a hand-crafted 2x2 kernel so every value can be checked
    # by hand.  Assign through the public attribute instead of poking
    # the private _parameters dict.
    weight1 = torch.tensor([3., 2., 1., 5.])
    weight1 = weight1.view(1, 1, 2, 2)
    net.conv1.weight.data = weight1

    # Hand-crafted input, reshaped to NCHW = (1, 1, 3, 3).
    input = torch.tensor([[1., 2., 3.],
                          [4., 5., 6.],
                          [7., 8., 9.]])
    input = input.view(1, 1, 3, 3)
    output = net(input)
    print("前向传播输出：")
    print(output)
    print()

    # Loss function.  Give the target the same (1, 1, 1, 1) shape as
    # the output; a 0-d scalar target makes MSELoss broadcast and warn.
    target = torch.tensor(60.).view(1, 1, 1, 1)
    criterion = nn.MSELoss()
    loss = criterion(output, target)
    print("MSE loss：", loss)
    print()

    # Backprop.
    loss.backward()
    print("卷积核的梯度：")
    # Bug fix: the gradient was announced but never printed.
    print(net.conv1.weight.grad)
    print()

    use_module = True
    if not use_module:
        # Manual SGD step: weight = weight - learning_rate * gradient.
        # Bug fix: the original loop printed but never updated anything.
        learning_rate = 0.01
        for f in net.parameters():
            f.data.sub_(f.grad.data * learning_rate)
        print("手动更新")
        print(list(net.parameters()))
    else:
        # torch.optim implements the common update rules (SGD,
        # Nesterov-SGD, Adam, RMSProp, ...); using it is very simple.
        import torch.optim as optim

        optimizer = optim.SGD(net.parameters(), lr=0.01)

        # Bug fix: clear the gradient left by the earlier backward();
        # otherwise the second backward() accumulates onto it and
        # step() applies a doubled gradient.
        optimizer.zero_grad()
        output = net(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()  # Does the update.
        print("optim更新")
        print(list(net.parameters()))
View Code

 最大值池化

## 对应代码

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    """Tiny demo network: one bias-free 2x2 convolution followed by
    2x2 max pooling.

    The convolution uses the Conv2d default padding=0, i.e. a "valid"
    convolution, and has no bias so a hand-set kernel fully determines
    the output.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel -> 1 output channel, 2x2 kernel, no bias.
        self.conv1 = nn.Conv2d(1, 1, 2, bias=False)

    def forward(self, x):
        """Convolve x, then max-pool over a (2, 2) window."""
        feature = self.conv1(x)
        pooled = F.max_pool2d(feature, 2)
        return pooled

if __name__ == '__main__':
    net = Net()
    print("网络结构为：")
    print(net)
    print()

    # Install a hand-crafted 2x2 kernel so every value can be checked
    # by hand.  Assign through the public attribute instead of poking
    # the private _parameters dict.
    weight1 = torch.tensor([3., 2., 1., 5.])
    weight1 = weight1.view(1, 1, 2, 2)
    net.conv1.weight.data = weight1

    # Hand-crafted input, reshaped to NCHW = (1, 1, 3, 3).
    input = torch.tensor([[1., 2., 3.],
                          [4., 5., 6.],
                          [7., 8., 9.]])
    input = input.view(1, 1, 3, 3)
    output = net(input)
    print("前向传播输出：")
    print(output)
    print()

    # Loss function.  Give the target the same (1, 1, 1, 1) shape as
    # the output; a 0-d scalar target makes MSELoss broadcast and warn.
    target = torch.tensor(60.).view(1, 1, 1, 1)
    criterion = nn.MSELoss()
    loss = criterion(output, target)
    print("MSE loss：", loss)
    print()

    # Backprop.
    loss.backward()
    print("卷积核的梯度：")
    # Bug fix: the gradient was announced but never printed.
    print(net.conv1.weight.grad)
    print()

    use_module = True
    if not use_module:
        # Manual SGD step: weight = weight - learning_rate * gradient.
        # Bug fix: the original loop printed but never updated anything.
        learning_rate = 0.01
        for f in net.parameters():
            f.data.sub_(f.grad.data * learning_rate)
        print("手动更新")
        print(list(net.parameters()))
    else:
        # torch.optim implements the common update rules (SGD,
        # Nesterov-SGD, Adam, RMSProp, ...); using it is very simple.
        import torch.optim as optim

        optimizer = optim.SGD(net.parameters(), lr=0.01)

        # Bug fix: clear the gradient left by the earlier backward();
        # otherwise the second backward() accumulates onto it and
        # step() applies a doubled gradient.
        optimizer.zero_grad()
        output = net(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()  # Does the update.
        print("optim更新")
        print(list(net.parameters()))
View Code

 参考资料

## https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html#sphx-glr-beginner-blitz-neural-networks-tutorial-py

posted @ 2020-07-02 22:53  黎明程序员  阅读(566)  评论(0)  编辑  收藏  举报