pytorch-day01
1、Pytorch 和 Tensorflow的区别
pytorch是动态图;tensorflow1是静态图;tensorflow2既有动态图模式也有静态图模式:
# 1. PyTorch dynamic graph: operations execute eagerly, the graph is built on the fly
w_h = torch.randn(20, 20, requires_grad=True)

# 2. TensorFlow 1.x static graph: define placeholders/ops first, then run them in a session
x_ph = tf.placeholder(tf.int32, name='x')
y_ph = tf.placeholder(tf.int32, name='y')
# FIX: the original multiplied undefined a_ph/b_ph (NameError); use the placeholders above
z_ph = tf.multiply(x_ph, y_ph, name='x*y')
with tf.Session() as sess:
    z_val = sess.run(z_ph, feed_dict={x_ph: [8], y_ph: [9]})
    print(z_val)
2、PyTorch能做什么?
2.1、GPU加速
import torch
import time

print(torch.__version__)
print(torch.cuda.is_available())

a = torch.randn(10000, 1000)
b = torch.randn(1000, 2000)

# CPU baseline timing
t0 = time.time()
c = torch.matmul(a, b)
t1 = time.time()
print(a.device, t1 - t0, c.norm(2))

device = torch.device('cuda')
# move the operands onto the GPU
a = a.to(device)
b = b.to(device)

# first GPU run: includes one-time CUDA init/warm-up cost
t0 = time.time()
c = torch.matmul(a, b)
# FIX: CUDA kernels launch asynchronously — without synchronize() the
# timer would only measure the kernel launch, not the computation
torch.cuda.synchronize()
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))

# second GPU run: reflects the true steady-state GPU speed
t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()  # wait for the kernel before reading the clock
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))
2.2、自动求导
import torch
from torch import autograd

# one scalar input and three trainable scalar parameters
x = torch.tensor(1.)
a, b, c = (torch.tensor(v, requires_grad=True) for v in (1., 2., 3.))

# y = a^2 * x + b * x + c
y = a ** 2 * x + b * x + c

# .grad is None until a backward/grad call populates it
print('before:', a.grad, b.grad, c.grad)

# analytic gradients: dy/da = 2ax, dy/db = x, dy/dc = 1
grads = autograd.grad(y, [a, b, c])
print('after :', grads[0], grads[1], grads[2])
# expected output:
#   before: None None None
#   after : tensor(2.) tensor(1.) tensor(1.)
2.3、常用网络层

3、开发环境安装
4、简单回归案例
1 import numpy as np
2
3 # y = wx + b
def average_loss(b, w, points):
    """Mean squared error of the line y = w*x + b over `points`.

    points: (N, 2) array-like — x values in column 0, y values in column 1.
    Returns the average squared residual as a float.
    """
    pts = np.asarray(points, dtype=float)
    # vectorized residuals: one NumPy pass instead of a Python-level loop
    residuals = pts[:, 1] - (w * pts[:, 0] + b)
    return float(np.mean(residuals ** 2))
11
def step_gradient(b_current, w_current, points, learningRate):
    """One gradient-descent step on the MSE loss of y = w*x + b.

    points: (N, 2) array-like — x in column 0, y in column 1.
    Returns [new_b, new_w] after moving against the averaged gradient.
    """
    pts = np.asarray(points, dtype=float)
    x = pts[:, 0]
    y = pts[:, 1]
    err = y - (w_current * x + b_current)
    # averaged gradients: d(MSE)/db = -2/N * sum(err), d(MSE)/dw = -2/N * sum(x*err)
    b_gradient = -2.0 * err.mean()
    w_gradient = -2.0 * (x * err).mean()
    new_b = b_current - learningRate * b_gradient
    new_w = w_current - learningRate * w_gradient
    return [new_b, new_w]
25
def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    """Run `num_iterations` gradient-descent steps from the given start and return [b, w]."""
    pts = np.array(points)  # convert once; step_gradient never mutates it
    b, w = starting_b, starting_w
    for _ in range(num_iterations):
        b, w = step_gradient(b, w, pts, learning_rate)
    return [b, w]
32
def run():
    """Load data.csv and fit y = w*x + b by gradient descent, printing loss before and after."""
    points = np.genfromtxt("data.csv", delimiter=",")
    learning_rate = 0.0001
    initial_b = 0  # initial y-intercept guess
    initial_w = 0  # initial slope guess
    num_iterations = 1000

    start_error = average_loss(initial_b, initial_w, points)
    print("Starting gradient descent at b = {0}, m = {1}, error = {2}"
          .format(initial_b, initial_w, start_error))
    print("Running...")

    b, w = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)
    final_error = average_loss(b, w, points)
    print("After {0} iterations b = {1}, m = {2}, error = {3}"
          .format(num_iterations, b, w, final_error))


if __name__ == '__main__':
    run()
5、手写数字
mnist_train.py:
1 import torch
2 from torch import nn
3 from torch.nn import functional as F
4 from torch import optim
5
6 import torchvision
7 from matplotlib import pyplot as plt
8
9 from utils import plot_image, plot_curve, one_hot
10
batch_size = 512

# step1. load dataset
# (0.1307,), (0.3081,) are the standard MNIST mean/std for normalization
mnist_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=mnist_transform),
    batch_size=batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
                               transform=mnist_transform),
    batch_size=batch_size, shuffle=False)

# peek at one training batch to sanity-check shapes and value range
x, y = next(iter(train_loader))
print(x.shape, y.shape, x.min(), x.max())
plot_image(x, y, 'image sample')
35
36
class Net(nn.Module):
    """Three-layer fully-connected classifier for flattened 28x28 MNIST images."""

    def __init__(self):
        super(Net, self).__init__()
        # widths shrink toward the output; 256 and 64 are free choices,
        # 10 is fixed by the number of digit classes
        self.fc1 = nn.Linear(28 * 28, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        # x: [b, 784] — the caller flattens [b, 1, 28, 28] beforehand
        h1 = F.relu(self.fc1(x))   # h1 = relu(x·w1 + b1)
        h2 = F.relu(self.fc2(h1))  # h2 = relu(h1·w2 + b2)
        return self.fc3(h2)        # raw logits; no activation on the last layer
56
57
net = Net()

# SGD over all parameters: [w1, b1, w2, b2, w3, b3]
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

train_loss = []

for epoch in range(3):  # three passes over the whole training set
    for batch_idx, (x, y) in enumerate(train_loader):
        # flatten [b, 1, 28, 28] -> [b, 784] for the fully-connected net
        flat = x.view(x.size(0), 28 * 28)
        out = net(flat)  # [b, 10]

        target = one_hot(y)
        loss = F.mse_loss(out, target)  # MSE against the one-hot labels

        optimizer.zero_grad()  # 1. clear old gradients
        loss.backward()        # 2. compute gradients
        optimizer.step()       # 3. update: w' = w - lr * grad
        train_loss.append(loss.item())

        if batch_idx % 10 == 0:
            print(epoch, batch_idx, loss.item())

plot_curve(train_loss)  # draw the loss curve
# at this point we hold the trained [w1, b1, w2, b2, w3, b3]

# 4. accuracy on the test set
total_correct = 0
for x, y in test_loader:
    out = net(x.view(x.size(0), 28 * 28))
    # out: [b, 10] => pred: [b]
    pred = out.argmax(dim=1)
    total_correct += pred.eq(y).sum().float().item()

total_num = len(test_loader.dataset)
acc = total_correct / total_num
print('test acc:', acc)

# visualize predictions on one test batch
x, y = next(iter(test_loader))
out = net(x.view(x.size(0), 28 * 28))
pred = out.argmax(dim=1)
plot_image(x, pred, 'test')
utils.py:
1 import torch
2 from matplotlib import pyplot as plt
3
4
def plot_curve(data):
    """Plot a 1-D sequence (e.g. per-step training loss) against its step index."""
    plt.figure()
    steps = range(len(data))
    plt.plot(steps, data, color='blue')
    plt.legend(['value'], loc='upper right')
    plt.xlabel('step')
    plt.ylabel('value')
    plt.show()
12
13
def plot_image(img, label, name):
    """Show the first six images of a batch in a 2x3 grid, titled "name: label".

    img is assumed normalized with mean 0.1307 / std 0.3081 (MNIST);
    the transform is undone for display.
    """
    plt.figure()
    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.tight_layout()
        # de-normalize back to [0, 1] grayscale for viewing
        plt.imshow(img[i][0] * 0.3081 + 0.1307, cmap='gray', interpolation='none')
        plt.title("{}: {}".format(name, label[i].item()))
        plt.xticks([])
        plt.yticks([])
    plt.show()
24
25
def one_hot(label, depth=10):
    """Convert a 1-D tensor of class indices into an (N, depth) one-hot float tensor.

    label: 1-D integer tensor of class indices in [0, depth).
    Returns a float tensor with a single 1 per row.
    """
    out = torch.zeros(label.size(0), depth)
    # FIX: avoid the legacy torch.LongTensor(tensor) constructor (deprecated,
    # makes a copy); .long().view(-1, 1) works for any integer tensor directly
    idx = label.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out

浙公网安备 33010602011771号