PyTorch Learning
Recommended Resources
Links:
1 PyTorch official site: https://pytorch.org/
Videos:
1 Bilibili - 土堆: https://space.bilibili.com/203989554?spm_id_from=333.788.b_765f7570696e666f.1
2 Bilibili: https://www.bilibili.com/video/BV1Y7411d7Ys
Datasets:
torchvision datasets: https://pytorch.org/vision/stable/index.html#
1 COCO: https://cocodataset.org/#download
2 VOC: http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html
Hub:
PyTorch Deep Learning
Basic Setup
-- Import packages and check versions
import torch
import torch.nn as nn
import torchvision

print(torch.__version__)
print(torch.version.cuda)
print(torch.backends.cudnn.version())
print(torch.cuda.get_device_name(0))
-- GPU setup
# Single GPU
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Use specific GPUs, e.g. GPUs 0 and 1
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
# or: CUDA_VISIBLE_DEVICES=0,1 python train.py

# Clear the GPU memory cache
torch.cuda.empty_cache()
# or reset the GPU from the command line:
# nvidia-smi --gpu-reset -i [gpu_id]
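The device object above is only defined here; a minimal sketch (my addition, the model and tensor names are illustrative) of how it is typically used to move a model and a batch onto the selected device:

import torch
from torch import nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = nn.Linear(10, 2).to(device)          # move the parameters to the chosen device
batch = torch.randn(4, 10, device=device)    # create the input directly on that device
output = model(batch)
print(output.device)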
-- Basic tensor information
Reference: https://mp.weixin.qq.com/s/pPx2sWR_CR40FFL1uHMDHA
# Basic tensor information
tensor = torch.randn(3, 4, 5)
print(tensor.type())   # data type
print(tensor.size())   # shape of the tensor (a tuple-like object)
print(tensor.dim())    # number of dimensions

# Named tensors
# Before PyTorch 1.3, dimensions had to be documented with comments:
# Tensor[N, C, H, W]
images = torch.randn(32, 3, 56, 56)
images.sum(dim=1)
images.select(dim=1, index=0)

# Since PyTorch 1.3
NCHW = ['N', 'C', 'H', 'W']
images = torch.randn(32, 3, 56, 56, names=NCHW)
images.sum('C')
images.select('C', index=0)
# Names can also be set directly
tensor = torch.rand(3, 4, 1, 2, names=('C', 'N', 'H', 'W'))
# align_to conveniently reorders dimensions
tensor = tensor.align_to('N', 'C', 'H', 'W')

# Data type conversion
# Set the default type; FloatTensor is much faster than DoubleTensor in PyTorch
torch.set_default_tensor_type(torch.FloatTensor)

# Type conversions
tensor = tensor.cuda()
tensor = tensor.cpu()
tensor = tensor.float()
tensor = tensor.long()
# torch.Tensor <-> np.ndarray
ndarray = tensor.cpu().numpy()
tensor = torch.from_numpy(ndarray).float()
tensor = torch.from_numpy(ndarray.copy()).float()  # if ndarray has negative strides
# torch.Tensor <-> PIL.Image
# PyTorch tensors use the [N, C, H, W] layout with values in [0, 1],
# so transposition and rescaling are needed.

# torch.Tensor -> PIL.Image
image = PIL.Image.fromarray(torch.clamp(tensor * 255, min=0, max=255).byte().permute(1, 2, 0).cpu().numpy())
image = torchvision.transforms.functional.to_pil_image(tensor)  # equivalent way

# PIL.Image -> torch.Tensor
path = r'./figure.jpg'
tensor = torch.from_numpy(np.asarray(PIL.Image.open(path))).permute(2, 0, 1).float() / 255
tensor = torchvision.transforms.functional.to_tensor(PIL.Image.open(path))  # equivalent way
# np.ndarray <-> PIL.Image
image = PIL.Image.fromarray(ndarray.astype(np.uint8))
ndarray = np.asarray(PIL.Image.open(path))
.....
Advanced Topics
1 Using TensorBoard
Purpose: display losses in real time by parsing event files.
1 Install: pip install tensorboard -i https://pypi.douban.com/simple
2 Launch (command line): tensorboard --logdir=logs --port=6007
  # tensorboard --logdir=truck_train2 --host 0.0.0.0 --port 8080   # specify the host IP and port
""" tensorboard使用步骤: 功能:losse实时显示,解析事件文件 1 安装:pip install tensorboard -i https://pypi.douban.com/simple 2 启动:cmd:tensorboard --logdir=logs --port=6007 """ from torch.utils.tensorboard import SummaryWriter import numpy as np from PIL import Image writer = SummaryWriter("logs") #向指定文件夹写入事件文件,方便tensorboard解析 image_path = r"\dataset\train\ants_image\0013035.jpg" img_PIL = Image.open(image_path) img_array = np.array(img_PIL) print(type(img_array)) print(img_array.shape) writer.add_image("train", img_array, 1, dataformats='HWC') #高度,宽度,通道 # y = 2x for i in range(100): writer.add_scalar("y=2x", 3*i, i) writer.close()
2 The Dataset class
from torch.utils.data import Dataset, DataLoader
import numpy as np
from PIL import Image
import os
from torchvision import transforms
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid

writer = SummaryWriter("logs")


class MyData(Dataset):

    def __init__(self, root_dir, image_dir, label_dir, transform):
        self.root_dir = root_dir
        self.image_dir = image_dir
        self.label_dir = label_dir
        self.label_path = os.path.join(self.root_dir, self.label_dir)
        self.image_path = os.path.join(self.root_dir, self.image_dir)
        self.image_list = os.listdir(self.image_path)
        self.label_list = os.listdir(self.label_path)
        self.transform = transform
        # Image and label files share the same names, so sorting both lists
        # the same way keeps samples and labels aligned one-to-one.
        self.image_list.sort()
        self.label_list.sort()

    def __getitem__(self, idx):
        img_name = self.image_list[idx]
        label_name = self.label_list[idx]
        img_item_path = os.path.join(self.root_dir, self.image_dir, img_name)
        label_item_path = os.path.join(self.root_dir, self.label_dir, label_name)
        img = Image.open(img_item_path)   # read the image
        with open(label_item_path, 'r') as f:
            label = f.readline()

        # img = np.array(img)
        img = self.transform(img)
        sample = {'img': img, 'label': label}
        return sample

    def __len__(self):
        assert len(self.image_list) == len(self.label_list)
        return len(self.image_list)


if __name__ == '__main__':
    transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    root_dir = "dataset/train"
    image_ants = "ants_image"
    label_ants = "ants_label"
    ants_dataset = MyData(root_dir, image_ants, label_ants, transform)
    image_bees = "bees_image"
    label_bees = "bees_label"
    bees_dataset = MyData(root_dir, image_bees, label_bees, transform)
    train_dataset = ants_dataset + bees_dataset

    # transforms = transforms.Compose([transforms.Resize(256, 256)])
    dataloader = DataLoader(train_dataset, batch_size=1, num_workers=2)

    writer.add_image('error', train_dataset[119]['img'])
    writer.close()
    # for i, j in enumerate(dataloader):
    #     # imgs, labels = j
    #     print(type(j))
    #     print(i, j['img'].shape)
    #     # writer.add_image("train_data_b2", make_grid(j['img']), i)
    # writer.close()
3 transforms
# Purpose: image transforms: convert a PIL image to the tensor data type; resize
# Image transforms: convert PIL images to tensors; resize
import torchvision
from torch.utils.tensorboard import SummaryWriter

dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform, download=True)

# print(test_set[0])
# print(test_set.classes)
#
# img, target = test_set[0]
# print(type(img))
# print(target)
# print(test_set.classes[target])
# img.show()

writer = SummaryWriter("p10")
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)

writer.close()
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
from torchvision import transforms


class MyData(Dataset):

    def __init__(self, root_dir, image_dir, label_dir, transform=None):
        self.root_dir = root_dir
        self.image_dir = image_dir
        self.label_dir = label_dir
        self.label_path = os.path.join(self.root_dir, self.label_dir)
        self.image_path = os.path.join(self.root_dir, self.image_dir)
        self.image_list = os.listdir(self.image_path)
        self.label_list = os.listdir(self.label_path)
        self.transform = transform
        # Image and label files share the same names; sorting both lists keeps them aligned.
        self.image_list.sort()
        self.label_list.sort()

    def __getitem__(self, idx):
        img_name = self.image_list[idx]
        label_name = self.label_list[idx]
        img_item_path = os.path.join(self.root_dir, self.image_dir, img_name)
        label_item_path = os.path.join(self.root_dir, self.label_dir, label_name)
        img = Image.open(img_item_path)
        with open(label_item_path, 'r') as f:
            label = f.readline()

        if self.transform:
            img = self.transform(img)   # fixed: use the instance's transform, not the global one
        return img, label

    def __len__(self):
        assert len(self.image_list) == len(self.label_list)
        return len(self.image_list)


transform = transforms.Compose([transforms.Resize(400), transforms.ToTensor()])
root_dir = "dataset/train"
image_ants = "ants_image"
label_ants = "ants_label"
ants_dataset = MyData(root_dir, image_ants, label_ants, transform=transform)
image_bees = "bees_image"
label_bees = "bees_label"
bees_dataset = MyData(root_dir, image_bees, label_bees, transform=transform)
4 DataLoader
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Prepare the test dataset
test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor())

test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)

# First image and target in the test dataset
img, target = test_data[0]
print(img.shape)
print(target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs, targets = data
        # print(imgs.shape)
        # print(targets)
        writer.add_images("Epoch: {}".format(epoch), imgs, step)
        step = step + 1

writer.close()
5 nn.Module (basic skeleton of a neural network)
import torch
from torch import nn


class Tudui(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output


tudui = Tudui()
x = torch.tensor(1.0)
output = tudui(x)
print(output)
6 Convolution layers (F.conv2d, nn.Conv2d)
import torch
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])

kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))

print(input.shape)
print(kernel.shape)

output = F.conv2d(input, kernel, stride=1)
print(output)

output2 = F.conv2d(input, kernel, stride=2)
print(output2)

output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)
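As a sanity check (my own worked step, not from the original notes): the top-left element of the stride-1 output is the sum of element-wise products between the kernel and the top-left 3x3 patch of the input, 1*1 + 2*2 + 0*1 + 0*0 + 1*1 + 2*0 + 1*2 + 2*1 + 1*0 = 10. Working through the remaining positions the same way gives, if my arithmetic is right, [[10, 12, 12], [18, 16, 16], [13, 9, 3]] for the stride-1, no-padding case.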
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()

writer = SummaryWriter("../logs")

step = 0
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(imgs.shape)
    print(output.shape)
    # torch.Size([64, 3, 32, 32])
    writer.add_images("input", imgs, step)
    # torch.Size([64, 6, 30, 30]) -> [xxx, 3, 30, 30]
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step)

    step = step + 1

writer.close()
7 Max pooling (MaxPool2d)
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        output = self.maxpool1(input)
        return output


tudui = Tudui()

writer = SummaryWriter("../logs_maxpool")
step = 0

for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step = step + 1

writer.close()
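The code above pools CIFAR10 images, which makes the numeric effect hard to see. A minimal sketch (my addition) on a small hand-made input, using ceil_mode=True so the partial windows at the border are kept:

import torch
from torch import nn

# MaxPool2d needs floating-point input
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
input = torch.reshape(input, (-1, 1, 5, 5))   # (N, C, H, W)

maxpool = nn.MaxPool2d(kernel_size=3, ceil_mode=True)   # keep partial 3x3 windows at the edge
output = maxpool(input)
print(output)   # expected: [[2., 3.], [5., 1.]]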
8 Non-linear activations (ReLU, Sigmoid)
import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())

dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        output = self.sigmoid1(input)
        return output


tudui = Tudui()

writer = SummaryWriter("../logs_relu")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step += 1

writer.close()
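The small input tensor above is defined but never fed through the network (the forward pass applies Sigmoid to CIFAR10 images). A minimal sketch (my addition) of what ReLU does to that small tensor:

import torch
from torch.nn import ReLU

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))

relu = ReLU()
print(relu(input))   # negative entries are clamped to 0: [[1., 0.], [0., 3.]]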
9 Linear and other layers (Linear)
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        output = self.linear1(input)
        return output


tudui = Tudui()

for data in dataloader:
    imgs, targets = data
    print(imgs.shape)
    output = torch.flatten(imgs)   # flattens the whole batch: 64 * 3 * 32 * 32 = 196608
    print(output.shape)
    output = tudui(output)
    print(output.shape)
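Note that torch.flatten(imgs) above collapses the whole batch into one long vector, which is why drop_last=True and the 196608-dimensional input are needed. A minimal sketch (my addition) of the more common per-sample flattening, which keeps the batch dimension:

import torch

imgs = torch.randn(64, 3, 32, 32)
flat = torch.flatten(imgs, start_dim=1)   # keep dim 0 (the batch), flatten the rest
print(flat.shape)                         # torch.Size([64, 3072])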
10 Building a network with Sequential
import torch
from torch import nn


# Build the network
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


if __name__ == '__main__':
    tudui = Tudui()
    input = torch.ones((64, 3, 32, 32))
    output = tudui(input)
    print(output.shape)
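Shape check (my own reasoning, not part of the original notes): with 5x5 kernels, stride 1, and padding 2, each convolution keeps the 32x32 spatial size of CIFAR10 images, and each MaxPool2d(2) halves it, so the feature map shrinks 32 -> 16 -> 8 -> 4. After the last pooling layer the tensor is 64 x 4 x 4, which is why the first Linear layer takes 64*4*4 = 1024 input features.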
11 Loss functions and backpropagation (CrossEntropyLoss)
import torch
from torch.nn import L1Loss
from torch import nn

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

inputs = torch.reshape(inputs, (1, 1, 1, 3))    # batch_size=1, channel=1, 1 row, 3 columns
targets = torch.reshape(targets, (1, 1, 1, 3))

loss = L1Loss(reduction='sum')
result = loss(inputs, targets)

loss_mse = nn.MSELoss()   # mean squared error
result_mse = loss_mse(inputs, targets)

print(result)
print(result_mse)

x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()   # cross entropy, used for classification
result_cross = loss_cross(x, y)
print(result_cross)
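For reference (my own worked step, using the standard formula behind nn.CrossEntropyLoss): with logits x = [0.1, 0.2, 0.3] and target class 1, the loss is -x[1] + log(exp(0.1) + exp(0.2) + exp(0.3)) = -0.2 + log(3.676), which is approximately 1.10, the value result_cross should print.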
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss(outputs, targets)
    print("ok")
12 Optimizers (torch.optim)
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
# Optimizers
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)

dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()
        result_loss.backward()   # backpropagation
        optim.step()
        running_loss = running_loss + result_loss   # (result_loss.item() would avoid keeping graph history)
    print(running_loss)
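StepLR is imported above but never used; a minimal sketch (my addition, the hyperparameters are illustrative) of how a learning-rate scheduler could be attached to the optimizer:

import torch
from torch.optim.lr_scheduler import StepLR

model = torch.nn.Linear(10, 2)   # tiny stand-in model so the sketch is self-contained
optim = torch.optim.SGD(model.parameters(), lr=0.01)
scheduler = StepLR(optim, step_size=5, gamma=0.1)   # multiply the lr by 0.1 every 5 epochs

for epoch in range(20):
    # ... run the training loop for this epoch, calling optim.step() per batch ...
    scheduler.step()                      # advance the schedule once per epoch
    print(epoch, scheduler.get_last_lr())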
13 Using and modifying existing models
# -*- coding: utf-8 -*-
# Using and modifying an existing model
import torchvision
from torch import nn

# train_data = torchvision.datasets.ImageNet("../data_image_net", split='train', download=True,
#                                            transform=torchvision.transforms.ToTensor())

vgg16_false = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)

print(vgg16_true)

train_data = torchvision.datasets.CIFAR10('../data', train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)

# Add an extra linear layer to adapt the 1000-class output to CIFAR10's 10 classes
vgg16_true.classifier.add_module('add_linear', nn.Linear(1000, 10))
print(vgg16_true)

print(vgg16_false)
# Or replace the last classifier layer directly
vgg16_false.classifier[6] = nn.Linear(4096, 10)
print(vgg16_false)
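A side note (my addition): in newer torchvision releases (0.13 and later) the pretrained flag is deprecated in favor of an explicit weights argument, roughly:

import torchvision
from torchvision.models import VGG16_Weights

vgg16_true = torchvision.models.vgg16(weights=VGG16_Weights.DEFAULT)   # pretrained weights
vgg16_false = torchvision.models.vgg16(weights=None)                   # random initialization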
14 Saving and loading models
# -*- coding: utf-8 -*-
import torch
import torchvision
from torch import nn

vgg16 = torchvision.models.vgg16(pretrained=False)

# Save method 1: model structure + parameters
torch.save(vgg16, "vgg16_method1.pth")

# Save method 2: parameters only (officially recommended)
torch.save(vgg16.state_dict(), "vgg16_method2.pth")


# Pitfall
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()
torch.save(tudui, "tudui_method1.pth")
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torch
import torchvision
from torch import nn
from model_save import *

# Method 1: load a model saved with save method 1 (structure + parameters)
model = torch.load("vgg16_method1.pth")
# print(model)

# Method 2: load parameters into a freshly built model
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# model = torch.load("vgg16_method2.pth")
# print(vgg16)

# Pitfall 1: loading a whole model requires its class definition to be available
# class Tudui(nn.Module):
#     def __init__(self):
#         super(Tudui, self).__init__()
#         self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
#
#     def forward(self, x):
#         x = self.conv1(x)
#         return x

model = torch.load('tudui_method1.pth')
print(model)
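The pitfall, in short: torch.load of a whole model needs the class definition to be importable in the loading script, which is what from model_save import * provides above. A minimal sketch (my addition, assuming the class lives in model_save.py):

import torch
from model_save import Tudui   # make the class definition visible before loading

model = torch.load("tudui_method1.pth")   # unpickling can now find the Tudui class
print(model)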
16 Model training example
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torch
import torchvision
from PIL import Image
from torch import nn

image_path = "../imgs/airplane.png"
image = Image.open(image_path)
print(image)
image = image.convert('RGB')
transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
                                            torchvision.transforms.ToTensor()])

image = transform(image)
print(image.shape)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


# Load a model trained on the GPU and map it to the CPU
model = torch.load("tudui_29_gpu.pth", map_location=torch.device('cpu'))
print(model)
image = torch.reshape(image, (1, 3, 32, 32))
model.eval()
with torch.no_grad():
    output = model(image)
print(output)

print(output.argmax(1))
import torch
from torch import nn


# Build the network (model.py, imported by the training script below)
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


if __name__ == '__main__':
    tudui = Tudui()
    input = torch.ones((64, 3, 32, 32))
    output = tudui(input)
    print(output.shape)
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../data", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
# If train_data_size = 10, this prints: Training set size: 10
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Create the network model
tudui = Tudui()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Training bookkeeping
total_train_step = 0   # number of training steps
total_test_step = 0    # number of test evaluations
epoch = 10             # number of training epochs

# TensorBoard writer
writer = SummaryWriter("../logs_train")

for i in range(epoch):
    print("------- Epoch {} starts -------".format(i+1))

    # Training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize the model
        optimizer.zero_grad()   # reset gradients
        loss.backward()         # backpropagation
        optimizer.step()        # update parameters

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
17 Training on CPU
import torch   # added: needed for torch.optim, torch.no_grad and torch.save below
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../data", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
# If train_data_size = 10, this prints: Training set size: 10
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


# Create the network model
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


tudui = Tudui()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Training bookkeeping
total_train_step = 0   # number of training steps
total_test_step = 0    # number of test evaluations
epoch = 10             # number of training epochs

# TensorBoard writer
writer = SummaryWriter("../logs_train")

for i in range(epoch):
    print("------- Epoch {} starts -------".format(i+1))

    # Training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
18 Training on GPU
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
# GPU training, method 1: call .cuda() on the model, the loss function, and the data
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# from model import *

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../data", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
# If train_data_size = 10, this prints: Training set size: 10
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


# Create the network model
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


tudui = Tudui()
if torch.cuda.is_available():
    tudui = tudui.cuda()

# Loss function
loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()

# Optimizer
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Training bookkeeping
total_train_step = 0   # number of training steps
total_test_step = 0    # number of test evaluations
epoch = 10             # number of training epochs

# TensorBoard writer
writer = SummaryWriter("../logs_train")

for i in range(epoch):
    print("------- Epoch {} starts -------".format(i+1))

    # Training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
# -*- coding: utf-8 -*-
# Author: 小土堆 (WeChat official account: 土堆碎念)
# GPU training, method 2: define a device and call .to(device)
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# from model import *

# Define the training device
device = torch.device("cuda")

# Prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root="../data", train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)

# Dataset lengths
train_data_size = len(train_data)
test_data_size = len(test_data)
# If train_data_size = 10, this prints: Training set size: 10
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


# Create the network model
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(64*4*4, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


tudui = Tudui()
tudui = tudui.to(device)

# Loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# Optimizer
# learning_rate = 0.01
# 1e-2 = 1 x 10^(-2) = 1/100 = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Training bookkeeping
total_train_step = 0   # number of training steps
total_test_step = 0    # number of test evaluations
epoch = 10             # number of training epochs

# TensorBoard writer
writer = SummaryWriter("../logs_train")

for i in range(epoch):
    print("------- Epoch {} starts -------".format(i+1))

    # Training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Optimize the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Evaluation phase
    tudui.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
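A small robustness note (my addition): device = torch.device("cuda") fails on machines without a GPU; the usual pattern is to fall back to the CPU:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)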
Author: 华王
Blog: https://www.cnblogs.com/huahuawang/