Introduction to Deep Learning (5): Hands-On: Handwritten Digit Recognition

An implementation based on the PyTorch framework.

Import the required libraries

import torch
import torchvision
from torch import optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
import torch.nn as nn
from PIL import Image

Image preprocessing

transform = transforms.Compose([
    transforms.ToTensor(),                      # convert the image to a tensor (shape [1, 28, 28], pixel values in [0, 1])
    transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and standard deviation
])
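
As a quick sanity check (an addition here, not part of the original post), you can push a dummy grayscale image through the transform and inspect the result; the mid-gray PIL image below is just a stand-in for a real MNIST digit:

dummy = Image.new('L', (28, 28), color=128)  # synthetic 28x28 grayscale image
t = transform(dummy)
print(t.shape)                          # torch.Size([1, 28, 28]) -- channel-first tensor
print(t.min().item(), t.max().item())   # constant ~1.205 = (128/255 - 0.1307) / 0.3081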

Load the MNIST dataset (60,000 training images, 10,000 test images)

train_data = torchvision.datasets.MNIST(root='../data', train=True, download=True, transform=transform)
test_data = torchvision.datasets.MNIST(root='../data', train=False, download=True, transform=transform)
train_loader = DataLoader(dataset=train_data, batch_size=1000, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True)
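
Before training, it is worth confirming what the loaders actually yield. This check (a small addition, not in the original post) prints the dataset sizes and the shape of one training batch:

print(len(train_data), len(test_data))  # 60000 10000
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([1000, 1, 28, 28]) -- one batch of images
print(labels.shape)  # torch.Size([1000]) -- the matching digit labels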

Device selection

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

The model: a fully connected network

# Fully connected network
class SimpleNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),  # input layer → hidden layer
            nn.ReLU(),
            nn.Linear(512, 10)  # hidden layer → output layer
        )

    def forward(self, x):
        x = self.flatten(x)  # flatten the image to [batch_size, 784]
        logits = self.linear_relu_stack(x)
        return logits


model = SimpleNN().to(device)
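
A dummy forward pass verifies the architecture before training; this snippet is a sketch added here, not from the original post:

with torch.no_grad():
    dummy = torch.zeros(8, 1, 28, 28, device=device)  # fake batch of 8 blank images
    print(model(dummy).shape)  # torch.Size([8, 10]) -- one logit per digit class

num_params = sum(p.numel() for p in model.parameters())
print(num_params)  # 407050 = (784*512 + 512) + (512*10 + 10)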

Loss function, optimizer, and learning-rate scheduler

loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)  # decay the lr by 10x every 3 epochs
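
StepLR multiplies the learning rate by gamma every step_size scheduler steps, so with one step per epoch the rate stays at 0.001 for epochs 1-3 and drops to 0.0001 from epoch 4. The throwaway probe below (an illustration added here; it uses copies so the real optimizer is untouched) prints that schedule:

probe_opt = torch.optim.Adam(model.parameters(), lr=0.001)
probe_sched = optim.lr_scheduler.StepLR(probe_opt, step_size=3, gamma=0.1)
for e in range(5):
    print(e + 1, probe_sched.get_last_lr())  # [0.001] for epochs 1-3, [0.0001] afterwards
    probe_opt.step()    # step the optimizer first to avoid a scheduler warning
    probe_sched.step()  # one scheduler step per epoch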

Training function

def train(model, dataloader, loss_fn, optimizer):
    model.train()
    total_loss, correct = 0, 0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Forward pass
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update
        optimizer.zero_grad()  # clear gradients from the previous step
        loss.backward()  # compute gradients
        optimizer.step()  # update parameters

        # Accumulate metrics
        total_loss += loss.item()
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    avg_loss = total_loss / len(dataloader)
    accuracy = correct / len(dataloader.dataset)
    return avg_loss, accuracy
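
The accuracy bookkeeping relies on pred.argmax(1) picking the highest-scoring class in each row of the logits. A toy example (values made up purely for illustration):

pred = torch.tensor([[0.1, 2.0, -1.0],
                     [0.5, 0.2, 3.0]])
y = torch.tensor([1, 0])
print(pred.argmax(1))  # tensor([1, 2]) -- predicted class per row
print((pred.argmax(1) == y).type(torch.float).sum().item())  # 1.0: only the first row is correct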

Test function

def test(model, dataloader, loss_fn):
    model.eval()
    total_loss, correct = 0, 0

    with torch.no_grad():  # disable gradient tracking (saves memory)
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            loss = loss_fn(pred, y)

            total_loss += loss.item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    avg_loss = total_loss / len(dataloader)
    accuracy = correct / len(dataloader.dataset)
    return avg_loss, accuracy
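
Note that the model returns raw logits and nn.CrossEntropyLoss applies log-softmax internally, which is why neither the model nor the train/test code ever calls softmax explicitly. The two lines below (a small demonstration added here) compute the same value:

logits = torch.tensor([[2.0, 0.5, -1.0]])
target = torch.tensor([0])
print(nn.CrossEntropyLoss()(logits, target).item())  # ~0.2415
print(nn.functional.nll_loss(nn.functional.log_softmax(logits, dim=1), target).item())  # same value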

Train and report results

epochs = 5  # number of training epochs
for epoch in range(epochs):
    train_loss, train_acc = train(model, train_loader, loss_fn, optimizer)
    test_loss, test_acc = test(model, test_loader, loss_fn)
    scheduler.step()  # advance the StepLR schedule once per epoch

    print(f"Epoch {epoch + 1}/{epochs}")
    print(f"Train loss: {train_loss:.4f} | Train accuracy: {train_acc * 100:.2f}%")
    print(f"Test loss: {test_loss:.4f} | Test accuracy: {test_acc * 100:.2f}%\n")

References

Dive into Deep Learning v2 (PyTorch edition)
