2026.1.17总结

2: 计算机视觉基础

以下是代码的完整执行流程:
导入库​ → 导入PyTorch及相关工具库
定义CNN模型​ → 构建两层卷积+全连接的神经网络结构
数据准备​ → 下载MNIST数据集并进行标准化预处理
初始化​ → 创建模型、损失函数和优化器
训练循环(10个epoch):
训练阶段:前向传播 → 计算损失 → 反向传播 → 参数更新
测试阶段:评估模型准确率
记录结果​ → 保存每个epoch的训练损失和测试准确率
可视化​ → 绘制损失和准确率变化曲线
结束​ → 显示训练完成的模型性能图表
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim  # required: train_cnn() uses optim.Adam
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

import matplotlib.pyplot as plt

2.1 CNN基础架构

class CNN(nn.Module):
    """Two-convolution CNN for 10-class 28x28 grayscale classification (MNIST).

    Architecture: conv(1->32) -> pool -> conv(32->64) -> pool ->
    fc(3136->128) -> dropout -> fc(128->10).  Returns raw logits (no
    softmax), suitable for nn.CrossEntropyLoss.
    """

    def __init__(self):
        # NOTE: the posted source had `def init` / `self.init()` — the
        # blog's markdown stripped the double underscores; restored here
        # along with valid class-body indentation.
        super().__init__()
        # Convolution layers: kernel 3 with padding=1 preserves spatial
        # size; each 2x2 max-pool then halves it: 28 -> 14 -> 7.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)

        # Shared pooling layer (stateless, so one instance is fine).
        self.pool = nn.MaxPool2d(2, 2)

        # Fully-connected head: 64 channels * 7 * 7 spatial positions.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

        # Dropout regularizes the FC layer; active only in train() mode.
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        """Forward pass.  x: (N, 1, 28, 28) float tensor -> (N, 10) logits."""
        x = self.pool(F.relu(self.conv1(x)))  # -> (N, 32, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))  # -> (N, 64, 7, 7)

        # Flatten for the fully-connected head.
        x = x.view(-1, 64 * 7 * 7)

        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)

        return x

2.2 数据加载和预处理

def load_mnist():
    """Download (if needed) and load MNIST.

    Returns:
        (trainloader, testloader): DataLoaders with batch_size=64.
        Images are converted to tensors and normalized with mean=0.5,
        std=0.5 on the single grayscale channel (values roughly in [-1, 1]).
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ])

    # Training split (60k images); downloaded into ./data on first run.
    trainset = torchvision.datasets.MNIST(
        root='./data',
        train=True,
        download=True,
        transform=transform,
    )

    # Test split (10k images).
    testset = torchvision.datasets.MNIST(
        root='./data',
        train=False,
        download=True,
        transform=transform,
    )

    # Shuffle only the training data; keep deterministic order for evaluation.
    trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
    testloader = DataLoader(testset, batch_size=64, shuffle=False)

    return trainloader, testloader

2.3 完整训练流程

def train_cnn():
    """Train the CNN on MNIST for 10 epochs and evaluate after each one.

    Returns:
        (model, train_losses, test_accuracies): the trained model plus one
        list entry per epoch — mean training loss and test accuracy in %.

    Note: requires `import torch.optim as optim` at module level; the
    original post used `optim.Adam` without importing it.
    """
    # Load data
    trainloader, testloader = load_mnist()

    # Initialize model, loss, optimizer.
    model = CNN()
    criterion = nn.CrossEntropyLoss()  # expects raw logits + int class labels
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Training loop
    num_epochs = 10
    train_losses = []
    test_accuracies = []

    for epoch in range(num_epochs):
        # ---- training phase ----
        model.train()  # enable dropout
        running_loss = 0.0

        for images, labels in trainloader:
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward pass + parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        avg_loss = running_loss / len(trainloader)
        train_losses.append(avg_loss)

        # ---- evaluation phase ----
        model.eval()  # disable dropout
        correct = 0
        total = 0

        with torch.no_grad():  # no gradients needed during evaluation
            for images, labels in testloader:
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100 * correct / total
        test_accuracies.append(accuracy)

        print(f'Epoch [{epoch+1}/{num_epochs}], '
              f'Loss: {avg_loss:.4f}, '
              f'Accuracy: {accuracy:.2f}%')

    return model, train_losses, test_accuracies

2.4 可视化训练结果

def plot_training_results(train_losses, test_accuracies):
    """Plot per-epoch training loss and test accuracy side by side.

    Args:
        train_losses: list of mean training losses, one per epoch.
        test_accuracies: list of test accuracies in percent, one per epoch.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

    # Left panel: training loss curve.
    ax1.plot(train_losses, label='Training Loss')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Loss')
    ax1.set_title('Training Loss over Epochs')
    ax1.legend()
    ax1.grid(True)

    # Right panel: test accuracy curve.
    ax2.plot(test_accuracies, label='Test Accuracy', color='orange')
    ax2.set_xlabel('Epoch')
    ax2.set_ylabel('Accuracy (%)')
    ax2.set_title('Test Accuracy over Epochs')
    ax2.legend()
    ax2.grid(True)

    plt.tight_layout()
    plt.show()

运行训练

# Entry point: train the CNN, then visualize the curves.  Guarded so that
# importing this module does not kick off a full training run.
if __name__ == "__main__":
    print("开始训练CNN模型...")
    model, train_losses, test_accuracies = train_cnn()
    plot_training_results(train_losses, test_accuracies)

posted @ 2026-01-19 23:50  臧博涛  阅读(1)  评论(0)    收藏  举报