Handwritten Digit Recognition

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

1. Configuration parameters

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 64
LR = 0.001
EPOCHS = 10
NUM_CLASSES = 10  # for Chinese characters, change this to the number of classes (e.g. 3755 for HWDB)

2. Data preprocessing and loading

transform = transforms.Compose([
    transforms.ToTensor(),                       # convert to Tensor and scale to [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))   # MNIST mean and standard deviation
])
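The values 0.1307 and 0.3081 are the commonly quoted MNIST training-set statistics. For a different dataset they can be recomputed; the following is an optional sketch (not in the original post), assuming images are single-channel tensors scaled to [0, 1] by ToTensor():

# Optional sketch: compute mean/std of a dataset for Normalize()
stat_dataset = datasets.MNIST(root='./data', train=True, download=True,
                              transform=transforms.ToTensor())
stat_loader = DataLoader(stat_dataset, batch_size=256, shuffle=False)

total_sum, total_sq_sum, total_pixels = 0.0, 0.0, 0
for images, _ in stat_loader:
    total_sum += images.sum().item()
    total_sq_sum += (images ** 2).sum().item()
    total_pixels += images.numel()

mean = total_sum / total_pixels
std = (total_sq_sum / total_pixels - mean ** 2) ** 0.5
print(f'mean={mean:.4f}, std={std:.4f}')  # roughly 0.1307 / 0.3081 for MNIST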

# Load the datasets (when switching to a Chinese-character dataset, change the path and dataset class here)

train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
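To switch to a Chinese-character dataset (e.g. character images exported from HWDB), one straightforward option is torchvision's generic ImageFolder loader. The sketch below is an assumption, not part of the original post: it supposes a hypothetical directory tree ./hwdb/train/<class_name>/*.png and ./hwdb/test/<class_name>/*.png.

# Hypothetical: folder-based Chinese-character dataset in place of MNIST
hanzi_transform = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),  # the model expects a single input channel
    transforms.Resize((28, 28)),                  # keep the 28x28 size that fc1 assumes
    transforms.ToTensor(),
    # add Normalize() with statistics computed for this dataset (see the sketch above)
])
hanzi_train = datasets.ImageFolder(root='./hwdb/train', transform=hanzi_transform)
hanzi_test = datasets.ImageFolder(root='./hwdb/test', transform=hanzi_transform)
# NUM_CLASSES should then be len(hanzi_train.classes), e.g. 3755 for the full HWDB set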

3. Define the CNN model

class HandwritingCNN(nn.Module):
    def __init__(self, num_classes):
        super(HandwritingCNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.dropout = nn.Dropout(0.25)
        self.fc1 = nn.Linear(64 * 7 * 7, 128)  # MNIST input is 28x28; after two poolings the feature map is 7x7
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = self.dropout(x)
        x = x.view(-1, 64 * 7 * 7)  # flatten
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x
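Because fc1 hard-codes the 64 * 7 * 7 flattened size, it is worth confirming the forward pass on a dummy batch before training. A quick sanity check (an optional addition, not in the original post):

# Sanity check: run one fake batch of 28x28 grayscale images through the model
_check_model = HandwritingCNN(NUM_CLASSES)
_dummy = torch.zeros(2, 1, 28, 28)   # (batch, channels, height, width)
print(_check_model(_dummy).shape)    # expected: torch.Size([2, 10])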

4. Initialize the model, loss function, and optimizer

model = HandwritingCNN(NUM_CLASSES).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)

5. Training function

def train(model, train_loader, criterion, optimizer, epoch):
    model.train()
    total_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # Forward pass
        output = model(data)
        loss = criterion(output, target)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        if batch_idx % 100 == 0:
            print(f'Epoch [{epoch + 1}/{EPOCHS}], Step [{batch_idx + 1}/{len(train_loader)}], Loss: {loss.item():.4f}')
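    # Optional addition (not in the original): total_loss is accumulated above but
    # never reported; this prints the average training loss once per epoch.
    avg_loss = total_loss / len(train_loader)
    print(f'Epoch [{epoch + 1}/{EPOCHS}] Average Training Loss: {avg_loss:.4f}')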

6. Test function (computes accuracy)

def test(model, test_loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():  # disable gradient computation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            _, predicted = torch.max(output.data, 1)  # take the class with the highest score
            total += target.size(0)
            correct += (predicted == target).sum().item()
    accuracy = 100 * correct / total
    print(f'Test Accuracy: {accuracy:.2f}%')
    return accuracy

7. Run training and testing

best_accuracy = 0.0
for epoch in range(EPOCHS):
    train(model, train_loader, criterion, optimizer, epoch)
    current_accuracy = test(model, test_loader)
    # Save the best model
    if current_accuracy > best_accuracy:
        best_accuracy = current_accuracy
        torch.save(model.state_dict(), 'best_handwriting_model.pth')
        print(f'Saved best model with accuracy: {best_accuracy:.2f}%')

print(f'Final Best Accuracy: {best_accuracy:.2f}%')
print("3018")
