第四十五天打卡 - 详解

知识点回顾:
tensorboard的发展历史和原理
tensorboard的常见操作
tensorboard在cifar上的实战:MLP和CNN模型
效果展示如下,很适合拿去组会汇报撑页数:

作业:对resnet18在cifar10上采用微调策略下,用tensorboard监控训练过程。

# NOTE: this was originally a notebook cell; "!pip install ..." is IPython shell
# magic, not valid Python. Install TensorBoard separately if needed:
#   pip install tensorboard -i https://pypi.tuna.tsinghua.edu.cn/simple
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import os
from torch.utils.tensorboard import SummaryWriter  # TensorBoard support
import torchvision  # for image-grid visualization

# Matplotlib: support Chinese labels and render minus signs correctly
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False

# Use GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# 1. Data preprocessing (augmentation on the train set, plain normalization on test)
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
    # CIFAR-10 per-channel mean / std
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])

# 2. CIFAR-10 datasets
train_dataset = datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=train_transform
)

test_dataset = datasets.CIFAR10(
    root='./data',
    train=False,
    transform=test_transform
)

# 3. Data loaders
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


# 4. ResNet-18 factory
def create_resnet18(pretrained=True, num_classes=10):
    """Build a ResNet-18, optionally with ImageNet-pretrained weights.

    The final fully connected layer is replaced with a fresh Linear layer
    sized for `num_classes`, and the model is moved to the global `device`.

    Args:
        pretrained: load ImageNet weights when True.
        num_classes: number of output classes (10 for CIFAR-10).

    Returns:
        The model on `device`.
    """
    # torchvision >= 0.13: `pretrained=` is deprecated; use the weights API.
    weights = models.ResNet18_Weights.DEFAULT if pretrained else None
    model = models.resnet18(weights=weights)

    # Replace the classifier head for the target number of classes.
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, num_classes)

    return model.to(device)
# 5. Freeze / unfreeze helper
def freeze_model(model, freeze=True):
    """Freeze (or unfreeze) every parameter whose name does not contain 'fc'.

    For torchvision ResNets this freezes the convolutional backbone while
    leaving the classifier head trainable.

    Args:
        model: a torch.nn.Module.
        freeze: True to freeze the backbone, False to make everything trainable.

    Returns:
        The same model (modified in place).
    """
    for name, param in model.named_parameters():
        if 'fc' not in name:
            param.requires_grad = not freeze

    # Report how many parameter elements are currently frozen.
    frozen_params = sum(p.numel() for p in model.parameters() if not p.requires_grad)
    total_params = sum(p.numel() for p in model.parameters())
    if freeze:
        print(f"已冻结模型卷积层参数 ({frozen_params}/{total_params} 参数)")
    else:
        print(f"已解冻模型所有参数 ({total_params}/{total_params} 参数可训练)")
    return model


# 6. Training loop with staged freezing and TensorBoard integration
def train_with_freeze_schedule(model, train_loader, test_loader, criterion,
                               optimizer, scheduler, device, epochs,
                               freeze_epochs=5, writer=None):
    """Train with the backbone frozen for `freeze_epochs` epochs, then unfreeze.

    When `writer` is provided, logs batch/epoch scalars, weight and gradient
    histograms, a grid of training samples, the model graph, and (on the last
    epoch) mispredicted test images to TensorBoard. The writer is closed at
    the end of training.

    Returns:
        The final epoch's test accuracy as a percentage (0.0 if epochs == 0).
    """
    train_loss_history = []
    test_loss_history = []
    train_acc_history = []
    test_acc_history = []
    all_iter_losses = []
    iter_indices = []
    # Fix: initialize so the return statement cannot raise NameError when epochs == 0.
    epoch_test_acc = 0.0

    global_step = 0  # global step counter for TensorBoard

    # Record the model graph and a sample-image grid once, up front.
    if writer is not None:
        dataiter = iter(train_loader)
        images, _ = next(dataiter)
        images = images.to(device)
        writer.add_graph(model, images)

        img_grid = torchvision.utils.make_grid(images[:8].cpu())
        writer.add_image('训练样本', img_grid, global_step=0)

    # Initially freeze the convolutional backbone.
    if freeze_epochs > 0:
        model = freeze_model(model, freeze=True)

    for epoch in range(epochs):
        # Unfreeze everything once the scheduled frozen phase is over.
        if epoch == freeze_epochs:
            model = freeze_model(model, freeze=False)
            # Lower the LR after unfreezing to reduce the risk of overfitting.
            optimizer.param_groups[0]['lr'] = 1e-4
            if writer is not None:
                writer.add_text('训练状态', f'Epoch {epoch+1}: 解冻所有层并调整学习率', global_step)

        model.train()
        running_loss = 0.0
        correct_train = 0
        total_train = 0

        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            # Per-iteration loss bookkeeping (for the matplotlib curve).
            iter_loss = loss.item()
            all_iter_losses.append(iter_loss)
            iter_indices.append(epoch * len(train_loader) + batch_idx + 1)

            # Running training statistics.
            running_loss += iter_loss
            _, predicted = output.max(1)
            total_train += target.size(0)
            correct_train += predicted.eq(target).sum().item()

            # ================= TensorBoard logging =================
            if writer is not None:
                # Scalars every 100 batches.
                if (batch_idx + 1) % 100 == 0:
                    batch_acc = 100. * correct_train / total_train
                    writer.add_scalar('Train/Batch Loss', iter_loss, global_step)
                    writer.add_scalar('Train/Batch Accuracy', batch_acc, global_step)
                    writer.add_scalar('Train/Learning Rate', optimizer.param_groups[0]['lr'], global_step)

                # Weight / gradient histograms every 200 batches.
                if (batch_idx + 1) % 200 == 0:
                    for name, param in model.named_parameters():
                        if param.requires_grad:  # only trainable parameters
                            writer.add_histogram(f'Weights/{name}', param, global_step)
                            if param.grad is not None:
                                writer.add_histogram(f'Gradients/{name}', param.grad, global_step)
            # =======================================================

            # Console progress every 100 batches.
            if (batch_idx + 1) % 100 == 0:
                print(f"Epoch {epoch+1}/{epochs} | Batch {batch_idx+1}/{len(train_loader)} "
                      f"| 单Batch损失: {iter_loss:.4f}")

            global_step += 1

        # Epoch-level training metrics.
        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct_train / total_train

        # Evaluation pass.
        model.eval()
        correct_test = 0
        total_test = 0
        test_loss = 0.0

        # Mispredicted samples collected for visualization.
        wrong_images = []
        wrong_labels = []
        wrong_preds = []

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total_test += target.size(0)
                correct_test += predicted.eq(target).sum().item()

                wrong_mask = (predicted != target)
                if wrong_mask.any():
                    wrong_images.append(data[wrong_mask].cpu())
                    wrong_labels.append(target[wrong_mask].cpu())
                    wrong_preds.append(predicted[wrong_mask].cpu())

        epoch_test_loss = test_loss / len(test_loader)
        epoch_test_acc = 100. * correct_test / total_test

        train_loss_history.append(epoch_train_loss)
        test_loss_history.append(epoch_test_loss)
        train_acc_history.append(epoch_train_acc)
        test_acc_history.append(epoch_test_acc)

        # ================= TensorBoard epoch logging =================
        if writer is not None:
            writer.add_scalar('Train/Epoch Loss', epoch_train_loss, epoch)
            writer.add_scalar('Train/Epoch Accuracy', epoch_train_acc, epoch)
            writer.add_scalar('Test/Epoch Loss', epoch_test_loss, epoch)
            writer.add_scalar('Test/Epoch Accuracy', epoch_test_acc, epoch)

            # Count of frozen parameter *tensors* (not element count).
            frozen_params = sum(1 for p in model.parameters() if not p.requires_grad)
            writer.add_scalar('Train/Frozen Parameters', frozen_params, epoch)

            # On the last epoch, show up to 8 mispredicted test images.
            if epoch == epochs - 1 and wrong_images:
                wrong_images = torch.cat(wrong_images)[:8]
                wrong_labels = torch.cat(wrong_labels)[:8]
                wrong_preds = torch.cat(wrong_preds)[:8]

                # NOTE(review): these tensors are still normalized, so the grid
                # will look color-shifted unless denormalized first.
                img_grid = torchvision.utils.make_grid(wrong_images)
                writer.add_image('错误预测样本', img_grid, epoch)

                classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
                labels_text = '\n'.join([f'真实: {classes[l]}, 预测: {classes[p]}'
                                        for l, p in zip(wrong_labels, wrong_preds)])
                writer.add_text('错误预测详情', labels_text, epoch)
        # =============================================================

        # ReduceLROnPlateau steps on the monitored metric (test loss).
        if scheduler is not None:
            scheduler.step(epoch_test_loss)

        print(f"Epoch {epoch+1} 完成 | 训练损失: {epoch_train_loss:.4f} "
              f"| 训练准确率: {epoch_train_acc:.2f}% | 测试准确率: {epoch_test_acc:.2f}%")

    if writer is not None:
        writer.close()

    # Loss / accuracy curves rendered with matplotlib.
    plot_iter_losses(all_iter_losses, iter_indices)
    plot_epoch_metrics(train_acc_history, test_acc_history, train_loss_history, test_loss_history)

    return epoch_test_acc


# 7. Per-iteration loss curve
def plot_iter_losses(losses, indices):
    """Plot the per-batch training loss against the global iteration index."""
    plt.figure(figsize=(10, 4))
    plt.plot(indices, losses, 'b-', alpha=0.7)
    plt.xlabel('Iteration(Batch序号)')
    plt.ylabel('损失值')
    plt.title('训练过程中的Iteration损失变化')
    plt.grid(True)
    plt.show()


# 8. Epoch-level metric curves
def plot_epoch_metrics(train_acc, test_acc, train_loss, test_loss):
    """Plot train/test accuracy and loss per epoch in two side-by-side panels."""
    epochs = range(1, len(train_acc) + 1)

    plt.figure(figsize=(12, 5))

    # Accuracy panel
    plt.subplot(1, 2, 1)
    plt.plot(epochs, train_acc, 'b-', label='训练准确率')
    plt.plot(epochs, test_acc, 'r-', label='测试准确率')
    plt.xlabel('Epoch')
    plt.ylabel('准确率 (%)')
    plt.title('准确率随Epoch变化')
    plt.legend()
    plt.grid(True)

    # Loss panel
    plt.subplot(1, 2, 2)
    plt.plot(epochs, train_loss, 'b-', label='训练损失')
    plt.plot(epochs, test_loss, 'r-', label='测试损失')
    plt.xlabel('Epoch')
    plt.ylabel('损失值')
    plt.title('损失值随Epoch变化')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()


# Entry point: fine-tune ResNet-18 on CIFAR-10 with TensorBoard monitoring
def main():
    epochs = 40           # total training epochs
    freeze_epochs = 5     # epochs with the backbone frozen
    learning_rate = 1e-3  # initial learning rate
    weight_decay = 1e-4   # L2 regularization

    # ---- TensorBoard writer with an auto-versioned log directory ----
    log_dir = "runs/cifar10_resnet18_finetune"
    if os.path.exists(log_dir):
        version = 1
        while os.path.exists(f"{log_dir}_v{version}"):
            version += 1
        log_dir = f"{log_dir}_v{version}"
    writer = SummaryWriter(log_dir)
    print(f"TensorBoard日志目录: {log_dir}")
    print("训练后执行: tensorboard --logdir=runs 查看可视化")
    # -----------------------------------------------------------------

    # ResNet-18 with pretrained weights.
    model = create_resnet18(pretrained=True, num_classes=10)

    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    criterion = nn.CrossEntropyLoss()

    # Fix: `verbose=` was deprecated (and later removed) in torch.optim's
    # ReduceLROnPlateau; the learning rate is logged to TensorBoard instead.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=2
    )

    # Train: backbone frozen for the first 5 epochs, then unfrozen.
    final_accuracy = train_with_freeze_schedule(
        model=model,
        train_loader=train_loader,
        test_loader=test_loader,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
        epochs=epochs,
        freeze_epochs=freeze_epochs,
        writer=writer
    )

    print(f"训练完成!最终测试准确率: {final_accuracy:.2f}%")

    # # Optionally persist the fine-tuned weights:
    # torch.save(model.state_dict(), 'resnet18_cifar10_finetuned.pth')
    # print("模型已保存至: resnet18_cifar10_finetuned.pth")


if __name__ == "__main__":
    main()
已冻结模型卷积层参数 (11176512/11181642 参数)Epoch 1/40 | Batch 100/782 | 单Batch损失: 1.9845Epoch 1/40 | Batch 200/782 | 单Batch损失: 1.9221Epoch 1/40 | Batch 300/782 | 单Batch损失: 2.1262Epoch 1/40 | Batch 400/782 | 单Batch损失: 1.9537Epoch 1/40 | Batch 500/782 | 单Batch损失: 1.6979Epoch 1/40 | Batch 600/782 | 单Batch损失: 1.7894Epoch 1/40 | Batch 700/782 | 单Batch损失: 1.9485Epoch 1 完成 | 训练损失: 1.9627 | 训练准确率: 30.03% | 测试准确率: 32.34%Epoch 2/40 | Batch 100/782 | 单Batch损失: 1.7793Epoch 2/40 | Batch 200/782 | 单Batch损失: 1.8816Epoch 2/40 | Batch 300/782 | 单Batch损失: 1.7683Epoch 2/40 | Batch 400/782 | 单Batch损失: 1.9009Epoch 2/40 | Batch 500/782 | 单Batch损失: 1.9358Epoch 2/40 | Batch 600/782 | 单Batch损失: 1.7030Epoch 2/40 | Batch 700/782 | 单Batch损失: 1.8201Epoch 2 完成 | 训练损失: 1.8657 | 训练准确率: 33.73% | 测试准确率: 33.70%Epoch 3/40 | Batch 100/782 | 单Batch损失: 1.7812Epoch 3/40 | Batch 200/782 | 单Batch损失: 1.8254Epoch 3/40 | Batch 300/782 | 单Batch损失: 2.0188Epoch 3/40 | Batch 400/782 | 单Batch损失: 1.8106Epoch 3/40 | Batch 500/782 | 单Batch损失: 1.8855Epoch 3/40 | Batch 600/782 | 单Batch损失: 1.7753Epoch 3/40 | Batch 700/782 | 单Batch损失: 1.7371Epoch 3 完成 | 训练损失: 1.8509 | 训练准确率: 34.66% | 测试准确率: 33.65%Epoch 4/40 | Batch 100/782 | 单Batch损失: 1.8383Epoch 4/40 | Batch 200/782 | 单Batch损失: 1.8095Epoch 4/40 | Batch 300/782 | 单Batch损失: 2.0029Epoch 4/40 | Batch 400/782 | 单Batch损失: 1.7765Epoch 4/40 | Batch 500/782 | 单Batch损失: 1.8919Epoch 4/40 | Batch 600/782 | 单Batch损失: 1.7515Epoch 4/40 | Batch 700/782 | 单Batch损失: 1.8964Epoch 4 完成 | 训练损失: 1.8491 | 训练准确率: 34.36% | 测试准确率: 34.47%Epoch 5/40 | Batch 100/782 | 单Batch损失: 1.9020Epoch 5/40 | Batch 200/782 | 单Batch损失: 2.0807Epoch 5/40 | Batch 300/782 | 单Batch损失: 1.8857Epoch 5/40 | Batch 400/782 | 单Batch损失: 1.9153Epoch 5/40 | Batch 500/782 | 单Batch损失: 1.8053Epoch 5/40 | Batch 600/782 | 单Batch损失: 1.8766Epoch 5/40 | Batch 700/782 | 单Batch损失: 1.8560Epoch 5 完成 | 训练损失: 1.8454 | 训练准确率: 34.51% | 测试准确率: 35.81%已解冻模型所有参数 (11181642/11181642 参数可训练)Epoch 6/40 | Batch 100/782 | 单Batch损失: 1.7783Epoch 6/40 | Batch 200/782 
| 单Batch损失: 1.3492Epoch 6/40 | Batch 300/782 | 单Batch损失: 1.2985Epoch 6/40 | Batch 400/782 | 单Batch损失: 1.1477Epoch 6/40 | Batch 500/782 | 单Batch损失: 1.0184Epoch 6/40 | Batch 600/782 | 单Batch损失: 1.0792Epoch 6/40 | Batch 700/782 | 单Batch损失: 0.9014Epoch 6 完成 | 训练损失: 1.2920 | 训练准确率: 54.37% | 测试准确率: 68.44%Epoch 7/40 | Batch 100/782 | 单Batch损失: 1.1539Epoch 7/40 | Batch 200/782 | 单Batch损失: 1.0359Epoch 7/40 | Batch 300/782 | 单Batch损失: 0.9367Epoch 7/40 | Batch 400/782 | 单Batch损失: 0.9108Epoch 7/40 | Batch 500/782 | 单Batch损失: 0.8993Epoch 7/40 | Batch 600/782 | 单Batch损失: 0.7143Epoch 7/40 | Batch 700/782 | 单Batch损失: 0.9862Epoch 7 完成 | 训练损失: 0.9915 | 训练准确率: 65.46% | 测试准确率: 74.44%Epoch 8/40 | Batch 100/782 | 单Batch损失: 0.9431Epoch 8/40 | Batch 200/782 | 单Batch损失: 0.9887Epoch 8/40 | Batch 300/782 | 单Batch损失: 0.7520Epoch 8/40 | Batch 400/782 | 单Batch损失: 0.8691Epoch 8/40 | Batch 500/782 | 单Batch损失: 0.8537Epoch 8/40 | Batch 600/782 | 单Batch损失: 0.8077Epoch 8/40 | Batch 700/782 | 单Batch损失: 0.9450Epoch 8 完成 | 训练损失: 0.8706 | 训练准确率: 69.68% | 测试准确率: 76.69%Epoch 9/40 | Batch 100/782 | 单Batch损失: 0.6912Epoch 9/40 | Batch 200/782 | 单Batch损失: 1.0554Epoch 9/40 | Batch 300/782 | 单Batch损失: 0.8509Epoch 9/40 | Batch 400/782 | 单Batch损失: 0.6163Epoch 9/40 | Batch 500/782 | 单Batch损失: 0.6654Epoch 9/40 | Batch 600/782 | 单Batch损失: 0.7246Epoch 9/40 | Batch 700/782 | 单Batch损失: 0.7361Epoch 9 完成 | 训练损失: 0.8005 | 训练准确率: 72.25% | 测试准确率: 78.66%Epoch 10/40 | Batch 100/782 | 单Batch损失: 0.7592Epoch 10/40 | Batch 200/782 | 单Batch损失: 0.6813Epoch 10/40 | Batch 300/782 | 单Batch损失: 0.8109Epoch 10/40 | Batch 400/782 | 单Batch损失: 0.8108Epoch 10/40 | Batch 500/782 | 单Batch损失: 0.4869Epoch 10/40 | Batch 600/782 | 单Batch损失: 0.6479Epoch 10/40 | Batch 700/782 | 单Batch损失: 0.6231Epoch 10 完成 | 训练损失: 0.7458 | 训练准确率: 74.18% | 测试准确率: 79.61%Epoch 11/40 | Batch 100/782 | 单Batch损失: 0.9745Epoch 11/40 | Batch 200/782 | 单Batch损失: 0.7001Epoch 11/40 | Batch 300/782 | 单Batch损失: 0.5166Epoch 11/40 | Batch 400/782 | 单Batch损失: 0.9212Epoch 11/40 | Batch 
500/782 | 单Batch损失: 0.8682Epoch 11/40 | Batch 600/782 | 单Batch损失: 0.7065Epoch 11/40 | Batch 700/782 | 单Batch损失: 0.5803Epoch 11 完成 | 训练损失: 0.7058 | 训练准确率: 75.47% | 测试准确率: 80.05%Epoch 12/40 | Batch 100/782 | 单Batch损失: 0.5478Epoch 12/40 | Batch 200/782 | 单Batch损失: 0.8005Epoch 12/40 | Batch 300/782 | 单Batch损失: 0.7657Epoch 12/40 | Batch 400/782 | 单Batch损失: 0.5238Epoch 12/40 | Batch 500/782 | 单Batch损失: 0.6686Epoch 12/40 | Batch 600/782 | 单Batch损失: 0.6046Epoch 12/40 | Batch 700/782 | 单Batch损失: 0.4792Epoch 12 完成 | 训练损失: 0.6713 | 训练准确率: 76.52% | 测试准确率: 80.93%Epoch 13/40 | Batch 100/782 | 单Batch损失: 1.0814Epoch 13/40 | Batch 200/782 | 单Batch损失: 0.8355Epoch 13/40 | Batch 300/782 | 单Batch损失: 0.7163Epoch 13/40 | Batch 400/782 | 单Batch损失: 0.8455Epoch 13/40 | Batch 500/782 | 单Batch损失: 0.7083Epoch 13/40 | Batch 600/782 | 单Batch损失: 0.4937Epoch 13/40 | Batch 700/782 | 单Batch损失: 0.7588Epoch 13 完成 | 训练损失: 0.6509 | 训练准确率: 77.38% | 测试准确率: 81.50%Epoch 14/40 | Batch 100/782 | 单Batch损失: 0.7048Epoch 14/40 | Batch 200/782 | 单Batch损失: 0.6938Epoch 14/40 | Batch 300/782 | 单Batch损失: 0.6346Epoch 14/40 | Batch 400/782 | 单Batch损失: 0.5922Epoch 14/40 | Batch 500/782 | 单Batch损失: 0.6982Epoch 14/40 | Batch 600/782 | 单Batch损失: 0.7085Epoch 14/40 | Batch 700/782 | 单Batch损失: 0.7181Epoch 14 完成 | 训练损失: 0.6167 | 训练准确率: 78.43% | 测试准确率: 82.15%Epoch 15/40 | Batch 100/782 | 单Batch损失: 0.9140Epoch 15/40 | Batch 200/782 | 单Batch损失: 0.5345Epoch 15/40 | Batch 300/782 | 单Batch损失: 0.4287Epoch 15/40 | Batch 400/782 | 单Batch损失: 0.5277Epoch 15/40 | Batch 500/782 | 单Batch损失: 0.7996Epoch 15/40 | Batch 600/782 | 单Batch损失: 0.4521Epoch 15/40 | Batch 700/782 | 单Batch损失: 0.5768Epoch 15 完成 | 训练损失: 0.5981 | 训练准确率: 79.22% | 测试准确率: 82.28%Epoch 16/40 | Batch 100/782 | 单Batch损失: 0.5512Epoch 16/40 | Batch 200/782 | 单Batch损失: 0.3907Epoch 16/40 | Batch 300/782 | 单Batch损失: 0.5873Epoch 16/40 | Batch 400/782 | 单Batch损失: 0.7727Epoch 16/40 | Batch 500/782 | 单Batch损失: 0.6523Epoch 16/40 | Batch 600/782 | 单Batch损失: 0.8028Epoch 16/40 | Batch 700/782 
| 单Batch损失: 0.3571Epoch 16 完成 | 训练损失: 0.5809 | 训练准确率: 79.84% | 测试准确率: 82.45%Epoch 17/40 | Batch 100/782 | 单Batch损失: 0.4884Epoch 17/40 | Batch 200/782 | 单Batch损失: 0.6235Epoch 17/40 | Batch 300/782 | 单Batch损失: 0.6000Epoch 17/40 | Batch 400/782 | 单Batch损失: 0.5768Epoch 17/40 | Batch 500/782 | 单Batch损失: 0.6355Epoch 17/40 | Batch 600/782 | 单Batch损失: 0.9570Epoch 17/40 | Batch 700/782 | 单Batch损失: 0.3773Epoch 17 完成 | 训练损失: 0.5583 | 训练准确率: 80.59% | 测试准确率: 83.44%Epoch 18/40 | Batch 100/782 | 单Batch损失: 0.4415Epoch 18/40 | Batch 200/782 | 单Batch损失: 0.4202Epoch 18/40 | Batch 300/782 | 单Batch损失: 0.4529Epoch 18/40 | Batch 400/782 | 单Batch损失: 0.5010Epoch 18/40 | Batch 500/782 | 单Batch损失: 0.5142Epoch 18/40 | Batch 600/782 | 单Batch损失: 0.3954Epoch 18/40 | Batch 700/782 | 单Batch损失: 0.5276Epoch 18 完成 | 训练损失: 0.5491 | 训练准确率: 80.88% | 测试准确率: 83.22%Epoch 19/40 | Batch 100/782 | 单Batch损失: 0.4030Epoch 19/40 | Batch 200/782 | 单Batch损失: 0.4581Epoch 19/40 | Batch 300/782 | 单Batch损失: 0.5019Epoch 19/40 | Batch 400/782 | 单Batch损失: 0.4664Epoch 19/40 | Batch 500/782 | 单Batch损失: 0.4308Epoch 19/40 | Batch 600/782 | 单Batch损失: 0.4998Epoch 19/40 | Batch 700/782 | 单Batch损失: 0.4180Epoch 19 完成 | 训练损失: 0.5372 | 训练准确率: 81.29% | 测试准确率: 83.87%Epoch 20/40 | Batch 100/782 | 单Batch损失: 0.5643Epoch 20/40 | Batch 200/782 | 单Batch损失: 0.6189Epoch 20/40 | Batch 300/782 | 单Batch损失: 0.4015Epoch 20/40 | Batch 400/782 | 单Batch损失: 0.4931Epoch 20/40 | Batch 500/782 | 单Batch损失: 0.5194Epoch 20/40 | Batch 600/782 | 单Batch损失: 0.5057Epoch 20/40 | Batch 700/782 | 单Batch损失: 0.5244Epoch 20 完成 | 训练损失: 0.5196 | 训练准确率: 81.80% | 测试准确率: 84.25%Epoch 21/40 | Batch 100/782 | 单Batch损失: 0.3585Epoch 21/40 | Batch 200/782 | 单Batch损失: 0.2221Epoch 21/40 | Batch 300/782 | 单Batch损失: 0.5224Epoch 21/40 | Batch 400/782 | 单Batch损失: 0.4546Epoch 21/40 | Batch 500/782 | 单Batch损失: 0.3895Epoch 21/40 | Batch 600/782 | 单Batch损失: 0.4467Epoch 21/40 | Batch 700/782 | 单Batch损失: 0.4560Epoch 21 完成 | 训练损失: 0.4971 | 训练准确率: 82.57% | 测试准确率: 83.88%Epoch 22/40 | Batch 
100/782 | 单Batch损失: 0.3642Epoch 22/40 | Batch 200/782 | 单Batch损失: 0.7551Epoch 22/40 | Batch 300/782 | 单Batch损失: 0.4533Epoch 22/40 | Batch 400/782 | 单Batch损失: 0.6884Epoch 22/40 | Batch 500/782 | 单Batch损失: 0.5062Epoch 22/40 | Batch 600/782 | 单Batch损失: 0.5316Epoch 22/40 | Batch 700/782 | 单Batch损失: 0.3991Epoch 22 完成 | 训练损失: 0.4949 | 训练准确率: 82.76% | 测试准确率: 84.23%Epoch 23/40 | Batch 100/782 | 单Batch损失: 0.4934Epoch 23/40 | Batch 200/782 | 单Batch损失: 0.3914Epoch 23/40 | Batch 300/782 | 单Batch损失: 0.5075Epoch 23/40 | Batch 400/782 | 单Batch损失: 0.6494Epoch 23/40 | Batch 500/782 | 单Batch损失: 0.4456Epoch 23/40 | Batch 600/782 | 单Batch损失: 0.4376Epoch 23/40 | Batch 700/782 | 单Batch损失: 0.5029Epoch 23 完成 | 训练损失: 0.4820 | 训练准确率: 83.05% | 测试准确率: 84.24%Epoch 24/40 | Batch 100/782 | 单Batch损失: 0.4478Epoch 24/40 | Batch 200/782 | 单Batch损失: 0.4439Epoch 24/40 | Batch 300/782 | 单Batch损失: 0.6566Epoch 24/40 | Batch 400/782 | 单Batch损失: 0.3610Epoch 24/40 | Batch 500/782 | 单Batch损失: 0.3373Epoch 24/40 | Batch 600/782 | 单Batch损失: 0.4565Epoch 24/40 | Batch 700/782 | 单Batch损失: 0.5308Epoch 24 完成 | 训练损失: 0.4746 | 训练准确率: 83.40% | 测试准确率: 84.45%Epoch 25/40 | Batch 100/782 | 单Batch损失: 0.4398Epoch 25/40 | Batch 200/782 | 单Batch损失: 0.3060Epoch 25/40 | Batch 300/782 | 单Batch损失: 0.5542Epoch 25/40 | Batch 400/782 | 单Batch损失: 0.4484Epoch 25/40 | Batch 500/782 | 单Batch损失: 0.3688Epoch 25/40 | Batch 600/782 | 单Batch损失: 0.5104Epoch 25/40 | Batch 700/782 | 单Batch损失: 0.5033Epoch 25 完成 | 训练损失: 0.4536 | 训练准确率: 83.94% | 测试准确率: 84.60%Epoch 26/40 | Batch 100/782 | 单Batch损失: 0.6212Epoch 26/40 | Batch 200/782 | 单Batch损失: 0.4445Epoch 26/40 | Batch 300/782 | 单Batch损失: 0.4936Epoch 26/40 | Batch 400/782 | 单Batch损失: 0.4348Epoch 26/40 | Batch 500/782 | 单Batch损失: 0.4900Epoch 26/40 | Batch 600/782 | 单Batch损失: 0.5532Epoch 26/40 | Batch 700/782 | 单Batch损失: 0.3503Epoch 26 完成 | 训练损失: 0.4523 | 训练准确率: 84.11% | 测试准确率: 85.29%Epoch 27/40 | Batch 100/782 | 单Batch损失: 0.4809Epoch 27/40 | Batch 200/782 | 单Batch损失: 0.3168Epoch 27/40 | Batch 300/782 
| 单Batch损失: 0.4344Epoch 27/40 | Batch 400/782 | 单Batch损失: 0.2452Epoch 27/40 | Batch 500/782 | 单Batch损失: 0.4902Epoch 27/40 | Batch 600/782 | 单Batch损失: 0.4841Epoch 27/40 | Batch 700/782 | 单Batch损失: 0.4331Epoch 27 完成 | 训练损失: 0.4347 | 训练准确率: 84.82% | 测试准确率: 84.93%Epoch 28/40 | Batch 100/782 | 单Batch损失: 0.5113Epoch 28/40 | Batch 200/782 | 单Batch损失: 0.4025Epoch 28/40 | Batch 300/782 | 单Batch损失: 0.5368Epoch 28/40 | Batch 400/782 | 单Batch损失: 0.4800Epoch 28/40 | Batch 500/782 | 单Batch损失: 0.5973Epoch 28/40 | Batch 600/782 | 单Batch损失: 0.4407Epoch 28/40 | Batch 700/782 | 单Batch损失: 0.5990Epoch 28 完成 | 训练损失: 0.4299 | 训练准确率: 84.99% | 测试准确率: 84.38%Epoch 29/40 | Batch 100/782 | 单Batch损失: 0.2680Epoch 29/40 | Batch 200/782 | 单Batch损失: 0.2952Epoch 29/40 | Batch 300/782 | 单Batch损失: 0.4970Epoch 29/40 | Batch 400/782 | 单Batch损失: 0.3671Epoch 29/40 | Batch 500/782 | 单Batch损失: 0.3845Epoch 29/40 | Batch 600/782 | 单Batch损失: 0.2393Epoch 29/40 | Batch 700/782 | 单Batch损失: 0.4481Epoch 29 完成 | 训练损失: 0.4242 | 训练准确率: 85.12% | 测试准确率: 85.04%Epoch 30/40 | Batch 100/782 | 单Batch损失: 0.5809Epoch 30/40 | Batch 200/782 | 单Batch损失: 0.3701Epoch 30/40 | Batch 300/782 | 单Batch损失: 0.2804Epoch 30/40 | Batch 400/782 | 单Batch损失: 0.3507Epoch 30/40 | Batch 500/782 | 单Batch损失: 0.1962Epoch 30/40 | Batch 600/782 | 单Batch损失: 0.4570Epoch 30/40 | Batch 700/782 | 单Batch损失: 0.2103Epoch 30 完成 | 训练损失: 0.3778 | 训练准确率: 86.65% | 测试准确率: 85.71%Epoch 31/40 | Batch 100/782 | 单Batch损失: 0.3076Epoch 31/40 | Batch 200/782 | 单Batch损失: 0.2548Epoch 31/40 | Batch 300/782 | 单Batch损失: 0.3434Epoch 31/40 | Batch 400/782 | 单Batch损失: 0.3898Epoch 31/40 | Batch 500/782 | 单Batch损失: 0.5643Epoch 31/40 | Batch 600/782 | 单Batch损失: 0.2407Epoch 31/40 | Batch 700/782 | 单Batch损失: 0.2459Epoch 31 完成 | 训练损失: 0.3647 | 训练准确率: 87.05% | 测试准确率: 85.72%Epoch 32/40 | Batch 100/782 | 单Batch损失: 0.5242Epoch 32/40 | Batch 200/782 | 单Batch损失: 0.3581Epoch 32/40 | Batch 300/782 | 单Batch损失: 0.5027Epoch 32/40 | Batch 400/782 | 单Batch损失: 0.2311Epoch 32/40 | Batch 500/782 | 
单Batch损失: 0.4504Epoch 32/40 | Batch 600/782 | 单Batch损失: 0.4259Epoch 32/40 | Batch 700/782 | 单Batch损失: 0.2881Epoch 32 完成 | 训练损失: 0.3509 | 训练准确率: 87.40% | 测试准确率: 85.88%Epoch 33/40 | Batch 100/782 | 单Batch损失: 0.2308Epoch 33/40 | Batch 200/782 | 单Batch损失: 0.2749Epoch 33/40 | Batch 300/782 | 单Batch损失: 0.3666Epoch 33/40 | Batch 400/782 | 单Batch损失: 0.3093Epoch 33/40 | Batch 500/782 | 单Batch损失: 0.4387Epoch 33/40 | Batch 600/782 | 单Batch损失: 0.3664Epoch 33/40 | Batch 700/782 | 单Batch损失: 0.3048Epoch 33 完成 | 训练损失: 0.3424 | 训练准确率: 87.83% | 测试准确率: 85.93%Epoch 34/40 | Batch 100/782 | 单Batch损失: 0.1878Epoch 34/40 | Batch 200/782 | 单Batch损失: 0.2186Epoch 34/40 | Batch 300/782 | 单Batch损失: 0.2953Epoch 34/40 | Batch 400/782 | 单Batch损失: 0.4248Epoch 34/40 | Batch 500/782 | 单Batch损失: 0.4961Epoch 34/40 | Batch 600/782 | 单Batch损失: 0.2806Epoch 34/40 | Batch 700/782 | 单Batch损失: 0.3832Epoch 34 完成 | 训练损失: 0.3364 | 训练准确率: 88.01% | 测试准确率: 85.88%Epoch 35/40 | Batch 100/782 | 单Batch损失: 0.4153Epoch 35/40 | Batch 200/782 | 单Batch损失: 0.2748Epoch 35/40 | Batch 300/782 | 单Batch损失: 0.3258Epoch 35/40 | Batch 400/782 | 单Batch损失: 0.2264Epoch 35/40 | Batch 500/782 | 单Batch损失: 0.2102Epoch 35/40 | Batch 600/782 | 单Batch损失: 0.2262Epoch 35/40 | Batch 700/782 | 单Batch损失: 0.3287Epoch 35 完成 | 训练损失: 0.3117 | 训练准确率: 88.96% | 测试准确率: 86.21%Epoch 36/40 | Batch 100/782 | 单Batch损失: 0.2270Epoch 36/40 | Batch 200/782 | 单Batch损失: 0.2434Epoch 36/40 | Batch 300/782 | 单Batch损失: 0.2651Epoch 36/40 | Batch 400/782 | 单Batch损失: 0.1981Epoch 36/40 | Batch 500/782 | 单Batch损失: 0.3411Epoch 36/40 | Batch 600/782 | 单Batch损失: 0.3588Epoch 36/40 | Batch 700/782 | 单Batch损失: 0.4123Epoch 36 完成 | 训练损失: 0.3059 | 训练准确率: 89.21% | 测试准确率: 86.49%Epoch 37/40 | Batch 100/782 | 单Batch损失: 0.2236Epoch 37/40 | Batch 200/782 | 单Batch损失: 0.1829Epoch 37/40 | Batch 300/782 | 单Batch损失: 0.3180Epoch 37/40 | Batch 400/782 | 单Batch损失: 0.1161Epoch 37/40 | Batch 500/782 | 单Batch损失: 0.2394Epoch 37/40 | Batch 600/782 | 单Batch损失: 0.3800Epoch 37/40 | Batch 700/782 | 
单Batch损失: 0.1893Epoch 37 完成 | 训练损失: 0.2973 | 训练准确率: 89.51% | 测试准确率: 86.20%Epoch 38/40 | Batch 100/782 | 单Batch损失: 0.2446Epoch 38/40 | Batch 200/782 | 单Batch损失: 0.3209Epoch 38/40 | Batch 300/782 | 单Batch损失: 0.2194Epoch 38/40 | Batch 400/782 | 单Batch损失: 0.3172Epoch 38/40 | Batch 500/782 | 单Batch损失: 0.3022Epoch 38/40 | Batch 600/782 | 单Batch损失: 0.2326Epoch 38/40 | Batch 700/782 | 单Batch损失: 0.3124Epoch 38 完成 | 训练损失: 0.2827 | 训练准确率: 90.02% | 测试准确率: 86.50%Epoch 39/40 | Batch 100/782 | 单Batch损失: 0.2458Epoch 39/40 | Batch 200/782 | 单Batch损失: 0.3421Epoch 39/40 | Batch 300/782 | 单Batch损失: 0.2558Epoch 39/40 | Batch 400/782 | 单Batch损失: 0.2393Epoch 39/40 | Batch 500/782 | 单Batch损失: 0.3710Epoch 39/40 | Batch 600/782 | 单Batch损失: 0.2537Epoch 39/40 | Batch 700/782 | 单Batch损失: 0.1849Epoch 39 完成 | 训练损失: 0.2763 | 训练准确率: 90.11% | 测试准确率: 86.72%Epoch 40/40 | Batch 100/782 | 单Batch损失: 0.2092Epoch 40/40 | Batch 200/782 | 单Batch损失: 0.2279Epoch 40/40 | Batch 300/782 | 单Batch损失: 0.1306Epoch 40/40 | Batch 400/782 | 单Batch损失: 0.3211Epoch 40/40 | Batch 500/782 | 单Batch损失: 0.3480Epoch 40/40 | Batch 600/782 | 单Batch损失: 0.1992Epoch 40/40 | Batch 700/782 | 单Batch损失: 0.3205Epoch 40 完成 | 训练损失: 0.2782 | 训练准确率: 90.06% | 测试准确率: 86.57%

posted on 2025-07-22 08:43  ljbguanli  阅读(11)  评论(0)    收藏  举报