1 import os
2 import torch
3 from torchvision import datasets, transforms
4 from torch.utils.data import DataLoader, SubsetRandomSampler
5
6 import torch
7 from torch import nn
8 from torch.utils.data import DataLoader,Dataset
9 import torch.nn.functional as F
10 from PIL import Image
11 import tifffile
12 import torchvision.transforms as transforms
13 from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
14 import csv
15 import numpy as np
16 import cv2
# Select the compute device: first GPU when CUDA is available, else CPU.
if torch.cuda.is_available():
    torch.cuda.set_device(0)  # make GPU 0 the default CUDA device
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Report the chosen device.  Bug fix: the original tested `device != -1`,
# which is always true for a torch.device, so the CPU branch was unreachable.
if device.type == 'cuda':
    print(f"默认设备设置成功,当前默认设备为 CUDA 设备 {device}")
else:
    print("默认设备设置失败,当前默认设备为 CPU")
26
27 import torch
28 import torchvision.models as models
29 import torch.nn as nn
30 import torch.optim as optim
31 from torchvision import transforms
32 from torch.utils.data import DataLoader
33 from torchvision.datasets import ImageFolder
# Build a ResNet-50 backbone (random init; weights are loaded from disk below).
resnet50 = models.resnet50()
# Load the locally downloaded ImageNet checkpoint.  map_location='cpu' keeps
# the load working on GPU-less machines; the model is moved to the target
# device later in the script.
checkpoint = torch.load(
    r'/home/guoliang/CV/LJQ/0/RETFound_MAE-main/models/resnet50-19c8e357.pth',
    map_location='cpu',
)
resnet50.load_state_dict(checkpoint)
# Fine-tune the whole network: every parameter stays trainable.
# (The original comment claimed all layers but the last were frozen, which
# contradicted the code — requires_grad is set to True for every parameter.)
for param in resnet50.parameters():
    param.requires_grad = True
# Replace the final fully connected layer to match our classification task.
num_classes = 4  # number of target classes
resnet50.fc = nn.Linear(resnet50.fc.in_features, num_classes)
print(resnet50)
# The remaining code follows the same pattern as the earlier example.
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

# ImageNet statistics, kept for reference only: the Normalize calls below use
# a fixed 0.5 mean/std instead.  TODO(review): confirm that ignoring the
# ImageNet statistics is intentional.
mean = IMAGENET_DEFAULT_MEAN
std = IMAGENET_DEFAULT_STD

# Deterministic eval-time preprocessing (test/val splits).
# NOTE(review): Resize(400) scales the SHORTER edge to 400 and preserves the
# aspect ratio, so batching assumes all source images share one aspect ratio
# — confirm against the dataset, or use Resize((400, 400)) for fixed shapes.
transform1 = transforms.Compose([
    transforms.Resize(400),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# Training-time preprocessing with light photometric and geometric jitter.
transform = transforms.Compose([
    transforms.Resize(400),
    transforms.ColorJitter(brightness=(0.95, 1.05),
                           contrast=(0.95, 1.05),
                           saturation=(0.95, 1.05),
                           hue=0.05),
    transforms.RandomAffine(5, translate=(0.01, 0.01)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

train_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor5/train', transform=transform)
test_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor/test', transform=transform1)
val_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor/val', transform=transform1)
train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128)
val_loader = DataLoader(val_dataset, batch_size=128)
# Compute class weights for imbalanced training (currently disabled):
# class_counts = torch.bincount(torch.tensor(train_dataset.targets))
# total_samples = len(train_dataset.targets)
# class_weights = total_samples / (2 * class_counts.float())
def accuracy_test_evaluation(model, loader=None):
    """Return the top-1 accuracy of *model* over *loader*.

    Args:
        model: classifier producing raw logits of shape (batch, classes).
        loader: iterable of (inputs, targets) batches; defaults to the
            module-level ``test_loader`` (preserves the original behavior).

    Returns:
        float: fraction of correctly classified samples.
    """
    if loader is None:
        loader = test_loader
    model.eval()
    # Bug fix: the original called x.cuda()/y.cuda() after .to(device), which
    # crashes on CPU-only machines.  Run on whatever device the model is on.
    dev = next(model.parameters()).device
    right = 0
    total = 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(dev), y.to(dev)
            y_hat = model(x)
            _, predicted = torch.max(y_hat, 1)
            right += (predicted == y).sum().item()
            total += y.size(0)
    print(right)
    return right / total
def accuracy_train_evaluation(model, loader=None):
    """Return the top-1 accuracy of *model* over *loader*.

    Args:
        model: classifier producing raw logits of shape (batch, classes).
        loader: iterable of (inputs, targets) batches; defaults to the
            module-level ``train_loader`` (preserves the original behavior).

    Returns:
        float: fraction of correctly classified samples.
    """
    if loader is None:
        loader = train_loader
    model.eval()
    # Bug fix: the original called x.cuda()/y.cuda() after .to(device), which
    # crashes on CPU-only machines.  Run on whatever device the model is on.
    dev = next(model.parameters()).device
    right = 0
    total = 0
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(dev), y.to(dev)
            y_hat = model(x)
            _, predicted = torch.max(y_hat, 1)
            right += (predicted == y).sum().item()
            total += y.size(0)
    print(right)
    return right / total
def eval_val(model, loader=None):
    """Return the summed cross-entropy loss of *model* over *loader*.

    Args:
        model: classifier producing raw logits of shape (batch, classes).
        loader: iterable of (inputs, targets) batches; defaults to the
            module-level ``val_loader`` (preserves the original behavior).

    Returns:
        float: sum of per-batch mean cross-entropy losses.
    """
    if loader is None:
        loader = val_loader
    model.eval()
    criterion = nn.CrossEntropyLoss()
    # Run on whatever device the model lives on (works on CPU-only machines).
    dev = next(model.parameters()).device
    total_loss = 0.0  # accumulate a float, not a tensor; don't shadow sum()
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(dev), y.to(dev)
            logits = model(x)
            # Bug fix: the original applied softmax before CrossEntropyLoss.
            # CrossEntropyLoss applies log-softmax internally, so feeding it
            # probabilities double-squashes the logits and skews the loss.
            total_loss += criterion(logits, y).item()
    return total_loss
def eval_train(model, loader=None):
    """Return the summed cross-entropy loss of *model* over *loader*.

    Args:
        model: classifier producing raw logits of shape (batch, classes).
        loader: iterable of (inputs, targets) batches; defaults to the
            module-level ``train_loader`` (preserves the original behavior).

    Returns:
        float: sum of per-batch mean cross-entropy losses.
    """
    if loader is None:
        loader = train_loader
    model.eval()
    criterion = nn.CrossEntropyLoss()
    # Run on whatever device the model lives on (works on CPU-only machines).
    dev = next(model.parameters()).device
    total_loss = 0.0  # accumulate a float, not a tensor; don't shadow sum()
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(dev), y.to(dev)
            logits = model(x)
            # Bug fix: the original applied softmax before CrossEntropyLoss.
            # CrossEntropyLoss applies log-softmax internally, so feeding it
            # probabilities double-squashes the logits and skews the loss.
            total_loss += criterion(logits, y).item()
    return total_loss
from torch.optim.lr_scheduler import StepLR
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

# Plain (unweighted) cross-entropy over the four classes.
loss = nn.CrossEntropyLoss()
# Move the fine-tuned backbone onto the selected compute device.
net = resnet50.to(device)
# Adam over every parameter; StepLR cuts the LR by 10x every 80 steps.
optimizer = optim.Adam(resnet50.parameters(), lr=0.001)
scheduler = StepLR(optimizer, step_size=80, gamma=0.1)
# Per-epoch history buffers filled during training.
sum_loss = []
sum_train_acc = []
sum_test_acc = []
def train_batch(model, loss, optimizer, epochs=500,
                log_path=r'/home/guoliang/CV/LJQ/0/log/output1.log'):
    """Train *model* over the module-level ``train_loader``.

    After each epoch: computes validation loss and test accuracy, steps the
    module-level ``scheduler``, records accuracy in ``sum_test_acc``, and
    appends a progress record to *log_path*.

    Args:
        model: network to optimise (expected to already live on ``device``).
        loss: criterion mapping (logits, targets) to a scalar loss.
        optimizer: optimiser over ``model.parameters()``.
        epochs: number of passes over the training set (default 500, the
            original hard-coded value).
        log_path: file receiving one progress record per epoch (default is
            the original hard-coded path).

    Uses module-level names: train_loader, device, scheduler, eval_val,
    accuracy_test_evaluation, sum_test_acc.
    """
    for ix in range(epochs):
        model.train()
        for x, y in train_loader:
            optimizer.zero_grad()
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            # Bug fix: the original applied softmax before the loss.
            # CrossEntropyLoss applies log-softmax internally; softmaxing
            # first flattens the logits and severely weakens the gradients.
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
        val_loss = eval_val(model)
        acc_test = accuracy_test_evaluation(model)
        scheduler.step()
        sum_test_acc.append(acc_test)
        print("epoch: " + str(ix) + " 完成")
        print("acc_test: " + str(acc_test))
        print("val_loss: " + str(val_loss))
        with open(log_path, 'a') as f:
            f.write("epoch: " + str(ix) + " 完成\n")
            f.write("acc_test: " + str(acc_test) + "\n")
            f.write("val_loss: " + str(val_loss) + "\n")
# Launch training with the prepared network, criterion and optimiser.
train_batch(net, loss, optimizer)

# Release cached GPU memory now that training has finished.
torch.cuda.empty_cache()
print(1)