# 所有图片都在一个文件夹1

test: 几千张图片，没有标签，测试集

train: 10222张狗的图片，全是jpg，大小不一，有长有宽，基本都在400×300以上

labels.csv ： excel表格, 图片名称+品种名称

import pandas as pd
import numpy as np
# NOTE(review): `df` is not defined in this snippet — presumably it was
# loaded earlier via df = pd.read_csv("labels.csv"); confirm before running.
print(df.info())


1)得到一个长 list1 : 里面是每张图片的路径

2)另外一个长list2: 里面是每张图片对应的标签（整数），顺序要和list1对应。

3)把这两个list切分出来一部分作为验证集

1）看看一共多少个breed,把每种breed名称和一个数字编号对应起来：

from pandas import Series, DataFrame

# Map each breed name to an integer class index.
# NOTE: Series.as_matrix() was deprecated and removed in pandas 1.0;
# .to_numpy() is the supported replacement.
breed = df['breed']
breed_np = breed.to_numpy()
print(type(breed_np))
print(breed_np.shape)   # (10222,)

# Distinct breed names — 120 classes in this dataset.
breed_set = set(breed_np)
print(len(breed_set))   # 120

# dic: breed name -> integer label.  enumerate() replaces the original
# hard-coded range(120) (and fixes the lost loop-body indentation).
breed_120_list = list(breed_set)
dic = {name: i for i, name in enumerate(breed_120_list)}


2）处理id那一列，分割成两段：

# Build the on-disk path of every training image from the "id" column,
# then split into train/validation partitions and persist them.
# (Series.as_matrix was removed in pandas 1.0 — use .to_numpy().)
file = df["id"].to_numpy()
print(file.shape)
import os
file = [i + ".jpg" for i in file]
file = [os.path.join("./dog_breed/train", i) for i in file]
file_train = file[:8000]
file_test = file[8000:]
print(file_train)
np.save("file_train.npy", file_train)
np.save("file_test.npy", file_test)


3）处理breed那一列，分成两段：

# Convert every breed name to its integer label via `dic`, and split the
# labels with the SAME 8000/rest cut as file_train/file_test so the two
# lists stay index-aligned.
breed = df["breed"].to_numpy()
print(breed.shape)
# len(breed) replaces the original hard-coded range(10222).
number = np.array([dic[b] for b in breed])
number_train = number[:8000]
number_test = number[8000:]
np.save("number_train.npy", number_train)
np.save("number_test.npy", number_test)


from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

# ImageNet channel statistics — required by torchvision pretrained models.
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
    # transforms.Scale(256),        # (Scale was renamed Resize in torchvision >= 0.2)
    # transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize
])


def default_loader(path):
    """Load the image at *path* and return a normalized 3x224x224 tensor.

    NOTE(review): the blog extraction lost this `def` line, leaving a bare
    `return` at module level (a SyntaxError); the wrapper is restored here.
    Assumes `from PIL import Image` earlier in the file — TODO confirm.
    """
    img_pil = Image.open(path)
    img_pil = img_pil.resize((224, 224))
    img_tensor = preprocess(img_pil)
    return img_tensor

class trainset(Dataset):
    """Dataset over the pre-computed file_train / number_train lists.

    Index i yields (image tensor, integer label) where the path comes from
    file_train[i] and the label from number_train[i].
    """

    def __init__(self):
        # NOTE(review): the original snippet lost this `def __init__` line,
        # leaving bare `self.images = ...` statements at class level.
        self.images = file_train   # list of image paths
        self.target = number_train # integer labels, index-aligned with images

    def __getitem__(self, index):
        fn = self.images[index]
        # restored: the original returned `img` without ever loading it
        img = default_loader(fn)
        target = self.target[index]
        return img, target

    def __len__(self):
        return len(self.images)



自定义 Dataset 必须实现两个方法：

`__getitem__(self, index)`：按下标 index 返回一条数据（图片张量和标签）；

`__len__(self)`：return 你所有数据的个数。

train_data  = trainset()


# 所有图片都在一个文件夹2

class ImageFolder(data.Dataset):
    """A generic data loader where the images are arranged in this way: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/asd932_.png

    Args:
        root (string): Root directory path.
        transform (callable, optional): A function/transform that  takes in an PIL image
            and returns a transformed version. E.g, transforms.RandomCrop
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.

    Attributes:
        classes (list): List of the class names.
        class_to_idx (dict): Dict with items (class_name, class_index).
        imgs (list): List of (image path, class_index) tuples
    """

    def __init__(self, root, transform=None, target_transform=None,
                 loader=default_loader):
        # NOTE(review): the original snippet truncated this signature after
        # `target_transform=None,`; the `loader` parameter is restored per
        # the torchvision source this code was copied from.
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        path, target = self.imgs[index]
        # restored: the original used `img` before assigning it
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self):
        return len(self.imgs)

def pil_loader(path):
    """Open *path* with PIL and force-convert to RGB."""
    # Open via a file handle so it is closed promptly (avoids "too many
    # open files" when a DataLoader spawns many workers).
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')


def accimage_loader(path):
    """Load via the faster accimage backend, falling back to PIL.

    NOTE(review): the blog extraction dropped this `def` line and the
    fall-back return; restored per the torchvision source.
    """
    import accimage
    try:
        return accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        return pil_loader(path)


def default_loader(path):
    """Dispatch to whichever image backend torchvision is configured with."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    else:
        return pil_loader(path)

class customData(Dataset):
    """Dataset indexed by a txt file whose lines are
    "<image file name>\t...\t<integer label>".

    Args:
        img_path: directory that image file names in the txt are relative to.
        txt_path: path of the index txt file.
        dataset: key selecting which transform pipeline to use ('train'/'val').
        data_transforms: dict mapping dataset key -> transform callable.
        loader: callable(path) -> image; defaults to default_loader.
    """

    def __init__(self, img_path, txt_path, dataset='', data_transforms=None,
                 loader=None):
        # NOTE(review): the original snippet lost this `def __init__` line and
        # the `lines = input_file.readlines()` read; both are restored here.
        with open(txt_path) as input_file:
            lines = input_file.readlines()
            self.img_name = [os.path.join(img_path, line.strip().split('\t')[0]) for line in lines]
            self.img_label = [int(line.strip().split('\t')[-1]) for line in lines]
        self.data_transforms = data_transforms
        self.dataset = dataset
        # Resolve the loader lazily so default_loader only needs to exist
        # when the dataset is actually constructed.
        self.loader = loader if loader is not None else default_loader

    def __len__(self):
        return len(self.img_name)

    def __getitem__(self, item):
        img_name = self.img_name[item]
        label = self.img_label[item]
        # restored: the original transformed `img` without ever loading it
        img = self.loader(img_name)

        if self.data_transforms is not None:
            try:
                img = self.data_transforms[self.dataset](img)
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the best-effort behavior is kept.
            except Exception:
                print("Cannot transform image: {}".format(img_name))
        return img, label

 # Build one customData dataset per split, each reading /TxtFile/<split>.txt
 # and applying the matching transform pipeline.
 image_datasets = {x: customData(img_path='/ImagePath',
txt_path=('/TxtFile/' + x + '.txt'),
data_transforms=data_transforms,
dataset=x) for x in ['train', 'val']}

 # One shuffling DataLoader per split.  NOTE(review): `batch_size` must be
 # defined elsewhere — confirm before running.
 dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=batch_size,
shuffle=True) for x in ['train', 'val']}

# Checkpoint the whole model object each epoch.  NOTE(review): saving the
# full model (not state_dict) ties the checkpoint to this code layout.
torch.save(model, 'output/resnet_epoch{}.pkl'.format(epoch))

 # Replicate the model across GPUs 0 and 1 for data-parallel training.
 model = torch.nn.DataParallel(model, device_ids=[0,1])

# 每个类的图片放在一个文件夹

# Directory layout: one sub-folder per class under <data_dir>/train and
# <data_dir>/val, as expected by torchvision's ImageFolder.
data_dir = '/data'

# Define the transforms BEFORE using them — the original snippet referenced
# `data_transforms` prior to its definition, and line
# `data_transforms[x])，` used a full-width '，', which is a SyntaxError.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomSizedCrop(224),  # renamed RandomResizedCrop in newer torchvision
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Scale(256),            # renamed Resize in newer torchvision
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

image_datasets = {x: datasets.ImageFolder(
    os.path.join(data_dir, x),
    data_transforms[x])
    for x in ['train', 'val']}

dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],
                                             batch_size=4,
                                             shuffle=True,
                                             num_workers=4)
              for x in ['train', 'val']}

# NOTE(review): the lines below are disconnected blog snippets, not a
# runnable program — indentation was stripped and the model/optimizer
# setup further down must in reality execute before the loop.
for data in dataloders['train']:
inputs, labels = data

# Move the batch to GPU when one is available.  Variable() has been a
# no-op since PyTorch 0.4 and can be dropped on modern versions.
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)

# Fine-tune a pretrained ResNet-18: replace the final fully-connected
# layer with a fresh 2-class head.
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)

criterion = nn.CrossEntropyLoss()

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Decay the learning rate by 10x every 7 epochs.
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

scheduler.step()

# Enable training mode (affects dropout / batch-norm behavior).
model.train(True)

optimizer.zero_grad()

outputs = model(inputs)

loss = criterion(outputs, labels)

# Predicted class = argmax over the logits of each row.
 _, preds = torch.max(outputs.data, 1)

loss.backward()

optimizer.step()

use_gpu = torch.cuda.is_available()

posted @ 2018-12-11 22:48  向前奔跑的少年  阅读(30046)  评论(0编辑  收藏  举报