Flower classification dataset
Link: https://pan.baidu.com/s/1HWFhZfNjkDV5gzqNsex2nA
Extraction code: 5yq3
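Before running the code, extract the archive next to the script so that each flower class sits in its own subfolder of flower_photos (the glob pattern and the 5-unit output layer below assume five class folders; the folder names here are taken from the standard TensorFlow flower_photos dataset, so verify them against your download):

flower_photos/
    daisy/
    dandelion/
    roses/
    sunflowers/
    tulips/

Each subfolder contains the .jpg images for that class, and the folder name doubles as the label.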
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import glob
import time

# ============ Data preprocessing ===============
# Collect the image paths
imgs_path = glob.glob('flower_photos/*/*.jpg')
print(imgs_path[0:5])
# Extract the label (the class subfolder name) from each path by slicing;
# note: splitting on '\\' assumes Windows path separators (see the portable
# alternative further below)
all_labels_names = [img.split('\\')[1] for img in imgs_path]
print(all_labels_names[:5])
# Map the label names to integers
label_names = np.unique(all_labels_names)
labels_to_index = dict((name, i) for i, name in enumerate(label_names))
index_to_labels = dict((v, k) for k, v in labels_to_index.items())
# Convert the label names to integers
all_labels = [labels_to_index.get(name) for name in all_labels_names]

# Shuffle the data so the train/test split is random
np.random.seed(0)
random_index = np.random.permutation(len(imgs_path))
imgs_path = np.array(imgs_path)[random_index]
all_labels = np.array(all_labels)[random_index]

# Split the data, with 80% going to the training set
i = int(len(imgs_path) * 0.8)
train_path = imgs_path[:i]
train_labels = all_labels[:i]
test_path = imgs_path[i:]
test_labels = all_labels[i:]

# Create the datasets
train_ds = tf.data.Dataset.from_tensor_slices((train_path, train_labels))
test_ds = tf.data.Dataset.from_tensor_slices((test_path, test_labels))

def load_img(path, label):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [256, 256])
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image, label

# Let TensorFlow choose the number of threads used to read images
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.map(load_img, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(load_img, num_parallel_calls=AUTOTUNE)

# Train in batches
BATCH_SIZE = 16
train_ds = train_ds.repeat().shuffle(100).batch(BATCH_SIZE)
test_ds = test_ds.batch(BATCH_SIZE)

# ============ Building the model =======================
# A convolutional neural network
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(1024, activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dense(5)  # one output per flower class; no softmax here
])

# Inspect the model structure
model.summary()

# The last layer of the model has no softmax activation, so it outputs raw
# logits. With from_logits=True the loss converts y_pred to probabilities
# (via softmax) internally; otherwise no conversion is applied. Using
# from_logits=True is usually the more numerically stable choice.
model.compile(optimizer=tf.keras.optimizers.Adam(0.0001),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['acc'])

train_count = len(train_path)
test_count = len(test_path)
steps_per_epoch = train_count // BATCH_SIZE
validation_steps = test_count // BATCH_SIZE
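One portability caveat before training: the label extraction above splits paths on '\\', which only matches the backslash separators glob produces on Windows. A minimal portable alternative using pathlib (a sketch, not part of the original code):

from pathlib import Path

# Path(...).parts splits on the platform's separator, so this works on
# Windows, Linux, and macOS alike; parts[1] is the class subfolder name,
# e.g. 'flower_photos/daisy/xxx.jpg' -> 'daisy'
all_labels_names = [Path(img).parts[1] for img in imgs_path]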
# =========== Training ===========
time_start = time.time()
history = model.fit(train_ds,
                    epochs=2,
                    steps_per_epoch=steps_per_epoch,
                    validation_data=test_ds,
                    validation_steps=validation_steps)
time_end = time.time()
print('Time cost = %fs' % (time_end - time_start))

# Plot the accuracy curves
plt.figure()
plt.plot(history.epoch, history.history.get('acc'), label='acc')
plt.plot(history.epoch, history.history.get('val_acc'), label='val_acc')
plt.legend()
plt.show()

# Plot the loss curves
plt.figure()
plt.plot(history.epoch, history.history.get('loss'), label='loss')
plt.plot(history.epoch, history.history.get('val_loss'), label='val_loss')
plt.legend()
plt.show()

# ================== Using the model for prediction =======================
def load_and_preprocess_image(path):
    image = tf.io.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize(image, [256, 256])
    image = tf.cast(image, tf.float32)
    image = image / 255
    return image

test_image = 'test_image/image_1.jpg'
test_tensor = load_and_preprocess_image(test_image)
# A single image cannot be fed to the model directly: expand the dimensions
# so the tensor has the same (batch, height, width, channels) shape as the
# model input
test_tensor = tf.expand_dims(test_tensor, axis=0)
# pred has shape (1, 5): one logit per class
pred = model.predict(test_tensor)
print(index_to_labels.get(int(np.argmax(pred))))
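Because the last Dense layer outputs raw logits, the values in pred are unnormalized scores, not probabilities. A small follow-up sketch (continuing the script above, reusing pred and index_to_labels) that normalizes them with softmax before reporting the class:

# Convert the 5 logits to probabilities that sum to 1
probs = tf.nn.softmax(pred[0]).numpy()
top = int(np.argmax(probs))
print('Predicted class: %s (p=%.3f)' % (index_to_labels.get(top), probs[top]))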