
TensorFlow2 (Preparatory Course) --- 7.7 CIFAR-10 Classification - Layer API - Convolutional Neural Network - ResNet18

I. Summary

One-sentence summary:

The results from this ResNet18-style network are fairly stable, with test-set accuracy around 81%; the batch size also seems to have some effect on accuracy. Note that because tf.keras.Sequential cannot express skip connections, the model below stacks the ResNet18 layer layout without the residual additions.
# Build the container
model = tf.keras.Sequential()

# ===================================================

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False, input_shape=(32, 32, 3)))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# Global average pooling
model.add(tf.keras.layers.GlobalAveragePooling2D())
# Output layer
model.add(tf.keras.layers.Dense(10, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2()))
# Model structure
model.summary()


1. What does the 1x1 convolution kernel do?

In a real ResNet, the extra 1x1 convolution with stride 2 sits on the shortcut path of the downsampling block: it halves the spatial size (and, in ResNet18, the block doubles the channel count) so the identity can still be added to the block output. In this Sequential version it is simply stacked as one more layer, as in the code below; see the functional-API sketch after the code for the genuine residual form.
# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))
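
For reference, here is a minimal sketch of this downsampling block with the skip connection actually wired in, using the Keras functional API. The helper name downsample_block is illustrative, not part of the original code:

import tensorflow as tf

def downsample_block(x, filters):
    # Main path: 3x3 stride-2 conv (halves the spatial size), then a 3x3 stride-1 conv
    y = tf.keras.layers.Conv2D(filters, 3, strides=2, padding='same', use_bias=False)(x)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.Activation('relu')(y)
    y = tf.keras.layers.Conv2D(filters, 3, strides=1, padding='same', use_bias=False)(y)
    y = tf.keras.layers.BatchNormalization()(y)
    # Shortcut path: the 1x1 stride-2 conv projects x to the new spatial size
    # and channel count so the two tensors can be added
    shortcut = tf.keras.layers.Conv2D(filters, 1, strides=2, padding='same', use_bias=False)(x)
    shortcut = tf.keras.layers.BatchNormalization()(shortcut)
    # Residual addition, then the block's final activation
    y = tf.keras.layers.Add()([y, shortcut])
    return tf.keras.layers.Activation('relu')(y)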


II. CIFAR-10 Classification - Layer API - Convolutional Neural Network - ResNet18

Video location in the corresponding course:

Steps

1. Load the dataset
2. Split the dataset (into training and test sets)
3. Build the model
4. Train the model
5. Evaluate the model

Requirement

CIFAR-10 (object classification)


The dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class. 50,000 images are used for training, organized into five training batches of 10,000 images each; the remaining 10,000 form a single test batch. The test batch contains exactly 1,000 randomly selected images from each class; the leftover images are arranged randomly into the training batches. Note that the classes are therefore not necessarily balanced within an individual training batch, but across the training set as a whole each class has exactly 5,000 images.
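
A quick verification sketch (an addition, assuming the standard tf.keras loader used below):

import numpy as np
import tensorflow as tf

(train_x, train_y), (test_x, test_y) = tf.keras.datasets.cifar10.load_data()
# Each of the 10 classes should appear 5000 times in train and 1000 in test
print(np.bincount(train_y.flatten()))  # -> [5000 5000 ... 5000]
print(np.bincount(test_y.flatten()))   # -> [1000 1000 ... 1000]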



In [1]:
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

1. Load the dataset

The dataset can be loaded directly from tf.keras.datasets.

In [2]:
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.cifar10.load_data()
print(train_x.shape, train_y.shape)
(50000, 32, 32, 3) (50000, 1)

These are 32x32 color images; how are the three RGB channels handled? Conv2D deals with them automatically: with input_shape=(32, 32, 3), every convolution kernel spans all three channels.

In [3]:
plt.imshow(train_x[0])
plt.show()
In [4]:
plt.figure()
plt.imshow(train_x[1])
plt.figure()
plt.imshow(train_x[2])
plt.show()
In [5]:
print(test_y)
[[3]
 [8]
 [8]
 ...
 [5]
 [1]
 [7]]
In [6]:
# RGB pixel values
np.max(train_x[0])
Out[6]:
255

2. Split the dataset (into training and test sets)

The previous step already did the train/test split for us.

In [7]:
# How do we normalize the image data?
# Simply divide by 255
train_x = train_x/255.0
test_x = test_x/255.0
In [8]:
# RGB pixel values
np.max(train_x[0])
Out[8]:
1.0
In [9]:
train_y=train_y.flatten()
test_y=test_y.flatten()
train_y = tf.one_hot(train_y, depth=10)
test_y = tf.one_hot(test_y, depth=10)
print(test_y.shape)
(10000, 10)
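
As an aside, the one-hot step can be skipped: Keras also accepts raw integer labels together with the sparse variant of the loss. A sketch of that alternative (not what this post does; it would replace the tf.one_hot calls above and the compile call in step 4):

# Keep train_y / test_y as integer class ids and let the loss handle them
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
# model.fit(train_x, train_y, ...) then consumes the integer labels directly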

3. Build the model

What kind of model should we build?

In [ ]:
# The two kinds of Resnet blocks

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False, input_shape=(32, 32, 3)))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))
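
For comparison, a minimal functional-API sketch of the same-dimension block with its skip connection actually wired in (illustrative only; the Sequential model below leaves the additions out):

def identity_block(x, filters):
    # Main path: two 3x3 stride-1 convs; the shape never changes,
    # so the input x can be added back without any projection
    y = tf.keras.layers.Conv2D(filters, 3, strides=1, padding='same', use_bias=False)(x)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.Activation('relu')(y)
    y = tf.keras.layers.Conv2D(filters, 3, strides=1, padding='same', use_bias=False)(y)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.Add()([y, x])  # the residual connection
    return tf.keras.layers.Activation('relu')(y)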
In [10]:
# Build the container
model = tf.keras.Sequential()

# ===================================================

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False, input_shape=(32, 32, 3)))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ===================================================

# ResnetBlock (different dimensions): 3x3 conv, 3x3 conv, 1x1 conv
model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(1, 1), strides=2, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# ResnetBlock (same dimensions): 3x3 conv, 3x3 conv
model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))  # activation layer

model.add(tf.keras.layers.Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding='same', use_bias=False))  # conv layer
model.add(tf.keras.layers.BatchNormalization())  # BN layer
model.add(tf.keras.layers.Activation('relu'))

# Global average pooling
model.add(tf.keras.layers.GlobalAveragePooling2D())
# Output layer
model.add(tf.keras.layers.Dense(10, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2()))
# Model structure
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 32, 32, 64)        1728      
_________________________________________________________________
batch_normalization (BatchNo (None, 32, 32, 64)        256       
_________________________________________________________________
activation (Activation)      (None, 32, 32, 64)        0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 32, 32, 64)        36864     
_________________________________________________________________
batch_normalization_1 (Batch (None, 32, 32, 64)        256       
_________________________________________________________________
activation_1 (Activation)    (None, 32, 32, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 32, 32, 64)        36864     
_________________________________________________________________
batch_normalization_2 (Batch (None, 32, 32, 64)        256       
_________________________________________________________________
activation_2 (Activation)    (None, 32, 32, 64)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 32, 32, 64)        36864     
_________________________________________________________________
batch_normalization_3 (Batch (None, 32, 32, 64)        256       
_________________________________________________________________
activation_3 (Activation)    (None, 32, 32, 64)        0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 16, 16, 128)       73728     
_________________________________________________________________
batch_normalization_4 (Batch (None, 16, 16, 128)       512       
_________________________________________________________________
activation_4 (Activation)    (None, 16, 16, 128)       0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 16, 16, 128)       147456    
_________________________________________________________________
batch_normalization_5 (Batch (None, 16, 16, 128)       512       
_________________________________________________________________
activation_5 (Activation)    (None, 16, 16, 128)       0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 128)         16384     
_________________________________________________________________
batch_normalization_6 (Batch (None, 8, 8, 128)         512       
_________________________________________________________________
activation_6 (Activation)    (None, 8, 8, 128)         0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 8, 8, 128)         147456    
_________________________________________________________________
batch_normalization_7 (Batch (None, 8, 8, 128)         512       
_________________________________________________________________
activation_7 (Activation)    (None, 8, 8, 128)         0         
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 8, 8, 128)         147456    
_________________________________________________________________
batch_normalization_8 (Batch (None, 8, 8, 128)         512       
_________________________________________________________________
activation_8 (Activation)    (None, 8, 8, 128)         0         
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 4, 4, 256)         294912    
_________________________________________________________________
batch_normalization_9 (Batch (None, 4, 4, 256)         1024      
_________________________________________________________________
activation_9 (Activation)    (None, 4, 4, 256)         0         
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 4, 4, 256)         589824    
_________________________________________________________________
batch_normalization_10 (Batc (None, 4, 4, 256)         1024      
_________________________________________________________________
activation_10 (Activation)   (None, 4, 4, 256)         0         
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 2, 2, 256)         65536     
_________________________________________________________________
batch_normalization_11 (Batc (None, 2, 2, 256)         1024      
_________________________________________________________________
activation_11 (Activation)   (None, 2, 2, 256)         0         
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 2, 2, 256)         589824    
_________________________________________________________________
batch_normalization_12 (Batc (None, 2, 2, 256)         1024      
_________________________________________________________________
activation_12 (Activation)   (None, 2, 2, 256)         0         
_________________________________________________________________
conv2d_13 (Conv2D)           (None, 2, 2, 256)         589824    
_________________________________________________________________
batch_normalization_13 (Batc (None, 2, 2, 256)         1024      
_________________________________________________________________
activation_13 (Activation)   (None, 2, 2, 256)         0         
_________________________________________________________________
conv2d_14 (Conv2D)           (None, 1, 1, 512)         1179648   
_________________________________________________________________
batch_normalization_14 (Batc (None, 1, 1, 512)         2048      
_________________________________________________________________
activation_14 (Activation)   (None, 1, 1, 512)         0         
_________________________________________________________________
conv2d_15 (Conv2D)           (None, 1, 1, 512)         2359296   
_________________________________________________________________
batch_normalization_15 (Batc (None, 1, 1, 512)         2048      
_________________________________________________________________
activation_15 (Activation)   (None, 1, 1, 512)         0         
_________________________________________________________________
conv2d_16 (Conv2D)           (None, 1, 1, 512)         262144    
_________________________________________________________________
batch_normalization_16 (Batc (None, 1, 1, 512)         2048      
_________________________________________________________________
activation_16 (Activation)   (None, 1, 1, 512)         0         
_________________________________________________________________
conv2d_17 (Conv2D)           (None, 1, 1, 512)         2359296   
_________________________________________________________________
batch_normalization_17 (Batc (None, 1, 1, 512)         2048      
_________________________________________________________________
activation_17 (Activation)   (None, 1, 1, 512)         0         
_________________________________________________________________
conv2d_18 (Conv2D)           (None, 1, 1, 512)         2359296   
_________________________________________________________________
batch_normalization_18 (Batc (None, 1, 1, 512)         2048      
_________________________________________________________________
activation_18 (Activation)   (None, 1, 1, 512)         0         
_________________________________________________________________
global_average_pooling2d (Gl (None, 512)               0         
_________________________________________________________________
dense (Dense)                (None, 10)                5130      
=================================================================
Total params: 11,318,474
Trainable params: 11,309,002
Non-trainable params: 9,472
_________________________________________________________________
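
The parameter counts in this summary can be checked by hand: a bias-free Conv2D layer has kernel_h x kernel_w x in_channels x filters weights, and each BatchNormalization layer carries 4 parameters per channel (gamma and beta trainable; moving mean and variance non-trainable). For example:

print(3 * 3 * 3 * 64)   # 1728  -> conv2d (3x3 kernel, 3 input channels, 64 filters)
print(3 * 3 * 64 * 64)  # 36864 -> conv2d_1 (3x3 kernel, 64 in, 64 out)
print(4 * 64)           # 256   -> batch_normalization (4 params per channel)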

4. Train the model

In [11]:
# Configure the optimizer and loss
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
# Start training
history = model.fit(train_x,train_y,batch_size=128,epochs=50,validation_data=(test_x,test_y))
Epoch 1/50
  2/391 [..............................] - ETA: 21s - loss: 2.8172 - acc: 0.1797WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0349s vs `on_train_batch_end` time: 0.0722s). Check your callbacks.
391/391 [==============================] - 47s 121ms/step - loss: 1.6363 - acc: 0.4444 - val_loss: 3.0470 - val_acc: 0.2321
Epoch 2/50
391/391 [==============================] - 49s 126ms/step - loss: 1.0962 - acc: 0.6266 - val_loss: 1.8872 - val_acc: 0.5047
Epoch 3/50
391/391 [==============================] - 58s 148ms/step - loss: 0.8536 - acc: 0.7134 - val_loss: 1.2344 - val_acc: 0.6304
Epoch 4/50
391/391 [==============================] - 65s 166ms/step - loss: 0.6976 - acc: 0.7682 - val_loss: 1.0695 - val_acc: 0.6776
Epoch 5/50
391/391 [==============================] - 65s 166ms/step - loss: 0.5781 - acc: 0.8125 - val_loss: 1.3413 - val_acc: 0.6408
Epoch 6/50
391/391 [==============================] - 66s 168ms/step - loss: 0.4976 - acc: 0.8413 - val_loss: 0.9659 - val_acc: 0.6997
Epoch 7/50
391/391 [==============================] - 66s 170ms/step - loss: 0.4265 - acc: 0.8660 - val_loss: 0.8132 - val_acc: 0.7601
Epoch 8/50
391/391 [==============================] - 70s 178ms/step - loss: 0.3672 - acc: 0.8849 - val_loss: 0.9416 - val_acc: 0.7353
Epoch 9/50
391/391 [==============================] - 69s 177ms/step - loss: 0.3184 - acc: 0.9005 - val_loss: 1.1263 - val_acc: 0.7213
Epoch 10/50
391/391 [==============================] - 74s 189ms/step - loss: 0.2739 - acc: 0.9166 - val_loss: 0.8242 - val_acc: 0.7755
Epoch 11/50
391/391 [==============================] - 71s 182ms/step - loss: 0.2325 - acc: 0.9301 - val_loss: 1.0679 - val_acc: 0.7383
Epoch 12/50
391/391 [==============================] - 73s 187ms/step - loss: 0.2090 - acc: 0.9385 - val_loss: 1.1227 - val_acc: 0.7183
Epoch 13/50
391/391 [==============================] - 74s 190ms/step - loss: 0.1840 - acc: 0.9470 - val_loss: 1.0284 - val_acc: 0.7446
Epoch 14/50
391/391 [==============================] - 74s 189ms/step - loss: 0.1682 - acc: 0.9519 - val_loss: 1.0932 - val_acc: 0.7332
Epoch 15/50
391/391 [==============================] - 76s 193ms/step - loss: 0.1436 - acc: 0.9596 - val_loss: 0.9541 - val_acc: 0.7671
Epoch 16/50
391/391 [==============================] - 70s 180ms/step - loss: 0.1374 - acc: 0.9614 - val_loss: 0.9939 - val_acc: 0.7615
Epoch 17/50
391/391 [==============================] - 71s 181ms/step - loss: 0.1204 - acc: 0.9668 - val_loss: 0.8042 - val_acc: 0.8139
Epoch 18/50
391/391 [==============================] - 68s 175ms/step - loss: 0.1144 - acc: 0.9690 - val_loss: 1.0072 - val_acc: 0.7695
Epoch 19/50
391/391 [==============================] - 67s 172ms/step - loss: 0.1035 - acc: 0.9724 - val_loss: 0.9769 - val_acc: 0.7785
Epoch 20/50
391/391 [==============================] - 71s 182ms/step - loss: 0.1054 - acc: 0.9718 - val_loss: 1.0007 - val_acc: 0.7683
Epoch 21/50
391/391 [==============================] - 70s 179ms/step - loss: 0.0985 - acc: 0.9735 - val_loss: 0.9973 - val_acc: 0.7736
Epoch 22/50
391/391 [==============================] - 71s 182ms/step - loss: 0.0902 - acc: 0.9760 - val_loss: 1.1456 - val_acc: 0.7631
Epoch 23/50
391/391 [==============================] - 71s 182ms/step - loss: 0.0810 - acc: 0.9789 - val_loss: 0.8866 - val_acc: 0.7941
Epoch 24/50
391/391 [==============================] - 71s 182ms/step - loss: 0.0834 - acc: 0.9780 - val_loss: 1.0282 - val_acc: 0.7846
Epoch 25/50
391/391 [==============================] - 71s 181ms/step - loss: 0.0767 - acc: 0.9804 - val_loss: 1.0002 - val_acc: 0.7874
Epoch 26/50
391/391 [==============================] - 71s 182ms/step - loss: 0.0715 - acc: 0.9817 - val_loss: 0.9370 - val_acc: 0.7933
Epoch 27/50
391/391 [==============================] - 71s 182ms/step - loss: 0.0704 - acc: 0.9818 - val_loss: 0.9685 - val_acc: 0.8004
Epoch 28/50
391/391 [==============================] - 82s 211ms/step - loss: 0.0671 - acc: 0.9830 - val_loss: 1.0319 - val_acc: 0.7801
Epoch 29/50
391/391 [==============================] - 104s 266ms/step - loss: 0.0613 - acc: 0.9848 - val_loss: 0.9145 - val_acc: 0.8000
Epoch 30/50
391/391 [==============================] - 102s 261ms/step - loss: 0.0633 - acc: 0.9838 - val_loss: 1.0077 - val_acc: 0.7972
Epoch 31/50
391/391 [==============================] - 91s 232ms/step - loss: 0.0631 - acc: 0.9842 - val_loss: 1.0202 - val_acc: 0.7948
Epoch 32/50
391/391 [==============================] - 94s 242ms/step - loss: 0.0540 - acc: 0.9869 - val_loss: 0.9116 - val_acc: 0.8074
Epoch 33/50
391/391 [==============================] - 90s 230ms/step - loss: 0.0609 - acc: 0.9849 - val_loss: 1.1728 - val_acc: 0.7618
Epoch 34/50
391/391 [==============================] - 86s 220ms/step - loss: 0.0534 - acc: 0.9868 - val_loss: 0.9026 - val_acc: 0.8037
Epoch 35/50
391/391 [==============================] - 86s 220ms/step - loss: 0.0523 - acc: 0.9878 - val_loss: 0.9588 - val_acc: 0.8017
Epoch 36/50
391/391 [==============================] - 86s 221ms/step - loss: 0.0444 - acc: 0.9899 - val_loss: 1.1109 - val_acc: 0.7801
Epoch 37/50
391/391 [==============================] - 86s 220ms/step - loss: 0.0523 - acc: 0.9873 - val_loss: 0.9235 - val_acc: 0.8049
Epoch 38/50
391/391 [==============================] - 86s 219ms/step - loss: 0.0517 - acc: 0.9873 - val_loss: 1.0006 - val_acc: 0.7980
Epoch 39/50
391/391 [==============================] - 86s 219ms/step - loss: 0.0466 - acc: 0.9884 - val_loss: 0.9359 - val_acc: 0.8069
Epoch 40/50
391/391 [==============================] - 86s 220ms/step - loss: 0.0445 - acc: 0.9894 - val_loss: 0.9902 - val_acc: 0.7973
Epoch 41/50
391/391 [==============================] - 86s 219ms/step - loss: 0.0439 - acc: 0.9895 - val_loss: 1.0221 - val_acc: 0.7982
Epoch 42/50
391/391 [==============================] - 86s 220ms/step - loss: 0.0432 - acc: 0.9897 - val_loss: 0.8906 - val_acc: 0.8116
Epoch 43/50
391/391 [==============================] - 87s 223ms/step - loss: 0.0427 - acc: 0.9898 - val_loss: 1.0218 - val_acc: 0.8023
Epoch 44/50
391/391 [==============================] - 79s 201ms/step - loss: 0.0419 - acc: 0.9903 - val_loss: 0.9575 - val_acc: 0.8057
Epoch 45/50
391/391 [==============================] - 76s 194ms/step - loss: 0.0393 - acc: 0.9907 - val_loss: 0.9459 - val_acc: 0.8037
Epoch 46/50
391/391 [==============================] - 103s 262ms/step - loss: 0.0358 - acc: 0.9919 - val_loss: 0.8833 - val_acc: 0.8076
Epoch 47/50
391/391 [==============================] - 86s 219ms/step - loss: 0.0391 - acc: 0.9907 - val_loss: 1.1165 - val_acc: 0.7835
Epoch 48/50
391/391 [==============================] - 82s 211ms/step - loss: 0.0339 - acc: 0.9923 - val_loss: 0.9455 - val_acc: 0.8102
Epoch 49/50
391/391 [==============================] - 81s 207ms/step - loss: 0.0368 - acc: 0.9911 - val_loss: 0.9709 - val_acc: 0.8100
Epoch 50/50
391/391 [==============================] - 82s 210ms/step - loss: 0.0338 - acc: 0.9926 - val_loss: 0.9594 - val_acc: 0.8110
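
The log shows a classic overfitting pattern: training accuracy climbs past 0.99 while validation accuracy plateaus around 0.80-0.81. One common remedy, sketched here as an assumption rather than something from the original run, is to stop on the validation metric:

# Hypothetical variant: stop when val_acc stops improving for 5 epochs
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_acc',
                                              patience=5,
                                              restore_best_weights=True)
history = model.fit(train_x, train_y, batch_size=128, epochs=50,
                    validation_data=(test_x, test_y),
                    callbacks=[early_stop])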
In [12]:
plt.plot(history.epoch,history.history.get('loss'))
plt.title("train data loss")
plt.show()
In [13]:
plt.plot(history.epoch,history.history.get('val_loss'))
plt.title("test data loss")
plt.show()
In [14]:
plt.plot(history.epoch,history.history.get('acc'))
plt.title("train data acc")
plt.show()
In [15]:
plt.plot(history.epoch,history.history.get('val_acc'))
plt.title("test data acc")
plt.show()

5. Evaluate the model

In [16]:
# Take a look at the model's predictive ability
predict_y = model.predict(test_x)
print(predict_y)
print(test_y)
[[4.9199176e-04 6.5483042e-04 8.3143410e-04 ... 9.6460141e-04
  7.4021431e-04 9.3528494e-04]
 [1.3349028e-04 4.7717502e-05 3.7917389e-06 ... 2.1447826e-05
  9.9956256e-01 1.3025121e-04]
 [3.7733724e-05 1.4747976e-05 1.0107384e-06 ... 7.0531887e-06
  9.9986100e-01 4.4848210e-05]
 ...
 [1.6339345e-05 2.8372289e-05 6.1155937e-05 ... 2.2989197e-04
  4.0727486e-05 1.6455990e-05]
 [8.0899794e-05 9.9973887e-01 7.9612191e-06 ... 7.5331459e-06
  7.3146948e-06 1.0017386e-05]
 [1.4366872e-04 2.3366158e-05 1.3137512e-05 ... 9.9933928e-01
  2.6286190e-05 3.2659078e-05]]
tf.Tensor(
[[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 1. 0.]
 [0. 0. 0. ... 0. 1. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 1. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 1. 0. 0.]], shape=(10000, 10), dtype=float32)
In [17]:
# Take the index of the max value in predict_y, along each row
predict_y = tf.argmax(predict_y, axis=1)
print(predict_y)
# Same for the one-hot test labels
test_y = tf.argmax(test_y, axis=1)
print(test_y)
tf.Tensor([3 8 8 ... 5 1 7], shape=(10000,), dtype=int64)
tf.Tensor([3 8 8 ... 5 1 7], shape=(10000,), dtype=int64)
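
With both tensors reduced to class indices, the overall test accuracy follows directly (a small addition for completeness):

# Fraction of test images whose predicted class matches the label
acc = tf.reduce_mean(tf.cast(tf.equal(predict_y, test_y), tf.float32))
print(acc.numpy())  # roughly 0.81, matching the final val_acc above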
In [18]:
plt.figure()
plt.imshow(test_x[0])
plt.figure()
plt.imshow(test_x[1])
plt.figure()
plt.imshow(test_x[2])
plt.figure()
plt.imshow(test_x[3])
plt.show()
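
CIFAR-10's classes are, in label order: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck. A small sketch to title each displayed image with its predicted class:

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(4):
    plt.figure()
    plt.imshow(test_x[i])
    plt.title(class_names[int(predict_y[i])])  # e.g. 'cat' for test_x[0]
plt.show()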
posted @ 2020-09-21 19:19 范仁义