Getting Started with TensorFlow: Single-Layer Regression Code and Multi-Layer Neural Network Code for MNIST [with brief explanations]
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 10:16:34 2017
single layer softmax regression
@author: Wangjc code from TensorFlow
"""
import tensorflow.examples.tutorials.mnist.input_data as input_data
#the full module path must be used in the import, or an error occurs.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#read_data_sets downloads the MNIST data set, or simply loads it if the files are already present.
#the first download can take a while (roughly 5 minutes).
import tensorflow as tf
sess = tf.InteractiveSession()
#connect to the C++ back end that performs the computation.
#normally the computation graph is built first and then run in a session.
#InteractiveSession is more convenient: it lets graph construction be interleaved with running operations.
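# (Background note, a sketch not in the original tutorial) With a plain tf.Session
# the graph is typically built first and then run explicitly, e.g.:
#     with tf.Session() as s:
#         s.run(tf.global_variables_initializer())
#         s.run(some_op, feed_dict={...})
# InteractiveSession installs itself as the default session, so Tensor.eval()
# and Operation.run() below can be called without passing a session object.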
x=tf.placeholder("float",shape=[None,784])
y_=tf.placeholder("float",shape=[None,10])
#x holds the input data: 28*28 = 784 pixels per image
#y_ holds the one-hot class labels (10 classes)
w=tf.Variable(tf.zeros([784,10]))
b=tf.Variable(tf.zeros([10]))
#w: weights, 784 inputs and 10 outputs
#b: bias, 10 outputs
sess.run(tf.global_variables_initializer())
#initialize the variables (global_variables_initializer replaces the deprecated initialize_all_variables)
y=tf.nn.softmax(tf.matmul(x,w)+b)
#softmax turns the linear scores into predicted class probabilities
cross_entropy=-tf.reduce_sum(y_*tf.log(y))
#cross-entropy loss, summed over the batch with reduce_sum
train_step=tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
#gradient descent with a learning rate of 0.01; TensorFlow uses automatic differentiation to minimize the cross entropy
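# (Optional note, a sketch assuming the TF 1.x API) the hand-written
# -reduce_sum(y_*log(y)) can produce NaN when y contains exact zeros.
# A more numerically stable formulation works on the pre-softmax logits:
#     logits = tf.matmul(x, w) + b
#     cross_entropy = tf.reduce_mean(
#         tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))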
for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# each iteration loads a batch of 50 training samples and runs train_step once
# feed_dict feeds the batch into the placeholders x and y_
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
#check whether the predicted class matches the true label
#argmax returns the index of the largest value along the given dimension
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#cast the booleans to floats and take their mean to get the accuracy
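# Illustration of the accuracy mechanics (illustrative values only):
#     tf.argmax([[0.1, 0.7, 0.2]], 1)  -> [1]          (index of the largest entry)
#     tf.equal([1], [1])               -> [True]
#     tf.cast([True, False], "float")  -> [1.0, 0.0]   (their mean is the accuracy)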
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#evaluate the accuracy
Multi-layer neural network code:
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 10:16:34 2017
multi-layer convolutional network with softmax output
@author: Wangjc code from TensorFlow
"""
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
#the full module path must be used in the import, or an error occurs.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#read_data_sets downloads the MNIST data set, or simply loads it if the files are already present.
#the first download can take a while (roughly 5 minutes).
sess = tf.InteractiveSession()
#connect to the C++ back end that performs the computation.
#normally the computation graph is built first and then run in a session.
#InteractiveSession is more convenient: it lets graph construction be interleaved with running operations.
x=tf.placeholder("float",shape=[None,784])
y_=tf.placeholder("float",shape=[None,10])
def weight_variable(shape):
    #initialize the weights from a truncated normal distribution with stddev 0.1
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    #initialize the biases with the constant value 0.1
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
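# (Background note) small random weights break the symmetry between units, and
# the slightly positive bias of 0.1 helps keep ReLU units active at the start
# of training instead of starting "dead" at zero.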
def conv2d(x, W):
    #convolution with filter W: stride 1, SAME padding (output keeps the input's spatial size)
    #x must be 4-D: [batch, height, width, channels]
    #strides has one entry per dimension of x
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    #max pooling over 2x2 windows (ksize) with stride 2 and SAME padding
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
#------------------------------------------------
W_conv1=weight_variable([5,5,1,32])
b_conv1=bias_variable([32])
#build the first conv layer:
#compute 32 features from each 5*5 patch, so the filter shape is [5, 5, 1 (input channel), 32]
x_image = tf.reshape(x, [-1,28,28,1])
#to use the conv layer, x must be reshaped to 4-D: [batch, height, width, channels]
#-1 means this dimension (the batch size) is inferred automatically
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#--------------------------------------------
W_conv2=weight_variable([5,5,32,64])
b_conv2=bias_variable([64])
#build the 2nd conv layer:
#compute 64 features from each 5*5 patch, so the filter shape is [5, 5, 32 (input channels), 64]
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#----------------------------------------
#after two rounds of pooling the image size is reduced to 7*7
#add a fully connected layer with 1024 neurons
#the pooled tensor must be flattened before the matrix multiplication
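# Shape check: 28x28 -> conv1 (SAME, stride 1) -> 28x28 -> pool1 -> 14x14
#           -> conv2 (SAME, stride 1) -> 14x14 -> pool2 -> 7x7 with 64 channels,
# so each example is flattened to 7*7*64 = 3136 values before the dense layer.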
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1) + b_fc1)
#------------------------------------
#dropout and output layer
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
#to reduce overfitting, apply dropout before the output layer
#keep_prob is a placeholder for the probability that a neuron's output is kept unchanged
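# (Background note) tf.nn.dropout keeps each activation with probability
# keep_prob and scales the kept values by 1/keep_prob, so the expected output
# stays the same; at test time keep_prob is fed as 1.0 to disable dropout.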
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
#---------------------------------
#train and evaluate the model
#use the Adam optimizer
cross_entropy=-tf.reduce_sum(y_*tf.log(y_conv))
train_step=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.global_variables_initializer())
for i in range(5000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
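If memory is limited (for example on a small GPU), feeding all 10,000 test images in a single feed_dict may fail; a minimal sketch of batched evaluation, not part of the original tutorial (test_batches, acc_sum and tb are names introduced here), is:
test_batches = 200  # 200 batches of 50 images cover the 10,000 test images once
acc_sum = 0.0
for j in range(test_batches):
    tb = mnist.test.next_batch(50)
    acc_sum += accuracy.eval(feed_dict={x: tb[0], y_: tb[1], keep_prob: 1.0})
print("batched test accuracy %g" % (acc_sum / test_batches))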
See also: a reference with a more detailed explanation of the code.
See also: a reference on visualizing the intermediate results.
