【2-2】A Simple Neural Network Example

 

import tensorflow as tf
import numpy as np

# placeholder
# When a node needs data supplied at run time, use a placeholder. In TensorFlow a placeholder
# describes a node that is waiting for input; only the dtype (and optionally the shape) has to
# be specified. When the graph is executed, a dictionary is used to "feed" these nodes:
# the slot is held first, and data is passed in from outside on every run.
# Note that placeholder and feed_dict are always used together
# (see the standalone sketch after the training output below).
# The feed supplies data as an argument to the run() call; it is only valid inside that call
# and disappears once the call returns.

# Add a layer
def add_layer(inputs, in_size, out_size, activation_function=None):
    # Add one more layer and return its output.
    # Arguments: the layer input, the number of input and output nodes, and the activation function.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # the [] is required: it is the shape argument
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # likewise, zeros([1, out_size]) takes a shape
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# 1. Training data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# 2. Define the nodes that will receive the data
xs = tf.placeholder(tf.float32, [None, 1])        # shape=[None,1]
ys = tf.placeholder(tf.float32, [None, 1])

# 3. Define the network layers: a hidden layer and an output layer
# The input is xs; the hidden layer has 10 neurons
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# The input is the hidden layer l1; the prediction layer outputs a single value
prediction = add_layer(l1, 10, 1, activation_function=None)

# 4. Define the loss expression
# The error between the prediction and the data: the mean squared error over the batch
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                      reduction_indices=[1]))

# 5. Minimize the loss with gradient descent, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()   # initialize_all_variables() is deprecated

with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
        if i % 50 == 0:
            print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
>>>
2.13953
0.0208985
0.00737973
0.00562527
0.00518565
0.00500644
0.00489249
0.00478653
0.00469118
0.00460924
0.00453592
0.00447866
0.00443341
0.00438404
0.00430098
0.0042153
0.00414793
0.00409648
0.00405469
0.00401441
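
As noted in the comments at the top of the script, placeholder and feed_dict always go together: the placeholder reserves a typed slot in the graph, and feed_dict supplies concrete values for one run() call only. Here is a minimal, self-contained sketch of just that mechanism, using the same TensorFlow 1.x API as the script above (the names x, y and the fed values 3.0 and 7.0 are made up for illustration):

import tensorflow as tf

# Two typed slots in the graph; they hold no data until run time.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
product = x * y

with tf.Session() as sess:
    # The fed values exist only for the duration of this run() call.
    print(sess.run(product, feed_dict={x: 3.0, y: 7.0}))  # prints 21.0
    # Calling sess.run(product) without feed_dict would raise an error,
    # because the placeholders have no values of their own.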

When using TensorFlow from Python:
In tf.reduce_sum, the reduction_indices parameter specifies the dimension along which the reduction is performed.
When reduction_indices is left at its default value of None, all elements are summed and the result is a single scalar.
When reduction_indices is 0, elements at corresponding positions along dimension 0 are added (summing down the columns).
When reduction_indices is 1, elements at corresponding positions along dimension 1 are added (summing across the rows).

 https://blog.csdn.net/u014772246/article/details/84973358

import tensorflow as tf
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6]])
b = tf.reduce_sum(a, reduction_indices=[0])
c = tf.reduce_sum(a, reduction_indices=[1])
d = tf.reduce_sum(a)

with tf.Session() as sess:
    b_result, c_result, d_result = sess.run([b, c, d])
    print(a)
    print("shape of a:", a.shape)
    print("with reduction_indices=[0]:")
    print(b_result)
    print("with reduction_indices=[1]:")
    print(c_result)
    print("with reduction_indices left as the default None:")
    print(d_result)
>>>
[[1 2 3]
 [4 5 6]]
shape of a: (2, 3)
with reduction_indices=[0]:
[5 7 9]
with reduction_indices=[1]:
[ 6 15]
with reduction_indices left as the default None:
21
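
For reference: in the TensorFlow 1.x API, reduction_indices is the older (now deprecated) name for the axis argument of tf.reduce_sum, so the two reductions above can be written equivalently as:

b = tf.reduce_sum(a, axis=0)  # same result as reduction_indices=[0]
c = tf.reduce_sum(a, axis=1)  # same result as reduction_indices=[1]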

 2019-05-30 14:23:58
