tensorboard可视化
---恢复内容开始---
1. 查看tensorboard的一个简单例子
# TensorBoard demo: build a tiny graph (y = x * w + b) and serialize its
# structure to an event file that TensorBoard's Graphs tab can display.
import tensorflow as tf

input_tensor = tf.constant(1.0, name="input")
weight = tf.Variable(0.5, name="weight")
bias = tf.Variable(0.1, name="bias")
output = tf.add(tf.multiply(input_tensor, weight, name="mul_op"), bias, name="add_op")

# Event files land in this directory; point `tensorboard --logdir` at it.
summary_writer = tf.summary.FileWriter("./calc_graph")
# Attach the default graph so its structure gets written out.
summary_writer.add_graph(tf.get_default_graph())
# Push the pending graph event to disk.
summary_writer.flush()
在命令行中进到放置calc_graph文件夹的目录下,输入:
tensorboard --logdir=./calc_graph
根据提示打开浏览器,输入地址,便可以查看保存的图结构
至此,我们查看了TensorBoard的Graphs模块
2.一个简单的梯度下降例子
# Linear-regression demo: fit y ≈ 2x + 10 with gradient descent while
# logging weight / bias / loss scalars for TensorBoard's Scalars tab.
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
weight = tf.get_variable("weight", [], tf.float32,
                         initializer=tf.random_normal_initializer())
bias = tf.get_variable("bias", [], tf.float32,
                       initializer=tf.random_normal_initializer())
pred = tf.add(tf.multiply(x, weight, name="mul_op"), bias, name="add_op")
############# parameters defined, data prepared ###################

loss = tf.square(y - pred, name="loss")
optimizer = tf.train.GradientDescentOptimizer(0.01)
# compute_gradients + apply_gradients is equivalent to optimizer.minimize(loss);
# spelled out here to show the two phases separately.
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars)
################## model setup ######################

tf.summary.scalar("weight", weight)
tf.summary.scalar("bias", bias)
# The feeds below have shape (1,), so loss is (1,); index to get the scalar
# that tf.summary.scalar requires.
tf.summary.scalar("loss", loss[0])
# Merge every scalar summary into one fetchable op.
merged_summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter("./log_graph")
summary_writer.add_graph(tf.get_default_graph())
############### collect training data for TensorBoard #########################

init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    for step in range(500):
        train_x = np.random.randn(1)
        train_y = 2 * train_x + np.random.randn(1) * 0.01 + 10
        # A single sess.run executes both ops: the parameter update and
        # the summary fetch.
        _, summary = sess.run([train_op, merged_summary],
                              feed_dict={x: train_x, y: train_y})
        summary_writer.add_summary(summary, step)
# BUGFIX: close (and thereby flush) the writer so buffered summary events
# actually reach disk; the original never flushed it.
summary_writer.close()
我们可以查看tensorboard的Scalars和Graphs模块:




3.一个简单的神经网络例子
增加了命名空间,让图结构更清晰
# A simple neural network with one hidden layer.
import tensorflow as tf


def add_layer(input_x, in_size, out_size, activation_function=None):
    """Append one fully connected layer to the default graph.

    Args:
        input_x: 2-D input tensor; assumed shape (batch, in_size) —
            required by the matmul below.
        in_size: number of input features.
        out_size: number of output units.
        activation_function: optional callable applied to w*x + b;
            when None the raw affine output is returned.

    Returns:
        The layer's output tensor.
    """
    # name_scope groups this layer's ops so the TensorBoard graph stays tidy.
    with tf.name_scope("layer"):
        with tf.name_scope("weights"):
            weights = tf.Variable(tf.random_normal([in_size, out_size]))
        with tf.name_scope("bias"):
            # Bias starts at 0.1 instead of 0.
            bias = tf.Variable(tf.zeros(out_size) + 0.1)
        with tf.name_scope("wx_plus_b"):
            wx_plus_b = tf.matmul(input_x, weights) + bias
        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b)
        return outputs
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): `%matplotlib inline` is IPython/Jupyter magic, not valid
# plain-Python syntax; it stays commented out outside a notebook.
# %matplotlib inline

# Training data: y = x^2 - 0.5 plus Gaussian noise, x in [-1, 1].
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # add an axis -> shape (300, 1)
noise = np.random.normal(0, 0.1, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope("input"):
    xs = tf.placeholder(tf.float32, [None, 1], name="x_input")
    ys = tf.placeholder(tf.float32, [None, 1], name="y_input")
############# data preparation #####################

hidden_layer_out = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
final_layer_out = add_layer(hidden_layer_out, 10, 1, activation_function=None)
################### model construction ########################

with tf.name_scope("loss"):
    # NOTE(review): the loss uses y_data (a NumPy constant) directly;
    # the ys placeholder above is defined but never used here — confirm intent.
    loss = tf.reduce_mean(tf.reduce_sum(
        tf.square(y_data - final_layer_out),
        reduction_indices=[1]
    ))
optimizer = tf.train.GradientDescentOptimizer(0.01)
with tf.name_scope("train"):
    train_op = optimizer.minimize(loss)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()
# BUGFIX: the original `plt.show` was a bare attribute access and did
# nothing; it must be called.
plt.show()

init = tf.global_variables_initializer()
with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter("./log_graph4")
    summary_writer.add_graph(tf.get_default_graph())
    # BUGFIX: flush so the graph event actually reaches disk for TensorBoard.
    summary_writer.flush()
    sess.run(init)

取消add_layer中的命名空间后,图结构如下:

---恢复内容结束---

浙公网安备 33010602011771号