# tf.device gives you a lot of flexibility to choose placements for individual
# operations in the TensorFlow computation graph. In many cases there are
# heuristics that work well. For example, the tf.train.replica_device_setter
# API can be used with tf.device for data-parallel distributed training.
# The code block below shows how tf.train.replica_device_setter applies
# different placement policies to tf.Variable objects and to other operations.
import tensorflow as tf

with tf.device(tf.train.replica_device_setter(ps_tasks=3)):
    # tf.Variable objects are, by default, placed on tasks in "/job:ps" in a
    # round-robin fashion.
    w_0 = tf.Variable(...)  # placed on "/job:ps/task:0"
    b_0 = tf.Variable(...)  # placed on "/job:ps/task:1"
    w_1 = tf.Variable(...)  # placed on "/job:ps/task:2"
    b_1 = tf.Variable(...)  # placed on "/job:ps/task:0"
    input_data = tf.placeholder(tf.float32)     # placed on "/job:worker"
    layer_0 = tf.matmul(input_data, w_0) + b_0  # placed on "/job:worker"
    layer_1 = tf.matmul(layer_0, w_1) + b_1     # placed on "/job:worker"
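
# A minimal sketch (the host addresses are hypothetical) of driving the same
# round-robin placement from an explicit tf.train.ClusterSpec instead of a
# bare ps_tasks count; replica_device_setter also accepts a cluster= argument.
cluster = tf.train.ClusterSpec({
    "ps": ["ps0.example.com:2222", "ps1.example.com:2222",
           "ps2.example.com:2222"],
    "worker": ["worker0.example.com:2222"],
})
with tf.device(tf.train.replica_device_setter(cluster=cluster)):
    v = tf.Variable(...)  # still round-robined across the three "/job:ps" tasks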