TensorFlow Usage Notes (1): Basic Concepts

Basic Usage

  • Use a graph to represent the computation task
  • Execute the graph in a context called a Session
  • Use tensor to represent data
  • Maintain state through Variable
  • Use feed and fetch to assign values to, or retrieve data from, arbitrary operations (ops)

Overview

TensorFlow is a programming system that uses graphs to represent computation tasks. The nodes in a graph are called ops (short for operations). An op takes zero or more Tensors, performs some computation, and produces zero or more Tensors. Each Tensor is a typed multi-dimensional array. For example, you can represent a mini-batch of images as a four-dimensional array of floats whose dimensions are [batch, height, width, channels].

A TensorFlow graph describes a computation. To actually compute anything, the graph must be launched in a Session. The Session distributes the graph's ops onto devices such as CPUs or GPUs and provides methods to execute them; these methods return the resulting tensors. In Python, a returned tensor is a numpy ndarray object; in C and C++ it is a tensorflow::Tensor instance.
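
Before the full MNIST example below, here is a minimal sketch of these concepts (assuming TensorFlow 1.x): build a graph of ops, launch it in a Session, and use feed and fetch.

import tensorflow as tf

a = tf.placeholder(tf.float32, shape=(), name='a')  # value is fed in at run time
b = tf.constant(2.0, name='b')
c = tf.multiply(a, b, name='c')                     # an op producing a tensor

with tf.Session() as sess:
    # feed a value for `a`, fetch the value of `c` (returned as a numpy value)
    print(sess.run(c, feed_dict={a: 3.0}))          # 6.0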

MNIST

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

print(tf.__version__)

# 1. create data
mnist = input_data.read_data_sets('../MNIST_data', one_hot=True)

with tf.variable_scope('Input'):
    tf_x = tf.placeholder(dtype=tf.float32, shape=[None, 28*28], name='x')
    image = tf.reshape(tf_x, [-1, 28, 28, 1], name='image')
    tf_y = tf.placeholder(dtype=tf.float32, shape=[None, 10], name='y')
    is_training = tf.placeholder(dtype=tf.bool, shape=None)

# 2. define Network
with tf.variable_scope('Net'):
    """
    "SAME" 类型的padding:
    out_height = ceil(in_height / strides[1]); ceil向上取整
    out_width = ceil(in_width / strides[2])

    "VALID"类型的padding:
    out_height = ceil((in_height - filter_height + 1) / striders[1])
    out_width = ceil((in_width - filter_width + 1) / striders[2]
    """
    conv1 = tf.layers.conv2d(inputs=image, filters=32, kernel_size=5,
                             strides=1, padding='same', activation=tf.nn.relu) # 32x28x28
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=2, strides=2) # 32x14x14
    conv2 = tf.layers.conv2d(pool1, 64, 3, 1, 'same', activation=tf.nn.relu) # 64x14x14
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2) # 64x7x7
    pool2_flat = tf.reshape(pool2, [-1, 7*7*64])
    fc1 = tf.layers.dense(pool2_flat, 1024, tf.nn.relu)
    fc1 = tf.layers.dropout(fc1, rate=0.5, training=is_training)
    predict = tf.layers.dense(fc1, 10)

# 3. define loss & accuracy
with tf.name_scope('loss'):
    loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=predict, label_smoothing=0.01)
    tf.summary.scalar('loss', loss)
with tf.name_scope('accuracy'):
    # tf.metrics.accuracy() returns (accuracy, update_op); [1] takes the update op,
    # which updates the running counters and returns the running-average accuracy
    accuracy = tf.metrics.accuracy(labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(predict, axis=1))[1]
    tf.summary.scalar('accuracy', accuracy)

# 4. define optimizer
with tf.name_scope('train'):
    optimizer_op = tf.train.AdamOptimizer(1e-4).minimize(loss)

# 5. initialize
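# (tf.metrics.accuracy above keeps its running counters in local variables,
#  hence the local_variables_initializer here)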
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

# 6. train
saver = tf.train.Saver()
save_path = './cnn_mnist.ckpt'

with tf.Session() as sess:
    sess.run(init_op)
    # =================
    merge_op = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('logs/test', sess.graph)
    # tensorboard --logdir=logs
    # =================
    for step in range(11000):
        """
        mnist.train.num_examples=55000
        11000 steps * batch size 100 / mnist.train.num_examples = 20 epochs
        """
        batch_x, batch_y = mnist.train.next_batch(100)
        _, ls, train_output = sess.run([optimizer_op, loss, merge_op],
                                       feed_dict={tf_x: batch_x, tf_y: batch_y, is_training: True})
        if step % 100 == 0:
            acc_test, test_output = sess.run([accuracy, merge_op],
                                             feed_dict={tf_x: mnist.test.images, tf_y: mnist.test.labels,
                                                        is_training: False})
            print('Step: ', step, ' | train loss: {:.4f} | test accuracy: {:.3f}'.format(ls, acc_test))
            sess.run(tf.local_variables_initializer()) # reset the metric's counters; otherwise accuracy is a running average over all steps
            train_writer.add_summary(train_output, step)
            test_writer.add_summary(test_output, step)
    saver.save(sess, save_path)

with tf.Session() as sess:
    sess.run(init_op)
    saver.restore(sess, save_path)
    acc_test = sess.run(accuracy, feed_dict={tf_x: mnist.test.images,
                                             tf_y: mnist.test.labels,
                                             is_training: False})
    print('test accuracy: {}'.format(acc_test))

NumPy and Tensor

import tensorflow as tf
import numpy as np

array = np.array([[1,2,3], [4,5,6], [7,8,9]], dtype=np.float32)
print(array)
tensor = tf.constant(array, dtype=tf.float32)  # one way: tf.constant
tensor = tf.convert_to_tensor(array)           # another way: tf.convert_to_tensor

with tf.Session() as sess:
    print(tensor.eval())
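
Going the other way, fetching a tensor in a session returns a NumPy ndarray again, as noted in the overview; a quick check:

with tf.Session() as sess:
    result = sess.run(tensor)  # comes back as a numpy ndarray
    print(type(result))        # <class 'numpy.ndarray'>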

Constants: constant

tf.constant(
    value,
    dtype=None,
    shape=None,
    name='Const',
    verify_shape=False
)

Example:

import tensorflow as tf
import numpy as np

def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg

# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))

with tf.Session() as sess:
    print(value_1.eval())
    print(value_2.eval())
    print(value_3.eval())

'''
[[ 8. 12.]
 [18. 26.]]
[[ 8. 12.]
 [18. 26.]]
[[ 8. 12.]
 [18. 26.]]
'''

Variables: Variable

A variable starts its life when its initializer is run, and it ends when the session is closed.

__init__(
    initial_value=None,
    trainable=None,
    collections=None,
    validate_shape=True,
    caching_device=None,
    name=None,
    variable_def=None,
    dtype=None,
    expected_shape=None,
    import_scope=None,
    constraint=None,
    use_resource=None,
    synchronization=tf.VariableSynchronization.AUTO,
    aggregation=tf.VariableAggregation.NONE,
    shape=None
)

Example:

import tensorflow as tf

state = tf.Variable(1, name='counter')
one = tf.constant(1)

new_value = tf.add(state, one)
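# assign_add mutates `state` in place; the `update` tensor yields its new value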
update = tf.assign_add(state, new_value)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(3):
        state_, new_value_, update_ = sess.run([state, new_value, update])
        print('new_value', new_value_) # 2 4 8
        print('state    ', state_)     # 3 7 15
        print('update   ', update_)    # 3 7 15

Placeholders: placeholder

tf.placeholder(
    dtype,
    shape=None,
    name=None
)

Example:

import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)

with tf.Session() as sess:
    rand_array = np.random.rand(1024, 1024)
    print(sess.run(y, feed_dict={x: rand_array}))

Adding a dimension: expand_dims

tf.expand_dims(
    input,
    axis=None,
    name=None,
    dim=None
)

Example:

'''  
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0))  # [1, 2]
tf.shape(tf.expand_dims(t, 1))  # [2, 1]
tf.shape(tf.expand_dims(t, -1))  # [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0))  # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2))  # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3))  # [2, 3, 5, 1]
'''
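
A runnable version of the first case, as a quick sanity check:

import tensorflow as tf

t = tf.constant([1, 2])                                 # shape [2]
with tf.Session() as sess:
    print(sess.run(tf.shape(tf.expand_dims(t, 0))))     # [1 2]
    print(sess.run(tf.shape(tf.expand_dims(t, -1))))    # [2 1]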

reshape

tf.reshape(
    tensor,
    shape,
    name=None
)

Example:

# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
                        [4, 5, 6],
                        [7, 8, 9]]

# tensor 't' is [[[1, 1], [2, 2]],
#                [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
                        [3, 3, 4, 4]]

# tensor 't' is [[[1, 1, 1],
#                 [2, 2, 2]],
#                [[3, 3, 3],
#                 [4, 4, 4]],
#                [[5, 5, 5],
#                 [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]

# -1 can also be used to infer the shape

# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
                              [2, 2, 2],
                              [3, 3, 3]],
                             [[4, 4, 4],
                              [5, 5, 5],
                              [6, 6, 6]]]

# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7

Type casting: cast

# tensor `a` is [1.8, 2.2], dtype=tf.float32
tf.cast(a, tf.int32)  # ==> [1, 2], dtype=tf.int32
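
A runnable sketch (the float-to-int cast truncates toward zero):

import tensorflow as tf

a = tf.constant([1.8, 2.2], dtype=tf.float32)
b = tf.cast(a, tf.int32)   # truncates: 1.8 -> 1, 2.2 -> 2
with tf.Session() as sess:
    print(sess.run(b))     # [1 2]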

Shared variables

with tf.variable_scope("my_scope"):
    x0 = tf.get_variable("x", shape=(), initializer=tf.constant_initializer(0.))
    x1 = tf.Variable(0., name="x")
    x2 = tf.Variable(0., name="x")

with tf.variable_scope("my_scope", reuse=True):
    x3 = tf.get_variable("x")
    x4 = tf.Variable(0., name="x")

with tf.variable_scope("", default_name="", reuse=True):
    x5 = tf.get_variable("my_scope/x")

print("x0:", x0.op.name)     # x0: my_scope/x
print("x1:", x1.op.name)     # x1: my_scope/x_1
print("x2:", x2.op.name)     # x2: my_scope/x_2
print("x3:", x3.op.name)     # x3: my_scope/x
print("x4:", x4.op.name)     # x4: my_scope_1/x
print("x5:", x5.op.name)     # x5: my_scope/x
print(x0 is x3 and x3 is x5) # True

"""
第一个 variable_scope() 首先创建了一个共享变量 x0,即 my_scope/x. 对于除共享变量以外的所有操作来说,
variable scope 实际上只相当于 name scope,因此随后创建的两个变量 x1, x2 名为 my_scope/x_1, my_scope/x_2.
第二个 variable_scope() 首先重用了 my_scope 范围里的共享变量,这里 x3 就是 x0. 同样的,对于其他非共享
变量来说 variable scope 只是个 name scope,而又因为和第一个variable_scope() 分开与不同的 block, 因此
x4 被命名为 my_scope_1/x.
第三个 variable_scope() 展示了获取共享变量 my_scope/x 的另一种方法,即在根作用域上创建一个variable_scope()
"""

Related functions

tf.get_variable(
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=None,
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=tf.VariableSynchronization.AUTO,
    aggregation=tf.VariableAggregation.NONE
)

Variable initializers

code file: https://github.com/tensorflow/tensorflow/blob/r1.14/tensorflow/python/ops/init_ops.py

Take the creation of a fully connected layer as an example:

tf.layers.dense(
    inputs,
    units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=tf.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None
)

If kernel_initializer is not specified, the layer by default calls the tf.get_variable() shown in the previous section; and if initializer is not specified there either, tf.glorot_uniform_initializer() is used.

This is in fact the uniform-distribution variant of the familiar Xavier initialization.
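
To override the default, pass an initializer explicitly; a minimal sketch (the placeholder shape is only for illustration):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 784])
# the same initializer as the default, but stated explicitly
fc = tf.layers.dense(inputs, 128,
                     kernel_initializer=tf.glorot_uniform_initializer())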

Xavier

tf.glorot_uniform_initializer()
tf.initializers.glorot_uniform()
"""
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
"""
tf.glorot_normal_initializer()
tf.initializers.glorot_normal()

"""
It draws samples from a truncated normal distribution centered on 0
with standard deviation (after truncation) given by
`stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number
of input units in the weight tensor and `fan_out` is the number of
output units in the weight tensor.
"""

Constant initializers

tf.zeros_initializer()
tf.ones_initializer()
tf.constant_initializer()

"""
value = [0, 1, 2, 3, 4, 5, 6, 7]
init = tf.compat.v1.constant_initializer(value)
print('fitting shape:')
with tf.compat.v1.Session():
    x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init)
    x.initializer.run()
    print(x.eval())

fitting shape:
[[ 0.  1.  2.  3.]
[ 4.  5.  6.  7.]]
"""

He init

tf.initializers.he_normal()
"""
It draws samples from a truncated normal distribution centered on 0
with standard deviation (after truncation) given by
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of
input units in the weight tensor.
"""
tf.initializers.he_uniform()

"""
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
"""

Model collection

tensorflow/models
