TensorFlow Basics Notes (11): the conv2d function

 

# Source: http://www.jianshu.com/p/a70c1d931395
import tensorflow as tf
import tensorflow.contrib.slim as slim

# tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)
# Apart from the name argument, which labels the op, there are five parameters that matter:
#
#     input:
#     The input images to convolve. A 4-D Tensor with shape [batch, in_height, in_width, in_channels],
#     i.e. [number of images per training batch, image height, image width, number of channels].
#     Its dtype must be float32 or float64.
#
#     filter:
#     The convolution kernel of the CNN. A Tensor with shape [filter_height, filter_width, in_channels, out_channels],
#     i.e. [kernel height, kernel width, input channels, number of kernels], with the same dtype as input.
#     Note that the third dimension, in_channels, must equal the fourth dimension of input.
#
#     strides: the stride of the convolution along each dimension of the input; a 1-D vector of length 4.
#
#     padding:
#     A string, either "SAME" or "VALID"; it selects the padding scheme (illustrated below).
#     SAME:  out_height = ceil(in_height / stride_height); with stride 1 the output keeps the input size.
#     VALID: the output is smaller than the input: out_height = ceil((in_height - filter_height + 1) / stride_height).
#     (out_width is computed the same way from in_width, filter_width and stride_width.)
#
#     use_cudnn_on_gpu:
#     bool, whether to use cuDNN acceleration; defaults to True.
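
# A quick sanity check on these output-size formulas: a small helper written for these
# notes (not part of the original post; the name out_size is just for illustration).
def out_size(in_size, filter_size, stride, padding):
    """Spatial output size of a 2-D convolution for the given padding mode."""
    if padding == 'SAME':
        # ceil(in_size / stride), written with integer arithmetic
        return (in_size + stride - 1) // stride
    if padding == 'VALID':
        # ceil((in_size - filter_size + 1) / stride)
        return (in_size - filter_size + stride) // stride
    raise ValueError('padding must be SAME or VALID')

print(out_size(6, 3, 1, 'SAME'))   # 6
print(out_size(6, 3, 2, 'VALID'))  # 2
print(out_size(7, 3, 2, 'VALID'))  # 3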

# For tf.contrib.slim.conv2d, the function is defined as follows:

# convolution(inputs,
#           num_outputs,
#           kernel_size,
#           stride=1,
#           padding='SAME',
#           data_format=None,
#           rate=1,
#           activation_fn=nn.relu,
#           normalizer_fn=None,
#           normalizer_params=None,
#           weights_initializer=initializers.xavier_initializer(),
#           weights_regularizer=None,
#           biases_initializer=init_ops.zeros_initializer(),
#           biases_regularizer=None,
#           reuse=None,
#           variables_collections=None,
#           outputs_collections=None,
#           trainable=True,
#           scope=None):
#
# inputs: the input image(s) to convolve, same as for tf.nn.conv2d
# num_outputs: the number of convolution kernels (i.e. the number of filters / output channels)
# kernel_size: the kernel dimensions, [kernel_height, kernel_width]
# stride: the stride of the convolution along each spatial dimension of the image
# padding: the padding scheme, VALID or SAME
# data_format: the format of the input tensor
# rate: the dilation rate for atrous (dilated) convolution; tf.nn.conv2d has no corresponding parameter
# activation_fn: the activation function to apply; defaults to ReLU
# normalizer_fn: an optional normalization function (e.g. batch norm); if given, it is used instead of biases
# normalizer_params: parameters for the normalization function
# weights_initializer: initializer for the weights
# weights_regularizer: optional regularizer for the weights
# biases_initializer: initializer for the biases
# biases_regularizer: optional regularizer for the biases
# reuse: whether the layer and its variables should be reused
# variables_collections: optional list or dict of collections for all the variables
# outputs_collections: collections to which the outputs are added
# trainable: whether the layer's parameters are trainable
# scope: optional variable_scope for the layer's variables
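
# A minimal sketch (not from the original post) of how one slim.conv2d call bundles the
# work you would otherwise do by hand with tf.nn.conv2d: creating the weight and bias
# variables, running the convolution, and applying the activation. The placeholder shape
# and the scope names 'conv1' / 'conv1_manual' are illustrative assumptions.
images = tf.placeholder(tf.float32, [None, 28, 28, 3])

# slim version: one call creates the weights and biases, convolves, and applies ReLU
slim_out = slim.conv2d(images, num_outputs=16, kernel_size=[3, 3], stride=1,
                       padding='SAME', activation_fn=tf.nn.relu, scope='conv1')

# equivalent low-level version built from tf.nn.conv2d
with tf.variable_scope('conv1_manual'):
    weights = tf.get_variable('weights', [3, 3, 3, 16],
                              initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable('biases', [16], initializer=tf.zeros_initializer())
    manual_out = tf.nn.relu(
        tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME') + biases)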

input = tf.Variable(tf.round(10 * tf.random_normal([1, 6, 6, 1])))
filter = tf.Variable(tf.round(5 * tf.random_normal([3, 3, 1, 1])))
#op2 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')

conv_SAME = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME')
conv_VALID = tf.nn.conv2d(input, filter, strides=[1, 2, 2, 1], padding='VALID')
slim_conv2d_SAME = slim.conv2d(input, 1, [3, 3], [1, 1], weights_initializer=tf.ones_initializer, padding='SAME')
slim_conv2d_VALID = slim.conv2d(input, 1, [3, 3], [2, 2], weights_initializer=tf.ones_initializer, padding='VALID')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    conv_SAME_value, conv_VALID_value, slim_conv2d_SAME_value, slim_conv2d_VALID_value = \
        sess.run([conv_SAME, conv_VALID, slim_conv2d_SAME, slim_conv2d_VALID])
    print(conv_SAME_value.shape)
    print(conv_VALID_value.shape)
    print(slim_conv2d_SAME_value.shape)
    print(slim_conv2d_VALID_value.shape)

input = tf.Variable(tf.round(10 * tf.random_normal([1, 7, 7, 1])))
filter = tf.Variable(tf.round(5 * tf.random_normal([3, 3, 1, 1])))
#op2 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')

conv_SAME = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME')
conv_VALID = tf.nn.conv2d(input, filter, strides=[1, 2, 2, 1], padding='VALID')
slim_conv2d_SAME = slim.conv2d(input, 1, [3, 3], [1, 1], weights_initializer=tf.ones_initializer, padding='SAME')
slim_conv2d_VALID = slim.conv2d(input, 1, [3, 3], [2, 2], weights_initializer=tf.ones_initializer, padding='VALID')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    conv_SAME_value, conv_VALID_value, slim_conv2d_SAME_value, slim_conv2d_VALID_value = \
        sess.run([conv_SAME, conv_VALID, slim_conv2d_SAME, slim_conv2d_VALID])
    print(conv_SAME_value.shape)
    print(conv_VALID_value.shape)
    print(slim_conv2d_SAME_value.shape)
    print(slim_conv2d_VALID_value.shape)

# Output:
# (1, 6, 6, 1)
# (1, 2, 2, 1)
# (1, 6, 6, 1)
# (1, 2, 2, 1)

# (1, 7, 7, 1)
# (1, 3, 3, 1)
# (1, 7, 7, 1)
# (1, 3, 3, 1)
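
# These shapes agree with the output-size formulas above (a hand check, not from the original post):
# 6x6 input, 3x3 kernel, stride 1, SAME:  ceil(6 / 1)         = 6  -> (1, 6, 6, 1)
# 6x6 input, 3x3 kernel, stride 2, VALID: ceil((6-3+1) / 2)   = 2  -> (1, 2, 2, 1)
# 7x7 input, 3x3 kernel, stride 1, SAME:  ceil(7 / 1)         = 7  -> (1, 7, 7, 1)
# 7x7 input, 3x3 kernel, stride 2, VALID: ceil((7-3+1) / 2)   = 3  -> (1, 3, 3, 1)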

 

 

 

#coding=utf-8

#http://blog.csdn.net/mao_xiao_feng/article/details/78004522
# tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=None, name=None)
# (see the parameter description at the top of this page)

import tensorflow as tf
#case 2
input = tf.Variable(tf.round(10 * tf.random_normal([1,3,3,2])))
filter = tf.Variable(tf.round(5 * tf.random_normal([1,1,2,1])))
op2 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')
# With this filter, the multiple input channels are reduced to a single output channel:
# the convolution results of the individual channels are summed.

# case 2
# input:  [[[[-14. -11.]
#    [  2.   2.]
#    [ 25.  18.]]
#
#   [[  8.  13.]
#    [ -7.  -7.]
#    [ 11.   6.]]
#
#   [[ -1.   8.]
#    [ 18.  10.]
#    [ -2.  19.]]]]
# Rearranged: the input is 3x3 data with 2 channels
# Channel 1:
#[-14 2 25],
#[8 -7 11],
#[-1 18 -2]
# Channel 2:
#[-11 2 18],
#[13 -7 6],
#[8 10 19]



# filter:  [[[[-3.]
#    [ 2.]]]]

# conv  [[[[ 20.]
#    [ -2.]
#    [-39.]]
#
#   [[  2.]
#    [  7.]
#    [-21.]]
#
#   [[ 19.]
#    [-34.]
#    [ 44.]]]]

#conv rearranged:
#[20 -2 -39],
#[2 7 -21],
#[19 -34 44]

#Calculation: channel 1 * (-3) + channel 2 * 2
#[-14 2 25],
#[8 -7 11],  *  [-3]  +
#[-1 18 -2]
#[-11 2 18],
#[13 -7 6],  * [2]
#[8 10 19]
#result
#[20 -2 -39],
#[2 7 -21],
#[19 -34 44]
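
# A small NumPy check (not part of the original script) that reproduces this case-2 result
# by hand: with a 1x1 filter, each output pixel is channel1 * (-3) + channel2 * 2,
# i.e. the per-channel products summed over the input channels.
import numpy as np

ch1 = np.array([[-14, 2, 25], [8, -7, 11], [-1, 18, -2]], dtype=np.float32)
ch2 = np.array([[-11, 2, 18], [13, -7, 6], [8, 10, 19]], dtype=np.float32)

manual = ch1 * (-3.0) + ch2 * 2.0
print(manual)
# [[ 20.  -2. -39.]
#  [  2.   7. -21.]
#  [ 19. -34.  44.]]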




# #case 3
# input = tf.Variable(tf.random_normal([1,3,3,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,1]))

# op3 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')
# #case 4
# input = tf.Variable(tf.random_normal([1,5,5,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,1]))
#
# op4 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='VALID')
# #case 5
# input = tf.Variable(tf.random_normal([1,5,5,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,1]))
#
# op5 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME')
# #case 6
# input = tf.Variable(tf.random_normal([1,5,5,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,7]))
#
# op6 = tf.nn.conv2d(input, filter, strides=[1, 1, 1, 1], padding='SAME')
# #case 7
# input = tf.Variable(tf.random_normal([1,5,5,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,7]))
#
# op7 = tf.nn.conv2d(input, filter, strides=[1, 2, 2, 1], padding='SAME')
# #case 8
# input = tf.Variable(tf.random_normal([10,5,5,5]))
# filter = tf.Variable(tf.random_normal([3,3,5,7]))
#
# op8 = tf.nn.conv2d(input, filter, strides=[1, 2, 2, 1], padding='SAME')
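
# Expected output shapes for the commented-out cases above, per the output-size formulas
# (a hand check, not printed by the original script):
# case 3: VALID, stride 1: (3-3+1) = 1        -> (1, 1, 1, 1)
# case 4: VALID, stride 1: (5-3+1) = 3        -> (1, 3, 3, 1)
# case 5: SAME,  stride 1: 5                  -> (1, 5, 5, 1)
# case 6: SAME,  stride 1, 7 kernels: 5       -> (1, 5, 5, 7)
# case 7: SAME,  stride 2: ceil(5/2) = 3      -> (1, 3, 3, 7)
# case 8: same as case 7 with batch size 10   -> (10, 3, 3, 7)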

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print("case 2")
    print("input: ", sess.run(input))
    print("filter: ", sess.run(filter))
    print("conv ", sess.run(op2))
    # print("case 3")
    # print(sess.run(op3))
    # print("case 4")
    # print(sess.run(op4))
    # print("case 5")
    # print(sess.run(op5))
    # print("case 6")
    # print(sess.run(op6))
    # print("case 7")
    # print(sess.run(op7))
    # print("case 8")
    # print(sess.run(op8))

 

posted on 2017-11-27 16:03 by Maddock