tensorflow函数说明

 

tensorflow中有一类在tensor的某一维度上求值的函数
求最大值tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None)
求平均值tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None)
(注:reduction_indices 是 axis 参数的旧名称,已不推荐使用;新代码请使用 axis,见下文 tf.reduce_mean 的新签名。)
参数1--input_tensor:待求值的tensor。
参数2--reduction_indices:在哪一维上求解。

import numpy as np
import tensorflow as tf

# Demo: tf.reduce_mean over different axes of a 2x3 matrix.
x = np.array([[1., 2., 3.], [4., 5., 6.]])

# Use a context manager so the session is released even if a run() raises
# (the original called sess.close() manually, which leaks on error).
with tf.Session() as sess:
    # No axis argument: average over all elements -> 3.5
    mean_none = sess.run(tf.reduce_mean(x))
    # axis=0: average down each column -> [2.5 3.5 4.5]
    mean_0 = sess.run(tf.reduce_mean(x, 0))
    # axis=1: average across each row -> [2. 5.]
    mean_1 = sess.run(tf.reduce_mean(x, 1))
    print(x)
    print(mean_none)
    print(mean_0)
    print(mean_1)

D:\python\tensorflow>python s.py
[[1. 2. 3.]
[4. 5. 6.]]
3.5
[2.5 3.5 4.5]
[2. 5.]

 

tf.square(x, name=None)
计算张量对应元素平方

tf.squared_difference(x, y, name=None)
计算张量 x、y 对应元素差平方

tf.reduce_mean(input_tensor, axis=None, keep_dims=False, name=None, reduction_indices=None)
计算张量 input_tensor 平均值
axis: None:全局求平均值;0:求每一列平均值;1:求每一行平均值

tf.nn.bias_add(value, bias, data_format=None, name=None)
将偏差项bias加到value上面,bias必须是一维的

 

import tensorflow as tf

def main():
    """Run every demo function in order."""
    for demo in (test1, testArg, testFunction, testReduce, testBroadcasting):
        demo()

def test1():
    """Matrix-multiply two constant lists and print a zero-initialized variable."""
    print("--------test1()")
    lhs = [[1, 2], [3, 4]]
    rhs = [[2], [3]]
    product = tf.matmul(lhs, rhs)
    zeros = tf.Variable(tf.zeros([10]))
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_op)          # required: the graph contains a Variable
        print(sess.run(product))   # [[ 8] [18]]
        print(sess.run(zeros))     # ten zeros

# tf.argmax() returns the index of the largest value: a scalar for a vector
# input, and one index per column/row (a vector) for a matrix input.
def testArg():
    print("--------testArg()")
    vec = [1, 2, 3]
    mat = [[1, 2, 3], [2, 3, 4], [5, 4, 3], [8, 7, 2]]
    other = [3, 2, 1]
    eq = tf.equal(vec, other)
    eq_float = tf.cast(eq, "float")
    accuracy = tf.reduce_mean(eq_float)
    with tf.Session() as sess:
        print(sess.run(tf.argmax(vec, 0)))  # 2
        # tf.argmax(vec, 1) would raise: a 1-D tensor has no axis 1.
        print(sess.run(tf.argmax(mat, 0)))  # [3 3 1]
        print(sess.run(tf.argmax(mat, 1)))  # [2 2 0 0]
        print(sess.run(tf.equal(2, 1)))     # False
        print(sess.run(eq))                 # [False True False]
        print(sess.run(eq_float))           # [0. 1. 0.]
        print(sess.run(accuracy))           # 0.33333334
        
def testFunction():
    """Element-wise ops: square, squared_difference, and natural log.

    Fix: the original created the session with ``sess = tf.Session()`` and
    never closed it, leaking the session's resources. A ``with`` block now
    guarantees cleanup.
    """
    print("--------testFunction()")
    with tf.Session() as sess:
        mylist = [2, 4, 6, 8]
        # element-wise square
        result = sess.run(tf.square(mylist))
        print(result)  # [ 4 16 36 64]

        x = [1, 2, 3]
        y = [4, 5, 6]
        # element-wise (x - y) ** 2
        result = sess.run(tf.squared_difference(x, y))
        print(result)  # [9 9 9]

        x = [1, 2.71828, 9]
        # natural logarithm
        print(sess.run(tf.log(x)))
        
def testReduce():
    """reduce_mean / reduce_max / reduce_sum on a vector, then bias_add."""
    print("--------testReduce()")
    vec = [1, 2, 3]
    with tf.Session() as sess:
        # Integer input -> integer mean (fraction truncated).
        print(sess.run(tf.reduce_mean(vec)))  # 2
        print(sess.run(tf.reduce_max(vec)))   # 3
        print(sess.run(tf.reduce_sum(vec)))   # 6

        mat = [[1, 1], [3, 4], [9, 13]]
        bias = [1, -1]
        # bias_add adds the 1-D bias to every row of mat.
        print(sess.run(tf.nn.bias_add(mat, bias)))  # [[ 2  0] [ 4  3] [10 12]]

# Broadcasting: TensorFlow allows adding a matrix and a row vector, unlike
# strict linear algebra. For C = A + b, C[i, j] = A[i, j] + b[j]: b is
# implicitly replicated onto every row of A, which requires A's column count
# to match b's length. This implicit replication is called "broadcasting".
def testBroadcasting():
    print("--------testBroadcasting()")
    A = [[1, 2], [3, 4], [4, 6]]
    b = [[2, 3]]
    C = tf.add(A, b)
    # Fix: the original ran tf.global_variables_initializer() here, but this
    # function defines no tf.Variable, so that init was a dead no-op; dropped.
    with tf.Session() as sess:
        print(sess.run(C))  # [[3 5] [5 7] [6 9]]

# Run all demos only when executed as a script, not on import.
if __name__ == "__main__":
    main()

--------test1()
[[ 8]
[18]]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
--------testArg()
2
[3 3 1]
[2 2 0 0]
False
[False True False]
[0. 1. 0.]
0.33333334
--------testFunction()
[ 4 16 36 64]
[9 9 9]
[0. 0.99999934 2.1972246 ]
--------testReduce()
2
3
6
[[ 2 0]
[ 4 3]
[10 12]]
--------testBroadcasting()
[[3 5]
[5 7]
[6 9]]

posted @ 2018-12-27 16:40  牧 天  阅读(195)  评论(0)    收藏  举报