TensorFlow Knowledge Points
I. Building TensorFlow with Bazel: check the Bazel version
Check the required Bazel version in /tensorflow/tensorflow/configure.py.
https://github.com/tensorflow/tensorflow
https://github.com/bazelbuild/bazel/releases?after=0.26.1
https://tensorflow.google.cn/
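As a rough sketch (the exact constant names and values vary across TensorFlow releases, so treat this as an illustration and verify against the file itself), configure.py pins a supported Bazel range along these lines:

# Illustrative sketch of the Bazel version pin in configure.py;
# the actual constants differ between TensorFlow releases.
_TF_MIN_BAZEL_VERSION = '0.24.1'  # placeholder value
_TF_MAX_BAZEL_VERSION = '0.26.1'  # placeholder value

current_bazel_version = check_bazel_version(_TF_MIN_BAZEL_VERSION,
                                            _TF_MAX_BAZEL_VERSION)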
II. Basic Knowledge Points
1. Printing information about the trainable variables
""" slim.model_analyzer.analyze_vars打印出與訓練變量相關的信息 """ import tensorflow as tf import tensorflow.contrib.slim as slim x1=tf.Variable(tf.constant(1,shape=[1],dtype=tf.float32,name='x1')) x2=tf.Variable(tf.random_normal(shape=[2,1],dtype=tf.float32,name='x2')) y=tf.trainable_variables() for i in y:print(6666)print(i) slim.model_analyzer.analyze_vars(y,print_info=True) print(88888888)2. tf.concat拼接
""" tf.concat拼接 """ import tensorflow as tf t1=tf.constant([[1,2,3],[4,5,6]]) t2=tf.constant([[7,8,9],[10,11,12]]) t3=tf.concat([t1,t2],0) t4=tf.concat([t1,t2],1) print('t1={}'.format(t1)) print('t2={}'.format(t2)) print('t3={}'.format(t3)) print('t4={}'.format(t4)) """ tf.concat拼接 """ import tensorflow as tf t1=tf.constant([[[1,2,3],[4,5,6]]]) t2=tf.constant([[[7,8,9],[10,11,12]]]) t3=tf.concat([t1,t2],0) t4=tf.concat([t1,t2],1) t5=tf.concat([t1,t2],-1) print('t1={}'.format(t1)) print('t2={}'.format(t2)) print('t3={}'.format(t3)) print('t4={}'.format(t4)) print('t5={}'.format(t5))3.tensorboard調用
graphs: displays the computation graph.
tf.summary.FileWriter(path, sess.graph)
import tensorflow as tf

input1 = tf.constant([1.0, 2.0, 3.0], name='input_1')
input2 = tf.constant([2.0, 5.0, 8.0], name='input_2')
output = tf.add(input1, input2, name='add')
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./data/', sess.graph)
    sess.run(tf.global_variables_initializer())
    print(sess.run(output))
writer.close()

The GRAPHS tab shows the network structure.
4. 'SAME' and 'VALID' padding in pooling and convolution
One-dimensional case:

"VALID" = without padding:

    inputs:  1  2  3  4  5  6  7  8  9  10 11 (12 13)
            |________________|                dropped
                           |_________________|

VALID discards the inputs that do not fit (here 12 and 13).

"SAME" = with zero padding:

SAME rounds the output size up (ceil).
Convolution output-size computation:
- For the SAME padding, the output height and width are computed as:
out_height = ceil(float(in_height) / float(strides[1]))
out_width = ceil(float(in_width) / float(strides[2]))
And
- For the VALID padding, the output height and width are computed as:
out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
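A quick shape check of these formulas (a sketch added here, not part of the original notes): a 5x5 input with a 3x3 kernel and stride 2 gives ceil(5/2) = 3 under SAME and ceil((5-3+1)/2) = 2 under VALID.

import tensorflow as tf

x = tf.ones([1, 5, 5, 1])   # in_height = in_width = 5
k = tf.ones([3, 3, 1, 1])   # 3x3 kernel
same = tf.nn.conv2d(x, k, strides=[1, 2, 2, 1], padding='SAME')
valid = tf.nn.conv2d(x, k, strides=[1, 2, 2, 1], padding='VALID')
print(same.shape)   # (1, 3, 3, 1)
print(valid.shape)  # (1, 2, 2, 1)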
5. tf.variable_scope and tf.name_scope
tf.variable_scope lets variables in different scopes share the same name; it applies both to variables obtained with tf.get_variable and to tf.Variable variables.
tf.name_scope also lets variables share the same name, but only for tf.Variable variables (tf.get_variable ignores it).
import tensorflow as tf

with tf.variable_scope('V1'):
    a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a2 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')
with tf.variable_scope('V2'):
    a3 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a4 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(a1.name)  # V1/a1:0
    print(a2.name)  # V1/a2:0
    print(a3.name)  # V2/a1:0
    print(a4.name)  # V2/a2:0

import tensorflow as tf

# Note: this example raises a ValueError, because tf.get_variable ignores
# tf.name_scope, so a1 and a3 both try to create a variable named 'a1'
# in the root scope.
with tf.name_scope('V1'):
    a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a2 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')
with tf.name_scope('V2'):
    a3 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a4 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(a1.name)
    print(a2.name)
    print(a3.name)
    print(a4.name)

import tensorflow as tf

with tf.name_scope('V1'):
    # a1 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a2 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')
with tf.name_scope('V2'):
    # a3 = tf.get_variable(name='a1', shape=[1], initializer=tf.constant_initializer(1))
    a4 = tf.Variable(tf.random_normal(shape=[2, 3], mean=0, stddev=1), name='a2')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # print(a1.name)
    print(a2.name)  # V1/a2:0
    # print(a3.name)
    print(a4.name)  # V2/a2:0

6. tf.pad, two-dimensional case
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
# 2 rows above, 1 row below, 3 columns left, 2 columns right
paddings = tf.constant([[2, 1], [3, 2]])
# CONSTANT pads with zeros
t_pad_constant = tf.pad(t, paddings, mode='CONSTANT')
with tf.Session() as sess:
    print(sess.run(t_pad_constant))

import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
# 1 row above, 1 row below, 2 columns left, 2 columns right
paddings = tf.constant([[1, 1], [2, 2]])
# CONSTANT pads with zeros
t_pad_constant = tf.pad(t, paddings, mode='CONSTANT')
# REFLECT mirrors around the edge, excluding the edge itself
t_pad_reflect = tf.pad(t, paddings, mode='REFLECT')
with tf.Session() as sess:
    print(sess.run(t_pad_constant))
    print(sess.run(t_pad_reflect))

import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
# 1 row above, 1 row below, 2 columns left, 2 columns right
paddings = tf.constant([[1, 1], [2, 2]])
# CONSTANT pads with zeros
t_pad_constant = tf.pad(t, paddings, mode='CONSTANT')
# REFLECT mirrors around the edge, excluding the edge itself
t_pad_reflect = tf.pad(t, paddings, mode='REFLECT')
# SYMMETRIC mirrors around the edge, including the edge itself
t_pad_symmetric = tf.pad(t, paddings, mode='SYMMETRIC')
with tf.Session() as sess:
    print(sess.run(t_pad_constant))
    print(sess.run(t_pad_reflect))
    print(sess.run(t_pad_symmetric))

Higher-dimensional case (a 4-D tensor):
import tensorflow as tf

t = tf.constant([[[[1, 2, 3], [4, 5, 6]],
                  [[1, 2, 3], [4, 5, 6]],
                  [[1, 2, 3], [4, 5, 6]]]])
# no padding on the batch and channel axes; 1 row above/below, 1 column left/right
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
# CONSTANT pads with zeros
t_pad_constant = tf.pad(t, paddings, mode='CONSTANT')
# REFLECT mirrors around the edge, excluding the edge itself
t_pad_reflect = tf.pad(t, paddings, mode='REFLECT')
# SYMMETRIC mirrors around the edge, including the edge itself
t_pad_symmetric = tf.pad(t, paddings, mode='SYMMETRIC')
with tf.Session() as sess:
    print(sess.run(t))
    print(sess.run(t).shape)
    print(sess.run(t_pad_constant))
    print(sess.run(t_pad_constant).shape)
    print(sess.run(t_pad_reflect))
    print(sess.run(t_pad_reflect).shape)
    print(sess.run(t_pad_symmetric))
    print(sess.run(t_pad_symmetric).shape)

The printed shapes grow from (1, 3, 2, 3) to (1, 5, 4, 3) for every padding mode.
7. Learning-rate decay
The return value of tf.train.exponential_decay:
decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
Here decayed_learning_rate is the learning rate used at each optimization step, learning_rate is the preset initial learning rate, decay_rate is the decay coefficient, and decay_steps is the decay speed.
In the code below the return value is 0.1 x 0.96^(global_step / 100). With staircase=True the exponent global_step / 100 is truncated to an integer, so the printed result is the stepped blue curve; with staircase=False the exponent stays fractional, so the result is the smooth red curve. For example, at global_step = 250, staircase=True gives 0.1 x 0.96^2 ≈ 0.0922, while staircase=False gives 0.1 x 0.96^2.5 ≈ 0.0903.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

learning_rate = 0.1
decay_rate = 0.96
global_steps = 1000
decay_steps = 100

global_ = tf.Variable(tf.constant(0))
c = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=True)
d = tf.train.exponential_decay(learning_rate, global_, decay_steps, decay_rate, staircase=False)

T_C = []
F_D = []
with tf.Session() as sess:
    for i in range(global_steps):
        T_c = sess.run(c, feed_dict={global_: i})
        T_C.append(T_c)
        F_d = sess.run(d, feed_dict={global_: i})
        F_D.append(F_d)

plt.figure()
plt.plot(range(global_steps), F_D, 'r-')
plt.plot(range(global_steps), T_C, 'b-')
plt.show()

The resulting plot (omitted here) shows the smooth red curve for staircase=False and the stepped blue curve for staircase=True.
8. tf.assign
# tf.assign assigns a new value to a variable
import tensorflow as tf

a = tf.Variable(tf.constant(0.0), dtype=tf.float32)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print('a={}'.format(sess.run(a)))
    print('a={}'.format(sess.run(tf.assign(a, 1))))

9. tf.nn.moments
# computing mean and variance in TensorFlow
import tensorflow as tf

W = tf.constant([[1., 2., 3.], [4., 5., 6.]])
mean, var = tf.nn.moments(W, axes=[0])
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    Mean = sess.run(mean)
    print(Mean)
    Var = sess.run(var)
    print(Var)

# computing mean and variance in TensorFlow
import tensorflow as tf

W = tf.constant([[1., 2., 3.], [4., 5., 6.]])
mean, var = tf.nn.moments(W, axes=[1])
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    Mean = sess.run(mean)
    print(Mean)
    Var = sess.run(var)
    print(Var)

# computing mean and variance in TensorFlow
import tensorflow as tf

W = tf.constant([[1., 2., 3.], [4., 5., 6.]])
mean, var = tf.nn.moments(W, axes=[0, 1])
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    Mean = sess.run(mean)
    print(Mean)
    Var = sess.run(var)
    print(Var)

# computing mean and variance in TensorFlow
import tensorflow as tf

W = tf.constant([[[[1., 2., 3.], [4., 5., 6.]]]])
mean, var = tf.nn.moments(W, axes=[0, 1, 2])
y = W - mean
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    Mean = sess.run(mean)
    print(Mean)
    print(sess.run(y))

To get the mean of each feature map, treat each feature map as one neuron and take the moments over axes [0, 1, 2]; for a 2-D matrix, computing the per-neuron mean and variance uses axis 0.
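As a sketch of that batch-normalization use case (my own illustration, not from the original notes): for an NHWC tensor, per-channel moments are taken over axes [0, 1, 2] and then used to normalize.

import tensorflow as tf

x = tf.random_normal([8, 4, 4, 16])           # NHWC batch
mean, var = tf.nn.moments(x, axes=[0, 1, 2])  # one mean/variance per channel
y = tf.nn.batch_normalization(x, mean, var, offset=None, scale=None,
                              variance_epsilon=1e-5)
with tf.Session() as sess:
    print(sess.run(tf.shape(mean)))  # [16]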
10. TensorFlow queue operations
A queue is a first-in, first-out linear data structure: elements are added at the tail and removed from the head.
Creating a queue:
import tensorflow as tf

with tf.Session() as sess:
    # first-in, first-out queue holding three elements
    q = tf.FIFOQueue(3, 'float')
    # op that will enqueue three values (preparation only)
    init = q.enqueue_many(([0.1, 0.2, 0.3],))
    # actually enqueue the three values
    sess.run(init)
    # get the queue length
    quelen = sess.run(q.size())
    for i in range(quelen):
        # dequeue one element
        print(sess.run(q.dequeue()))

Note: every enqueue into and dequeue from the queue must itself be executed with sess.run.
import tensorflow as tf

with tf.Session() as sess:
    # first-in, first-out queue holding three elements
    q = tf.FIFOQueue(3, 'float')
    # op that will enqueue three values (preparation; run later)
    init = q.enqueue_many(([0.1, 0.2, 0.3],))
    # op that dequeues one element (preparation; run later)
    init2 = q.dequeue()
    # op that enqueues one element (preparation; run later)
    init3 = q.enqueue(1.)
    # actually enqueue the three values
    sess.run(init)
    sess.run(init2)
    sess.run(init3)
    # get the queue length
    quelen = sess.run(q.size())
    for i in range(quelen):
        # dequeue one element
        print(sess.run(q.dequeue()))

In the programs above, the queue operations run inside the session. The upside is that they rarely block and bugs are easy to locate; the downside is low efficiency. Below, the QueueRunner queue manager handles the operations asynchronously: it creates a set of threads that work alongside the main thread, so data reading and computation (model training) proceed together, improving efficiency.
import tensorflow as tf

with tf.Session() as sess:
    # first-in, first-out queue holding up to 1000 elements
    q = tf.FIFOQueue(1000, 'float')
    counter = tf.Variable(0.0)
    # counter = counter + 1.0
    add_op = tf.assign_add(counter, tf.constant(1.0))
    # op that enqueues the counter value (run later)
    enqueueData_op = q.enqueue(counter)
    qr = tf.train.QueueRunner(q, enqueue_ops=[add_op, enqueueData_op] * 2)
    sess.run(tf.global_variables_initializer())
    qr.create_threads(sess, start=True)
    for i in range(10):
        print(sess.run(q.dequeue()))

At first this executes normally, but in the end the QueueRunner raises an error. The reason: although multithreading conveniently lets threads work together in one session and execute in parallel, with this kind of synchronization, when one thread wants to close the session, the session is closed forcibly and the unfinished threads are forcibly terminated as well.
To solve these multithreading synchronization and shutdown problems, TensorFlow provides the Coordinator together with QueueRunner to control and coordinate threads.
import tensorflow as tf

with tf.Session() as sess:
    # first-in, first-out queue holding up to 1000 elements
    q = tf.FIFOQueue(1000, 'float')
    counter = tf.Variable(0.0)
    # counter = counter + 1.0
    add_op = tf.assign_add(counter, tf.constant(1.0))
    # op that enqueues the counter value (run later)
    enqueueData_op = q.enqueue(counter)
    # create the enqueue threads
    qr = tf.train.QueueRunner(q, enqueue_ops=[add_op, enqueueData_op] * 2)
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    # Start the enqueue threads. The Coordinator receives and handles all
    # threads once they start, so when one thread finishes it notifies all
    # the others and coordination completes cleanly.
    enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
    for i in range(10):
        print(sess.run(q.dequeue()))
    coord.request_stop()
    coord.join(enqueue_threads)

11. global_step in minimize
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 1], name='x')
y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
w = tf.Variable(tf.constant(0.0))

# global_steps = tf.Variable(0, trainable=False)
global_steps = tf.train.get_or_create_global_step()
# learning_rate = tf.train.exponential_decay(0.1, global_steps, 10, 2, staircase=False)
loss = tf.pow(w * x - y, 2)

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss, global_step=global_steps)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(5):
        sess.run(train_step, feed_dict={x: np.linspace(1, 2, 10).reshape([10, 1]),
                                        y: np.linspace(1, 2, 10).reshape([10, 1])})
        # print(sess.run(learning_rate))
        print(sess.run(global_steps))

As you can see, global_steps is incremented by 1 automatically on every minimize step.
12. tf.add_to_collection
tf.add_to_collection('list_name', element): appends element to the collection named list_name.
tf.get_collection('list_name'): returns the collection (list) named list_name.
tf.add_n(list): sums the elements of the list and returns the result.
import tensorflow as tf

tf.add_to_collection('losses', tf.constant(1.2))
tf.add_to_collection('losses', tf.constant(5.))
with tf.Session() as sess:
    print(sess.run(tf.get_collection('losses')))
    print(sess.run(tf.add_n(tf.get_collection('losses'))))

13. tf.nn.depthwise_conv2d: its number of output channels is the kernel's input channels multiplied by its channel multiplier
https://blog.csdn.net/mao_xiao_feng/article/details/78003476
import tensorflow as tf

img1 = tf.constant(value=[[[[1], [2], [3], [4]],
                           [[1], [2], [3], [4]],
                           [[1], [2], [3], [4]],
                           [[1], [2], [3], [4]]]], dtype=tf.float32)
img2 = tf.constant(value=[[[[1], [1], [1], [1]],
                           [[1], [1], [1], [1]],
                           [[1], [1], [1], [1]],
                           [[1], [1], [1], [1]]]], dtype=tf.float32)
img = tf.concat(values=[img1, img2], axis=3)
print(img1.shape)
print('img.shape={}'.format(img.shape))  # (1, 4, 4, 2)

filter1 = tf.constant(value=0, shape=[3, 3, 1, 1], dtype=tf.float32)
filter2 = tf.constant(value=1, shape=[3, 3, 1, 1], dtype=tf.float32)
filter3 = tf.constant(value=2, shape=[3, 3, 1, 1], dtype=tf.float32)
filter4 = tf.constant(value=3, shape=[3, 3, 1, 1], dtype=tf.float32)
filter_out1 = tf.concat(values=[filter1, filter2], axis=2)
filter_out2 = tf.concat(values=[filter3, filter4], axis=2)
filter = tf.concat(values=[filter_out1, filter_out2, filter_out2], axis=3)
print(filter_out1.shape)
print(filter_out2.shape)
print('filter.shape={}'.format(filter.shape))  # (3, 3, 2, 3)

out_img = tf.nn.conv2d(input=img, filter=filter, strides=[1, 1, 1, 1], padding='VALID')
print('out_img.shape={}'.format(out_img.shape))  # (1, 2, 2, 3)

t_img = tf.nn.depthwise_conv2d(input=img, filter=filter, strides=[1, 1, 1, 1], rate=[1, 1], padding='VALID')
print('t_img.shape={}'.format(t_img.shape))  # (1, 2, 2, 6)
- input: the image to be convolved; a 4-D Tensor of shape [batch, height, width, in_channels], i.e. [number of images in a training batch, image height, image width, number of image channels].
- filter: the convolution kernel; a 4-D Tensor of shape [filter_height, filter_width, in_channels, channel_multiplier], i.e. [kernel height, kernel width, input channels, channel multiplier]; as before, the third dimension in_channels equals the fourth dimension of input.
- strides: the sliding stride of the convolution.

The result is a Tensor of shape [batch, out_height, out_width, in_channels * channel_multiplier]; note that the number of output channels becomes in_channels * channel_multiplier.
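A minimal check of that output-shape rule (my own sketch): 2 input channels with channel multiplier 3 give 6 output channels.

import tensorflow as tf

img = tf.ones([1, 4, 4, 2])        # NHWC, 2 input channels
dw_filter = tf.ones([3, 3, 2, 3])  # channel_multiplier = 3
out = tf.nn.depthwise_conv2d(img, dw_filter, strides=[1, 1, 1, 1], padding='VALID')
print(out.shape)  # (1, 2, 2, 6): in_channels * channel_multiplier = 6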
14. tf.expand_dims: expanding dimensions
import tensorflow as tf

a = tf.Variable(tf.zeros(shape=[2, 3, 4]))
b = tf.expand_dims(a, axis=-1)
c = tf.expand_dims(a, axis=0)
d = a[:, :, 0]
e = tf.expand_dims(a[:, :, 0], axis=0)
f = tf.expand_dims(a[:, :, 0], axis=-1)
# tf.reset_default_graph()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(a)
    print(b)
    # print(sess.run(b))
    print(c)
    print(d)
    print(e)
    print(f)

15. tf.gradients: computing gradients
https://blog.csdn.net/taoyanqi8932/article/details/77602721
import tensorflow as tf

w1 = tf.Variable([[1, 2]])  # a1, a2
w2 = tf.Variable([[3, 4]])
res = tf.matmul(w1, [[2], [1]])  # 2*a1 + a2
grads = tf.gradients(res, [w1])  # gradient of res with respect to w1

a = tf.constant(0.)
b = 2 * a
g1 = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
g2 = tf.gradients(b, [a, b])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(res))
    print(sess.run(grads))
    print(sess.run(g1))
    print(sess.run(g2))

16. tf.trainable_variables() vs. tf.global_variables()
tf.trainable_variables returns the list of variables that need training.
tf.global_variables returns the list of all variables.
import tensorflow as tf

v = tf.Variable(tf.constant(0.0, shape=[1], dtype=tf.float32), name='v')
v1 = tf.Variable(tf.constant(5, shape=[1], dtype=tf.float32), name='v1')

global_step = tf.Variable(tf.constant(5, shape=[1], dtype=tf.float32), name='global_step', trainable=False)
ema = tf.train.ExponentialMovingAverage(0.99, global_step)

for i in tf.trainable_variables():
    print(i)
print('===============')
for i in tf.global_variables():
    print(i)

17. tf.clip_by_global_norm
https://blog.csdn.net/u013713117/article/details/56281715
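Since the original note gives only the link, here is a minimal sketch of the behavior: the function rescales a list of tensors so that their joint (global) norm does not exceed clip_norm, and also returns that global norm.

import tensorflow as tf

t1 = tf.constant([3.0, 4.0])  # contributes norm 5
t2 = tf.constant([0.0])
clipped, global_norm = tf.clip_by_global_norm([t1, t2], clip_norm=1.0)
with tf.Session() as sess:
    print(sess.run(global_norm))  # 5.0
    print(sess.run(clipped))      # t1 scaled by 1/5 -> [0.6, 0.8]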
18. In tf.reduce_sum, reduction_indices works the same way as axis; keepdims=True keeps the reduced dimensions.
import tensorflow as tf

x = tf.constant([[1, 1, 1], [1, 1, 1]])
a = tf.reduce_sum(x, keepdims=True)         # [[6]]
b = tf.reduce_sum(x, 0, keepdims=True)      # [[2, 2, 2]]
c = tf.reduce_sum(x, reduction_indices=0)   # [2, 2, 2]
d = tf.reduce_sum(x, 1, keepdims=True)      # [[3], [3]]
e = tf.reduce_sum(x, reduction_indices=1)   # [3, 3]
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))
    print(sess.run(d))
    print(sess.run(e))

19. tf.app.flags.FLAGS
Command-line argument parsing.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('flag_float', 0.01, 'input a float')
tf.app.flags.DEFINE_integer('flag_int', 400, 'input an int')
tf.app.flags.DEFINE_boolean('flag_bool', True, 'input a bool')
tf.app.flags.DEFINE_string('flag_string', 'yes', 'input a string')

print(FLAGS.flag_float)
print(FLAGS.flag_int)
print(FLAGS.flag_bool)
print(FLAGS.flag_string)

20. Reading images with TensorFlow and writing them with OpenCV
import cv2
import tensorflow as tf

def tf_read_image():
    path = './img_size.jpg'
    with tf.gfile.FastGFile(path, 'rb') as f:  # 'rb': read the JPEG bytes in binary mode
        image_data = f.read()
    with tf.Session() as sess:
        image_data = tf.image.decode_jpeg(image_data)
        image = sess.run(image_data)
        """First way"""
        r, g, b = cv2.split(image)
        image = cv2.merge([b, g, r])
        cv2.imwrite('img_size_out.jpg', image)
        """Second way"""
        # plt.imshow(image)
        # plt.show()
        # print(image.shape)

OpenCV stores images in B, G, R order, while decode_jpeg returns R, G, B. So the decoded image is split into r, g, b and re-merged as b, g, r before being written by OpenCV; writing it directly would put B-channel values where OpenCV expects the R channel, and the whole image would look blue. Showing it directly with matplotlib works fine. Conversely, when reading images with OpenCV, remember to add the line below:
cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

21. Connections between different computation graphs, used to isolate tensors and computations
import tensorflow as tf

# define computation graph g1
g1 = tf.Graph()
with g1.as_default():
    v = tf.get_variable('v', shape=[1], initializer=tf.zeros_initializer)

# define computation graph g2
g2 = tf.Graph()
with g2.as_default():
    v = tf.get_variable('v', shape=[1], initializer=tf.ones_initializer)

# read the value of variable v in graph g1
with tf.Session(graph=g1) as sess:
    sess.run(tf.global_variables_initializer())
    with tf.variable_scope('', reuse=True):
        print(sess.run(tf.get_variable('v')))

# read the value of variable v in graph g2
with tf.Session(graph=g2) as sess:
    sess.run(tf.global_variables_initializer())
    with tf.variable_scope('', reuse=True):
        print(sess.run(tf.get_variable('v')))
import tensorflow as tf

v1 = tf.constant([1.0, 2.0, 3.0, 4.0])
v2 = tf.constant([4.0, 3.0, 2.0, 1.0])
with tf.Session() as sess:
    print(sess.run(tf.greater(v1, v2)))

tf.where has two usages:
where(condition, x=None, y=None, name=None)
If x and y are None, it returns a Tensor of the positions where condition is True.
import tensorflow as tf

labels = tf.constant([[[1], [2], [3]], [[4], [5], [6]]], dtype=tf.float32)
ignore_label = -1
a = tf.squeeze(labels)
b = tf.not_equal(a, ignore_label)
c = tf.where(b)
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))

If x and y are given, the return value has the same shape as x and y: where condition is True the result takes the value from x, otherwise the value from y.
import tensorflow as tf

v1 = tf.constant([1.0, 2.0, 3.0, 4.0])
v2 = tf.constant([4.0, 3.0, 2.0, 1.0])
with tf.Session() as sess:
    print(sess.run(tf.greater(v1, v2)))
    # True selects from v1, False selects from v2
    print(sess.run(tf.where(tf.greater(v1, v2), v1, v2)))

23. shape, set_shape, reshape, get_shape (note whether x1 or x2 is being fed)
import tensorflow as tf

x1 = tf.placeholder(tf.float32, shape=[2, 2])
print(tf.shape(x1))
print(x1.get_shape())

As you can see, tf.shape returns a tensor, while get_shape returns a tuple-like TensorShape.
import tensorflow as tf

x1 = tf.placeholder(tf.int32)
x2 = tf.reshape(x1, [2, 2])
print(tf.shape(x1))

with tf.Session() as sess:
    print(sess.run(tf.shape(x2), feed_dict={x1: [0, 1, 2, 3]}))

As you can see, reshape produces a new shape, creating a new tensor for us to use.
import tensorflow as tf

x1 = tf.placeholder(tf.int32)
x1 = tf.reshape(x1, [2, 2])  # use tf.reshape()
print(tf.shape(x1))

sess = tf.Session()
print(sess.run(tf.shape(x1), feed_dict={x1: [0, 1, 2, 3]}))

Here reshape has rebound x1 to the reshaped tensor, so feeding the flat list now raises an error.
import tensorflow as tf

x1 = tf.placeholder(tf.float32)
print(x1.get_shape())

with tf.Session() as sess:
    print(sess.run(tf.shape(x1), feed_dict={x1: [[0, 1], [2, 3]]}))

Without set_shape, the shape information in the graph is not updated.
import tensorflow as tf

x1 = tf.placeholder(tf.int32)
x1.set_shape([2, 2])
print(x1.get_shape())

with tf.Session() as sess:
    # this feed matches the declared [2, 2] shape and works:
    print(sess.run(tf.shape(x1), feed_dict={x1: [[0, 1], [2, 3]]}))
    # this flat feed is inconsistent with the graph and raises an error:
    # print(sess.run(tf.shape(x1), feed_dict={x1: [0, 1, 2, 3]}))

set_shape updates the shape information in the graph: x1 started with no shape, and after set_shape the graph's information has changed. But set_shape cannot change the tensor's actual shape, so passing in a value inconsistent with the graph raises an error.
import tensorflow as tf

x1 = tf.Variable([[0, 1], [2, 3]])
print(x1.get_shape())
x1 = tf.reshape(x1, [4, 1])
print(x1.get_shape())

x1 = tf.Variable([[0, 1], [2, 3]])
print(x1.get_shape())
x1 = x1.set_shape([4, 1])  # fails: the shape [2, 2] is already fully defined
print(x1.get_shape())

As you can see, use reshape to create a new tensor or to dynamically change an existing tensor's shape; use set_shape when you only want to update or fill in the shape information of a tensor in the graph.
Flattening with tf.reshape(..., [-1]) turns a tensor into a single row:
import tensorflow as tf

labels = tf.constant([[[1], [2], [3]], [[4], [5], [6]]], dtype=tf.float32)
d = tf.reshape(labels, [-1])
with tf.Session() as sess:
    print(sess.run(d))

24. Different computation graphs
def different_graph():
    # define computation graph g1
    g1 = tf.Graph()
    with g1.as_default():
        v = tf.get_variable('v', shape=[1], initializer=tf.zeros_initializer)
    # define computation graph g2
    g2 = tf.Graph()
    with g2.as_default():
        v = tf.get_variable('v', shape=[1, 2], initializer=tf.ones_initializer)
    # read the value of variable v in graph g1
    with tf.Session(graph=g1) as sess:
        sess.run(tf.global_variables_initializer())
        with tf.variable_scope('', reuse=True):
            print('g1_V', sess.run(tf.get_variable('v')))
    # read the value of variable v in graph g2
    with tf.Session(graph=g2) as sess:
        sess.run(tf.global_variables_initializer())
        with tf.variable_scope('', reuse=True):
            print('g2_V', sess.run(tf.get_variable('v')))
            a = tf.get_variable('v')
            print('g2_V.shape', sess.run(tf.shape(a)))

25. tf.boolean_mask: selecting the elements you need
import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    tensor = [0, 1, 2, 3]
    mask = np.array([True, False, True, False])
    print(sess.run(tf.boolean_mask(tensor, mask)))
    # 2-D
    tensor = [[1, 2], [3, 4], [5, 6]]
    mask = np.array([True, False, True])
    print(sess.run(tf.boolean_mask(tensor, mask)))

26. Slicing with tf.slice and tf.gather
http://www.360doc.com/content/17/0115/14/10408243_622618137.shtml
tf.slice(input_, begin, size, name=None): extracts a contiguous sub-region at the specified index range; it can, for example, crop a specified pixel region out of an image.
tf.gather(params, indices, validate_indices=None, name=None): extracts a subset along axis 0 at the specified set of indices; suitable for extracting non-contiguous subsets.
import tensorflow as tf

input = tf.constant([[[1, 1, 1], [2, 2, 2]],
                     [[3, 3, 3], [4, 4, 4]],
                     [[5, 5, 5], [6, 6, 6]]], dtype=tf.float32)
a = tf.slice(input, [1, 0, 0], [1, 1, 3])
b = tf.slice(input, [1, 0, 0], [1, 2, 3])
c = tf.slice(input, [1, 0, 0], [2, 1, 3])
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))

import tensorflow as tf

input = tf.constant([[[1, 1, 1], [2, 2, 2]],
                     [[3, 3, 3], [4, 4, 4]],
                     [[5, 5, 5], [6, 6, 6]]], dtype=tf.float32)
a = tf.gather(input, [0, 1])
with tf.Session() as sess:
    print(sess.run(a))

In semantic segmentation this can be used to drop unwanted ground-truth labels. For example, the snippet below assumes 6 classes and discards the label value 6:
raw_gt = np.array([0, 1, 2, 3, 4, 5, 6])  # 6 classes
less_equal = tf.less_equal(raw_gt, 6 - 1)
where_index = tf.where(less_equal)
indices = tf.squeeze(where_index, axis=1)
gt = tf.gather(raw_gt, indices)

The predicted results are then handled in the same way:
raw_prediction = tf.reshape(raw_output, [-1, self.conf.num_classes])
prediction = tf.gather(raw_prediction, indices)

# pixel-wise softmax cross-entropy loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)

The cross-entropy is then computed on the gathered values.
A complete example:
raw_gt = tf.reshape(label_proc, [-1, ])
indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, self.conf.num_classes - 1)), 1)
gt = tf.cast(tf.gather(raw_gt, indices), tf.int32)
raw_prediction = tf.reshape(raw_output, [-1, self.conf.num_classes])
prediction = tf.gather(raw_prediction, indices)

# pixel-wise softmax cross-entropy loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=gt)

27. tf.group, tf.tuple
import tensorflow as tf

w = tf.Variable(1)
mul = tf.multiply(w, 2)
add = tf.add(w, 2)
group = tf.group(mul, add)
tuple = tf.tuple([mul, add])
print(group)
print(tuple)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(group))
    print(sess.run(tuple))

Both sess.run(group) and sess.run(tuple) evaluate Tensor(mul) and Tensor(add). The difference is that tf.group() returns an op while tf.tuple() returns a list of tensors, so sess.run(tuple) returns the values of Tensor(mul) and Tensor(add), whereas sess.run(group) does not.
28. tf.metrics.true_positives
https://blog.csdn.net/jyzhang_cvml/article/details/82694631
import tensorflow as tf

a = tf.Variable([0, 1, 1, 0], tf.bool)
b = tf.Variable([0, 1, 0, 1], tf.bool)
tp, tp_update = tf.metrics.true_positives(predictions=a, labels=b)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # tp_update's state is kept in tf.local_variables()
    sess.run(tf.local_variables_initializer())
    sess.run(tp_update)
    print(sess.run(tp))

29. tf.one_hot
import tensorflow as tf

classes = 3
labels = tf.constant([0, 1, 2])  # the input values range from 0 to 2
output = tf.one_hot(labels, classes)
with tf.Session() as sess:
    output = sess.run(output)
    print("output of one-hot is : ", output)

classes = 3
labels = tf.constant([[0, 1, 2], [1, 2, 0]])  # the input values range from 0 to 2
output = tf.one_hot(labels, classes)
with tf.Session() as sess:
    output = sess.run(output)
    print("output of one-hot is : ", output)