Recognizing Handwritten Letters with a Three-Layer Neural Network (Based on TensorFlow)
How the dataset was prepared is covered in this article:
https://blog.csdn.net/fanzonghao/article/details/81229409
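Whatever pipeline produces it, the loader in part 1 assumes a single pickle holding six arrays under fixed keys. Below is a minimal sketch of that layout, with dummy random arrays standing in for the real notMNIST data from the linked article (the sizes 200/50/50 are placeholders, not from the original):

# Sketch of the pickle layout that pickle_dataset() below expects.
# Dummy random arrays stand in for the real notMNIST data produced by the linked article.
import os
import pickle
import numpy as np

def fake_images(n):
    return np.random.rand(n, 28, 28).astype(np.float32)   # 28x28 grayscale images

def fake_labels(n):
    return np.random.randint(0, 10, size=n)                # class indices 0..9 (letters A..J)

save = {
    'train_dataset': fake_images(200), 'train_label': fake_labels(200),
    'valid_dataset': fake_images(50),  'valid_label': fake_labels(50),
    'test_dataset':  fake_images(50),  'test_label':  fake_labels(50),
}
os.makedirs('./data', exist_ok=True)
with open('./data/notMNIST.pickle', 'wb') as f:
    pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)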
1. Reading the dataset
This module (imported below as read_pickle_dataset) loads the pickle and reshapes the images and labels:

import tensorflow as tf
import numpy as np
import pickle
import matplotlib.pyplot as plt

# x becomes (samples, pixels); y becomes one-hot (samples, 10)
"""
one-hot
"""
def reformat(dataset, labels, imgsize, C):
    dataset = dataset.reshape(-1, imgsize * imgsize).astype(np.float32)
    # Two ways to build the one-hot labels
    # Option 1
    labels = np.eye(C)[labels.reshape(-1)].astype(np.float32)
    # Option 2
    # labels = (np.arange(10) == labels[:, None]).astype(np.float32)
    return dataset, labels

"""
Read the .pickle file
"""
def pickle_dataset():
    path = './data/notMNIST.pickle'
    with open(path, 'rb') as f:
        restore = pickle.load(f)
        train_dataset = restore['train_dataset']
        train_label = restore['train_label']
        valid_dataset = restore['valid_dataset']
        valid_label = restore['valid_label']
        test_dataset = restore['test_dataset']
        test_label = restore['test_label']
        del restore
        # print('Training:', train_dataset.shape, train_label.shape)
        # print('Validing:', valid_dataset.shape, valid_label.shape)
        # print('Testing:', test_dataset.shape, test_label.shape)
    train_dataset, train_label = reformat(train_dataset, train_label, imgsize=28, C=10)
    valid_dataset, valid_label = reformat(valid_dataset, valid_label, imgsize=28, C=10)
    test_dataset, test_label = reformat(test_dataset, test_label, imgsize=28, C=10)
    # print('after Training:', train_dataset.shape, train_label.shape)
    # print('after Validing:', valid_dataset.shape, valid_label.shape)
    # print('after Testing:', test_dataset.shape, test_label.shape)
    return train_dataset, train_label, valid_dataset, valid_label, test_dataset, test_label

# # Check that the generated data looks right
# def test(train_dataset, train_label):
#     print(train_label[:10])
#     # plt.figure(figsize=(50, 20))
#     for i in range(10):
#         plt.subplot(5, 2, i + 1)
#         plt.imshow(train_dataset[i].reshape(28, 28))
#     plt.show()

# if __name__ == '__main__':
#     test(train_dataset, train_label)
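Before wiring these arrays into the network, it is worth confirming the shapes and the one-hot labels, in the spirit of the commented-out test() above. A minimal sketch, assuming the code is saved as read_pickle_dataset.py and ./data/notMNIST.pickle is in place:

# Quick sanity check of the reformatted arrays.
import numpy as np
import matplotlib.pyplot as plt
import read_pickle_dataset

train_dataset, train_label, *_ = read_pickle_dataset.pickle_dataset()
print(train_dataset.shape, train_label.shape)   # expected: (N, 784) and (N, 10)
print(np.unique(train_label.sum(axis=1)))       # every one-hot row should sum to 1.0
plt.imshow(train_dataset[0].reshape(28, 28), cmap='gray')
plt.title('label index: {}'.format(np.argmax(train_label[0])))
plt.show()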
2. Three-layer neural network code

The network itself is a 784-1024-10 fully connected model with a ReLU hidden layer, trained by mini-batch gradient descent with L2 regularization, sweeping the regularization weight beta over 20 values:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import read_pickle_dataset
import math

# Compare predictions with the true labels and compute the accuracy
def accuracy(predictions, lables):
    acc = np.sum(np.argmax(predictions, 1) == np.argmax(lables, 1)) / lables.shape[0]
    return acc

train_dataset, train_label, valid_dataset, valid_label, test_dataset, test_label = read_pickle_dataset.pickle_dataset()
print('Training:', train_dataset.shape, train_label.shape)
print('Validing:', valid_dataset.shape, valid_label.shape)
print('Testing:', test_dataset.shape, test_label.shape)
train_subset = 10000
batch_size = 128
# Version with constants
# tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
# tf_train_label = tf.constant(train_label[:train_subset, :])
tf_valid_dataset = tf.constant(valid_dataset)
# tf_valid_label = tf.constant(valid_label)
tf_test_dataset = tf.constant(test_dataset)
# tf_test_label = tf.constant(test_label)

# placeholders
"""
Create the placeholders
"""
def creat_placeholder():
    X = tf.placeholder(dtype=tf.float32, shape=(None, 28 * 28))
    Y = tf.placeholder(dtype=tf.float32, shape=(None, 10))
    beta_regu = tf.placeholder(dtype=tf.float32)
    # tf_train_dataset = tf.placeholder(dtype=tf.float32, shape=(None, 28 * 28))
    # tf_train_label = tf.placeholder(dtype=tf.float32, shape=(None, 10))
    return X, Y, beta_regu

"""
Initialize the weights
"""
def initialize_parameters():
    W1 = tf.Variable(initial_value=tf.truncated_normal(shape=(28 * 28, 1024), stddev=np.sqrt(2.0 / (28 * 28))))
    b1 = tf.Variable(initial_value=tf.zeros(1024))
    W2 = tf.Variable(initial_value=tf.truncated_normal(shape=(1024, 10)))
    b2 = tf.Variable(initial_value=tf.zeros(10))
    parameters = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
    return parameters

"""
Forward propagation: three-layer network
"""
def forward_propagation(X, parameters):
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    Z = tf.add(tf.matmul(X, W1), b1)
    A = tf.nn.relu(Z)
    output = tf.add(tf.matmul(A, W2), b2)
    return output

"""
Compute the cost,
including the L2 regularization term
"""
def compute_cost(beta, Y, Y_pred, parameter):
    # compute the loss
    loss = tf.reduce_mean(  # (10000, 10) (10000, 10)
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=Y_pred))
    loss += beta * (tf.nn.l2_loss(parameter['W1']) + tf.nn.l2_loss(parameter['W2']))
    return loss

"""
Build the model
"""
def model():
    beta_array = np.logspace(-4, -2, 20)
    X, Y, beta_regu = creat_placeholder()
    parameters = initialize_parameters()
    Y_pred = forward_propagation(X, parameters)
    loss = compute_cost(beta_regu, Y, Y_pred, parameters)
    # Learning-rate decay (exponential decay)
    # learning_rate = tf.train.exponential_decay
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss)
    # Predictions
    train_prediction = tf.nn.softmax(Y_pred)
    valid_prediction = tf.nn.softmax(forward_propagation(tf_valid_dataset, parameters))
    test_prediction = tf.nn.softmax(forward_propagation(tf_test_dataset, parameters))
    init = tf.global_variables_initializer()
    for beta in beta_array:
        with tf.Session() as sess:
            sess.run(init)
            costs = []
            for step in range(3001):
                offset = (step * batch_size) % (train_label.shape[0] - batch_size)
                batch_data = train_dataset[offset:(offset + batch_size), :]
                batch_label = train_label[offset:(offset + batch_size), :]
                feed = {X: batch_data, Y: batch_label, beta_regu: beta}
                _, train_predictions, cost = sess.run([optimizer, train_prediction, loss], feed_dict=feed)
                # if step % 500 == 0:
                #     costs.append(cost)
                print('loss ={},at step {},beta={}'.format(cost, step, beta))
            # corect_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(train_label, 1))
            # accuarcy = tf.reduce_mean(tf.cast(corect_prediction, 'float'))
            # feed = {tf_train_dataset: train_dataset, tf_train_label: train_label}
            # train_accuarcy = sess.run(accuarcy, feed_dict=feed)
            print('train accuracy={},beta={}'.format(accuracy(train_predictions, batch_label), beta))
            print('valid accuracy={},beta={}'.format(accuracy(valid_prediction.eval(), valid_label), beta))
            print('test accuracy={},beta={}'.format(accuracy(test_prediction.eval(), test_label), beta))
            # plt.plot(costs)
            # plt.ylabel('cost')
            # plt.xlabel('iterations ')
            # plt.show()

if __name__ == '__main__':
    model()

Printed output: the loss at every step, followed by the train, validation, and test accuracy for each value of beta.
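The comment inside model() points at tf.train.exponential_decay but leaves the learning rate fixed at 0.5. A minimal sketch of how the decay could be wired in, replacing the two optimizer lines in model() (decay_steps and decay_rate here are illustrative assumptions, not values from the article):

# Sketch: exponential learning-rate decay (TensorFlow 1.x), replacing the fixed rate.
# Assumes `loss` has already been built by compute_cost() as above.
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    learning_rate=0.5,        # same starting rate as above
    global_step=global_step,
    decay_steps=1000,         # assumed: decay every 1000 steps
    decay_rate=0.65,          # assumed decay factor
    staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    loss, global_step=global_step)   # global_step increments each update, driving the decay

With staircase=True the rate drops in discrete steps (0.5, 0.325, ...), keeping the early training fast while steadying the later iterations.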
Summary
The post reads the notMNIST arrays out of a single pickle file, flattens each image into a 784-dimensional vector with a one-hot label, and trains a three-layer fully connected network (784-1024-10 with a ReLU hidden layer) by mini-batch gradient descent, sweeping the L2 regularization weight beta over np.logspace(-4, -2, 20) and reporting the train, validation, and test accuracy for each value.