TensorFlow MNIST LeNet Model Persistence
This article walks through a LeNet-style convolutional network for MNIST in TensorFlow, split into three scripts: mnist_inference.py defines the forward pass, mnist_train.py trains the network and persists it to checkpoint files, and mnist_eval.py restores the saved model and evaluates it on the test set.
Forward propagation: mnist_inference.py
import tensorflow as tf

# Network parameters
INPUT_NODE = 784
OUTPUT_NODE = 10

def inference(inputs, dropout_keep_prob):
    x_image = tf.reshape(inputs, [-1, 28, 28, 1])

    # Layer 1: convolutional layer
    # 5x5 filter, input depth 1, output depth 32
    conv1_weights = tf.get_variable("conv1_weights", [5, 5, 1, 32],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
    # Stride 1 in every dimension, zero ('SAME') padding
    conv1 = tf.nn.conv2d(x_image, filter=conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
    conv1_biases = tf.get_variable("conv1_biases", [32], initializer=tf.constant_initializer(0.0))
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))  # ReLU non-linearity

    # Layer 2: max-pooling layer
    # 2x2 pooling window, stride 2, zero padding; output 14x14x32
    pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Layer 3: convolutional layer
    # 5x5 filter, input depth 32, output depth 64
    conv2_weights = tf.get_variable("conv2_weights", [5, 5, 32, 64],
                                    initializer=tf.truncated_normal_initializer(stddev=0.1))
    conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
    conv2_biases = tf.get_variable("conv2_biases", [64], initializer=tf.constant_initializer(0.0))
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4: max-pooling layer; output 7x7x64
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Layer 5: fully connected layer
    # Flatten the previous output into a feature vector of 7*7*64 = 3136
    pool2_vector = tf.reshape(pool2, [-1, 7 * 7 * 64])
    fc1_weights = tf.get_variable("fc1_weights", [7 * 7 * 64, 1024],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
    fc1_biases = tf.get_variable("fc1_biases", [1024], initializer=tf.constant_initializer(0.1))
    fc1 = tf.nn.relu(tf.matmul(pool2_vector, fc1_weights) + fc1_biases)

    # Dropout to reduce overfitting
    fc1_dropout = tf.nn.dropout(fc1, dropout_keep_prob)

    # Layer 6: fully connected layer (1024 hidden units -> 10 classes)
    fc2_weights = tf.get_variable("fc2_weights", [1024, 10],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
    fc2_biases = tf.get_variable("fc2_biases", [10], initializer=tf.constant_initializer(0.1))
    fc2 = tf.matmul(fc1_dropout, fc2_weights) + fc2_biases
    return fc2

Training: mnist_train.py

import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

BATCH_SIZE = 100
LEARN_RATE = 0.001  # learning rate
MODEL_SAVE_PATH = "model/"
MODEL_NAME = "model.ckpt"
EPOCH = 2  # note: defined but unused; the loop below runs a fixed 20000 steps

def train(mnist):
    inputs = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
    labels = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE])
    dropout_keep_prob = tf.placeholder(tf.float32)
    logits = mnist_inference.inference(inputs, dropout_keep_prob)
    global_step = tf.Variable(0, trainable=False)
    # Labels are one-hot; with integer class labels use
    # tf.nn.sparse_softmax_cross_entropy_with_logits instead.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    cost = tf.reduce_mean(cross_entropy)
    train_op = tf.train.AdamOptimizer(LEARN_RATE).minimize(cost, global_step=global_step)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        print(mnist.train.images.shape)
        for i in range(20000):
            batch_inputs, batch_labels = mnist.train.next_batch(BATCH_SIZE)
            _, cost_value, step = sess.run(
                [train_op, cost, global_step],
                feed_dict={inputs: batch_inputs, labels: batch_labels, dropout_keep_prob: 0.5})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %f." % (step, cost_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
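Because saver.save is called with global_step, TensorFlow appends the step number to the checkpoint name, so model/ fills up with files like model.ckpt-1001.index and model.ckpt-1001.data-00000-of-00001, plus a checkpoint file that points at the latest one. As a quick sanity check that persistence worked, here is a minimal sketch, not part of the original three scripts, that opens the latest checkpoint and lists the variables stored in it (the filename inspect_checkpoint.py is my choice):

# inspect_checkpoint.py -- a minimal sketch, assuming mnist_train.py has
# already written checkpoints into the model/ directory.
import tensorflow as tf

ckpt = tf.train.get_checkpoint_state("model/")
if ckpt and ckpt.model_checkpoint_path:
    # NewCheckpointReader reads variable names and shapes straight from the
    # checkpoint files, without rebuilding the graph.
    reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)
    for name, shape in sorted(reader.get_variable_to_shape_map().items()):
        print(name, shape)
else:
    print("No checkpoint file found")

You should see conv1_weights [5, 5, 1, 32] through fc2_biases [10], along with the extra slot variables that AdamOptimizer maintains for each trainable weight.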
Evaluation: mnist_eval.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

def evaluate(mnist):
    inputs = tf.placeholder(tf.float32, [None, 784])
    labels = tf.placeholder(tf.float32, [None, 10])
    dropout_keep_prob = tf.placeholder(tf.float32)
    logits = mnist_inference.inference(inputs, dropout_keep_prob)
    print(logits)
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Recover the step count from the checkpoint filename, e.g. model.ckpt-19001
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            accuracy_score = sess.run(accuracy,
                                      feed_dict={inputs: mnist.test.images,
                                                 labels: mnist.test.labels,
                                                 dropout_keep_prob: 1.0})
            print("After %s training step(s), validation accuracy = %f" % (global_step, accuracy_score))
        else:
            print("No checkpoint file found")
            return

def main(argv=None):
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
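A checkpoint is tied to Python code that rebuilds the same graph, as mnist_eval.py does. If you want to hand the trained model to something like TensorFlow Serving, you can restore the checkpoint once and re-export it as a SavedModel. The sketch below is an addition with several assumptions of mine: the export directory export/1 is illustrative, and tf.saved_model.simple_save requires a newer TF 1.x release (roughly 1.8+), which the article does not specify.

# export_saved_model.py -- a hedged sketch: rebuild the graph, restore the
# latest checkpoint, and export a SavedModel. Paths are illustrative.
import tensorflow as tf
import mnist_inference
import mnist_train

inputs = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
# Default keep_prob of 1.0 so serving does not need to feed it explicitly
dropout_keep_prob = tf.placeholder_with_default(1.0, shape=())
logits = mnist_inference.inference(inputs, dropout_keep_prob)

saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)
    saver.restore(sess, ckpt.model_checkpoint_path)
    # Writes export/1/saved_model.pb plus a variables/ subdirectory
    tf.saved_model.simple_save(sess, "export/1",
                               inputs={"image": inputs},
                               outputs={"logits": logits})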
Summary

The three scripts above cover the full persistence workflow: mnist_inference.py builds the LeNet forward pass with tf.get_variable, mnist_train.py trains it and saves checkpoints through tf.train.Saver, and mnist_eval.py locates the latest checkpoint with tf.train.get_checkpoint_state, restores the weights, and reports test accuracy. Run mnist_train.py first, then mnist_eval.py.