TensorFlow hangs after training starts (Python has stopped working)
My graduation project is a deep-learning topic and I'm using TensorFlow, but training runs into a problem: after I launch training from the script, it hangs before reaching 100 steps and then the system reports that Python has stopped working.
Here is my training code:
# Load the dataset
import load_record
# Import TensorFlow and create a session
import tensorflow as tf
sess = tf.InteractiveSession()

# Build the computation graph
x = tf.placeholder("float", shape=[None, 224, 224, 1])
y_ = tf.placeholder("float", shape=[None, 2])

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

x_image = tf.reshape(x, [-1, 224, 224, 1])

# First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# First fully connected layer (two 2x2 max-poolings shrink 224x224 to 56x56)
W_fc1 = weight_variable([56 * 56 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 56 * 56 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout layer to reduce overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Second fully connected layer (output)
W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# Train the model
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
print('ready...')
for i in range(500):
    images, labels = load_record.inputs(data_set='train', batch_size=50, num_epochs=None)
    tf.train.start_queue_runners(sess=sess)
    _images, _labels = sess.run([images, labels])
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: _images, y_: _labels, keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: _images, y_: _labels, keep_prob: 0.5})
I read the dataset in with TFRecords:
def inputs(data_set, batch_size, num_epochs):
    if not num_epochs:
        num_epochs = None
    if data_set == 'train':
        file = TRAIN_FILE
    else:
        file = VALIDATION_FILE
    with tf.name_scope('input') as scope:
        filename_queue = tf.train.string_input_producer([file], num_epochs=num_epochs)
        image, label = read_and_decode(filename_queue)
        # Draw a randomly shuffled batch of batch_size images and labels
        images, labels = tf.train.shuffle_batch([image, label],
                                                batch_size=batch_size,
                                                num_threads=64,
                                                capacity=1000 + 3 * batch_size,
                                                min_after_dequeue=1000)
    return images, labels
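read_and_decode is not shown above. For context, a typical TF1 version for 224x224 single-channel records might look like the sketch below; the feature keys 'image_raw' and 'label' are assumptions and would have to match whatever was written into the .tfrecords file:

def read_and_decode(filename_queue):
    # Read one serialized example from the filename queue
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    # NOTE: the feature keys below are assumptions, not taken from the post
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    # Decode the raw bytes and restore the 224x224x1 shape the model expects
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [224, 224, 1])
    image = tf.cast(image, tf.float32) * (1.0 / 255)
    # y_ is fed as a length-2 one-hot vector, so convert the integer label
    label = tf.one_hot(tf.cast(features['label'], tf.int32), depth=2)
    return image, label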
Any advice would be appreciated; @ me if more details are needed.
It only gets through the first iteration; the second one fails.
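For comparison, the standard structure in the TF1 queue-runner examples builds the input pipeline once before the loop and starts the runner threads once, under a tf.train.Coordinator. Calling load_record.inputs() inside the loop adds a fresh set of queue ops to the graph on every iteration, and each tf.train.start_queue_runners() call spawns another batch of threads, so memory and thread count grow with every step. A minimal sketch of the loop restructured that way (model definition unchanged, load_record as above):

# Build the input pipeline ONCE, outside the training loop
images, labels = load_record.inputs(data_set='train', batch_size=50, num_epochs=None)

sess.run(tf.global_variables_initializer())
# string_input_producer uses a local variable when num_epochs is set
sess.run(tf.local_variables_initializer())

# Start the queue-runner threads ONCE, under a coordinator
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    for i in range(500):
        # Pull the next batch out of the already-running pipeline
        _images, _labels = sess.run([images, labels])
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: _images, y_: _labels, keep_prob: 1.0})
            print("step %d, training accuracy %g" % (i, train_accuracy))
        train_step.run(feed_dict={x: _images, y_: _labels, keep_prob: 0.5})
finally:
    # Shut the input threads down cleanly
    coord.request_stop()
    coord.join(threads)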