Abstract: Earlier articles in this series followed TensorFlow's official tutorials and modeled the MNIST dataset with softmax regression and a CNN. For completeness, this article applies an RNN model to the MNIST data, specifically an LSTM.
Preface
The input data for this article is MNIST (Modified National Institute of Standards and Technology): a dataset of scanned handwritten-digit images, each paired with a label, collected by that institute and modified so that it is easy for machine learning algorithms to read. The dataset can be obtained from the website of the renowned Professor Yann LeCun.
Earlier articles in this series followed TensorFlow's official tutorials and modeled the MNIST dataset with softmax regression and a CNN. For completeness, this article applies an RNN model to the MNIST data; the specific RNN used is an LSTM.
For the theory behind RNNs and LSTMs, refer to this article.
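The code below treats each 28x28 MNIST image as a sequence of 28 time steps, one 28-pixel row per step. As a minimal illustrative sketch (not part of the original code; the variable names here are made up), this is how a batch of flat 784-dimensional image vectors becomes such a sequence:

import numpy as np

# Illustrative only: stand-in for a batch of flattened MNIST images (batch_size, 784).
batch = np.zeros((128, 784), dtype=np.float32)

# Reshape to (batch_size, n_steps, n_input): 28 time steps of 28 pixels each,
# i.e. each image is read row by row, top to bottom.
n_steps, n_input = 28, 28
sequence = batch.reshape((128, n_steps, n_input))
print sequence.shape   # (128, 28, 28)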
Code

# coding: utf-8
# @author: 陳水平
# @date: 2017-02-14

# In[1]:

import tensorflow as tf
import numpy as np

# In[2]:

sess = tf.InteractiveSession()

# In[3]:

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("mnist/", one_hot=True)

# In[4]:

learning_rate = 0.001
batch_size = 128

n_input = 28
n_steps = 28
n_hidden = 128
n_classes = 10

x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# In[5]:

def RNN(x, weight, biases):
    # x shape: (batch_size, n_steps, n_input)
    # desired shape: list of n_steps with element shape (batch_size, n_input)
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    outputs = list()
    lstm = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    state = (tf.zeros([n_steps, n_hidden]),) * 2
    sess.run(state)
    with tf.variable_scope("myrnn2") as scope:
        for i in range(n_steps - 1):
            if i > 0:
                scope.reuse_variables()
            output, state = lstm(x[i], state)
            outputs.append(output)
    final = tf.matmul(outputs[-1], weight) + biases
    return final

# In[6]:

# Note: this second definition of RNN shadows the one above, so the model that
# is actually trained below is this hand-rolled LSTM rather than BasicLSTMCell.
def RNN(x, n_steps, n_input, n_hidden, n_classes):
    # Parameters:
    # Input gate: input, previous output, and bias
    ix = tf.Variable(tf.truncated_normal([n_input, n_hidden], -0.1, 0.1))
    im = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], -0.1, 0.1))
    ib = tf.Variable(tf.zeros([1, n_hidden]))
    # Forget gate: input, previous output, and bias
    fx = tf.Variable(tf.truncated_normal([n_input, n_hidden], -0.1, 0.1))
    fm = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], -0.1, 0.1))
    fb = tf.Variable(tf.zeros([1, n_hidden]))
    # Memory cell: input, state, and bias
    cx = tf.Variable(tf.truncated_normal([n_input, n_hidden], -0.1, 0.1))
    cm = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], -0.1, 0.1))
    cb = tf.Variable(tf.zeros([1, n_hidden]))
    # Output gate: input, previous output, and bias
    ox = tf.Variable(tf.truncated_normal([n_input, n_hidden], -0.1, 0.1))
    om = tf.Variable(tf.truncated_normal([n_hidden, n_hidden], -0.1, 0.1))
    ob = tf.Variable(tf.zeros([1, n_hidden]))
    # Classifier weights and biases
    w = tf.Variable(tf.truncated_normal([n_hidden, n_classes]))
    b = tf.Variable(tf.zeros([n_classes]))

    # Definition of the cell computation
    def lstm_cell(i, o, state):
        input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
        forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
        update = tf.tanh(tf.matmul(i, cx) + tf.matmul(o, cm) + cb)
        state = forget_gate * state + input_gate * update
        output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
        return output_gate * tf.tanh(state), state

    # Unrolled LSTM loop
    outputs = list()
    state = tf.Variable(tf.zeros([batch_size, n_hidden]))
    output = tf.Variable(tf.zeros([batch_size, n_hidden]))

    # x shape: (batch_size, n_steps, n_input)
    # desired shape: list of n_steps with element shape (batch_size, n_input)
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, n_input])
    x = tf.split(0, n_steps, x)
    for i in x:
        output, state = lstm_cell(i, output, state)
        outputs.append(output)
    logits = tf.matmul(outputs[-1], w) + b
    return logits

# In[7]:

pred = RNN(x, n_steps, n_input, n_hidden, n_classes)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()

# In[8]:

# Launch the graph
sess.run(init)
for step in range(20000):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    batch_x = batch_x.reshape((batch_size, n_steps, n_input))
    sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
    if step % 50 == 0:
        acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
        loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
        print "Iter " + str(step) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
print "Optimization Finished!"

# In[9]:

# Calculate accuracy for 128 mnist test images
test_len = batch_size
test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
test_label = mnist.test.labels[:test_len]
print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label})
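The hand-rolled lstm_cell in the second RNN definition above implements the standard LSTM update: sigmoid input, forget, and output gates plus a tanh candidate update on the cell state. The NumPy sketch below mirrors that computation purely to make the shapes and equations explicit; it is not part of the original code, and the parameter dictionaries W, U, b are illustrative stand-ins for the ix/im/ib-style variables.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(i, o, state, W, U, b):
    # i: (batch, n_input) current input; o: (batch, n_hidden) previous output;
    # state: (batch, n_hidden) previous cell state.
    # W, U, b hold the per-gate parameters, mirroring ix/im/ib, fx/fm/fb, etc.
    input_gate  = sigmoid(np.dot(i, W['i']) + np.dot(o, U['i']) + b['i'])
    forget_gate = sigmoid(np.dot(i, W['f']) + np.dot(o, U['f']) + b['f'])
    update      = np.tanh(np.dot(i, W['c']) + np.dot(o, U['c']) + b['c'])
    state       = forget_gate * state + input_gate * update
    output_gate = sigmoid(np.dot(i, W['o']) + np.dot(o, U['o']) + b['o'])
    return output_gate * np.tanh(state), state

# Shape check with made-up sizes: output and state are both (batch, n_hidden).
n_input, n_hidden, batch = 28, 128, 4
W = {k: 0.1 * np.random.randn(n_input, n_hidden) for k in 'ifco'}
U = {k: 0.1 * np.random.randn(n_hidden, n_hidden) for k in 'ifco'}
b = {k: np.zeros((1, n_hidden)) for k in 'ifco'}
out, st = lstm_step(np.zeros((batch, n_input)), np.zeros((batch, n_hidden)),
                    np.zeros((batch, n_hidden)), W, U, b)
print out.shape, st.shape   # (4, 128) (4, 128)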
The output is as follows:
Iter 0, Minibatch Loss= 2.540429, Training Accuracy= 0.07812
Iter 50, Minibatch Loss= 2.423611, Training Accuracy= 0.06250
Iter 100, Minibatch Loss= 2.318830, Training Accuracy= 0.13281
Iter 150, Minibatch Loss= 2.276640, Training Accuracy= 0.13281
Iter 200, Minibatch Loss= 2.276727, Training Accuracy= 0.12500
Iter 250, Minibatch Loss= 2.267064, Training Accuracy= 0.16406
Iter 300, Minibatch Loss= 2.234139, Training Accuracy= 0.19531
Iter 350, Minibatch Loss= 2.295060, Training Accuracy= 0.12500
Iter 400, Minibatch Loss= 2.261856, Training Accuracy= 0.16406
Iter 450, Minibatch Loss= 2.220284, Training Accuracy= 0.17969
Iter 500, Minibatch Loss= 2.276015, Training Accuracy= 0.13281
Iter 550, Minibatch Loss= 2.220499, Training Accuracy= 0.14062
Iter 600, Minibatch Loss= 2.219574, Training Accuracy= 0.11719
Iter 650, Minibatch Loss= 2.189177, Training Accuracy= 0.25781
Iter 700, Minibatch Loss= 2.195167, Training Accuracy= 0.19531
Iter 750, Minibatch Loss= 2.226459, Training Accuracy= 0.18750
Iter 800, Minibatch Loss= 2.148620, Training Accuracy= 0.23438
Iter 850, Minibatch Loss= 2.122925, Training Accuracy= 0.21875
Iter 900, Minibatch Loss= 2.065122, Training Accuracy= 0.24219
...
Iter 19350, Minibatch Loss= 0.001304, Training Accuracy= 1.00000
Iter 19400, Minibatch Loss= 0.000144, Training Accuracy= 1.00000
Iter 19450, Minibatch Loss= 0.000907, Training Accuracy= 1.00000
Iter 19500, Minibatch Loss= 0.002555, Training Accuracy= 1.00000
Iter 19550, Minibatch Loss= 0.002018, Training Accuracy= 1.00000
Iter 19600, Minibatch Loss= 0.000853, Training Accuracy= 1.00000
Iter 19650, Minibatch Loss= 0.001035, Training Accuracy= 1.00000
Iter 19700, Minibatch Loss= 0.007034, Training Accuracy= 0.99219
Iter 19750, Minibatch Loss= 0.000608, Training Accuracy= 1.00000
Iter 19800, Minibatch Loss= 0.002913, Training Accuracy= 1.00000
Iter 19850, Minibatch Loss= 0.003484, Training Accuracy= 1.00000
Iter 19900, Minibatch Loss= 0.005693, Training Accuracy= 1.00000
Iter 19950, Minibatch Loss= 0.001904, Training Accuracy= 1.00000
Optimization Finished!
Testing Accuracy: 0.992188
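Note that the test accuracy above is measured on only the first 128 test images (test_len = batch_size), because the unrolled graph is built for fixed batches of batch_size. A rough sketch of evaluating over the full test set in whole batches, assuming the session and tensors defined above are still available, might look like this:

# Illustrative sketch (not in the original code): evaluate on the full test set
# in whole batches of batch_size; any incomplete final batch is skipped because
# the graph expects exactly batch_size examples.
accs = []
n_test = mnist.test.images.shape[0]
for start in range(0, n_test - batch_size + 1, batch_size):
    data = mnist.test.images[start:start + batch_size].reshape((-1, n_steps, n_input))
    labels = mnist.test.labels[start:start + batch_size]
    accs.append(sess.run(accuracy, feed_dict={x: data, y: labels}))
print "Full test accuracy:", sum(accs) / len(accs)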