|
1 | 1 | { |
2 | 2 | "cells": [ |
3 | 3 | { |
4 | | - "cell_type": "code", |
5 | | - "execution_count": null, |
6 | | - "metadata": {}, |
7 | | - "outputs": [], |
| 4 | + "cell_type": "markdown", |
| 5 | + "metadata": { |
| 6 | + "collapsed": true |
| 7 | + }, |
8 | 8 | "source": [ |
9 | 9 | "'''\n", |
10 | 10 | "A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.\n", |
|
18 | 18 | }, |
19 | 19 | { |
20 | 20 | "cell_type": "code", |
21 | | - "execution_count": 1, |
22 | | - "metadata": {}, |
23 | | - "outputs": [ |
24 | | - { |
25 | | - "name": "stdout", |
26 | | - "output_type": "stream", |
27 | | - "text": [ |
28 | | - "Extracting /tmp/data/train-images-idx3-ubyte.gz\n", |
29 | | - "Extracting /tmp/data/train-labels-idx1-ubyte.gz\n", |
30 | | - "Extracting /tmp/data/t10k-images-idx3-ubyte.gz\n", |
31 | | - "Extracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" |
32 | | - ] |
33 | | - } |
34 | | - ], |
| 21 | + "execution_count": null, |
| 22 | + "metadata": { |
| 23 | + "collapsed": false |
| 24 | + }, |
| 25 | + "outputs": [], |
35 | 26 | "source": [ |
36 | 27 | "import tensorflow as tf\n", |
37 | | - "from tensorflow.python.ops import rnn, rnn_cell\n", |
| 28 | + "from tensorflow.contrib import rnn\n", |
38 | 29 | "import numpy as np\n", |
39 | 30 | "\n", |
40 | 31 | "# Import MNIST data\n", |
41 | 32 | "from tensorflow.examples.tutorials.mnist import input_data\n", |
42 | | - "mnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)" |
| 33 | + "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)" |
43 | 34 | ] |
44 | 35 | }, |
45 | 36 | { |
46 | | - "cell_type": "code", |
47 | | - "execution_count": null, |
48 | | - "metadata": {}, |
49 | | - "outputs": [], |
| 37 | + "cell_type": "markdown", |
| 38 | + "metadata": { |
| 39 | + "collapsed": true |
| 40 | + }, |
50 | 41 | "source": [ |
51 | 42 | "'''\n", |
52 | 43 | "To classify images using a recurrent neural network, we consider every image\n", |
|
58 | 49 | { |
59 | 50 | "cell_type": "code", |
60 | 51 | "execution_count": 2, |
61 | | - "metadata": {}, |
| 52 | + "metadata": { |
| 53 | + "collapsed": false |
| 54 | + }, |
62 | 55 | "outputs": [], |
63 | 56 | "source": [ |
64 | 57 | "# Parameters\n", |
|
89 | 82 | { |
90 | 83 | "cell_type": "code", |
91 | 84 | "execution_count": 3, |
92 | | - "metadata": {}, |
| 85 | + "metadata": { |
| 86 | + "collapsed": false |
| 87 | + }, |
93 | 88 | "outputs": [], |
94 | 89 | "source": [ |
95 | 90 | "def RNN(x, weights, biases):\n", |
|
103 | 98 | " # Reshaping to (n_steps*batch_size, n_input)\n", |
104 | 99 | " x = tf.reshape(x, [-1, n_input])\n", |
105 | 100 | " # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)\n", |
106 | | - " x = tf.split(0, n_steps, x)\n", |
| 101 | + " x = tf.split(x, n_steps, 0)\n", |
107 | 102 | "\n", |
108 | 103 | " # Define a lstm cell with tensorflow\n", |
109 | | - " lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)\n", |
| 104 | + " lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)\n", |
110 | 105 | "\n", |
111 | 106 | " # Get lstm cell output\n", |
112 | | - " outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)\n", |
| 107 | + " outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n", |
113 | 108 | "\n", |
114 | 109 | " # Linear activation, using rnn inner loop last output\n", |
115 | 110 | " return tf.matmul(outputs[-1], weights['out']) + biases['out']\n", |
116 | 111 | "\n", |
117 | 112 | "pred = RNN(x, weights, biases)\n", |
118 | 113 | "\n", |
119 | 114 | "# Define loss and optimizer\n", |
120 | | - "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\n", |
| 115 | + "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n", |
121 | 116 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n", |
122 | 117 | "\n", |
123 | 118 | "# Evaluate model\n", |
124 | 119 | "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n", |
125 | 120 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n", |
126 | 121 | "\n", |
127 | 122 | "# Initializing the variables\n", |
128 | | - "init = tf.initialize_all_variables()" |
| 123 | + "init = tf.global_variables_initializer()" |
129 | 124 | ] |
130 | 125 | }, |
131 | 126 | { |
132 | 127 | "cell_type": "code", |
133 | | - "execution_count": 4, |
134 | | - "metadata": {}, |
135 | | - "outputs": [ |
136 | | - { |
137 | | - "name": "stdout", |
138 | | - "output_type": "stream", |
139 | | - "text": [ |
140 | | - "Iter 1280, Minibatch Loss= 1.538532, Training Accuracy= 0.49219\n", |
141 | | - "Iter 2560, Minibatch Loss= 1.462834, Training Accuracy= 0.50781\n", |
142 | | - "Iter 3840, Minibatch Loss= 1.048393, Training Accuracy= 0.66406\n", |
143 | | - "Iter 5120, Minibatch Loss= 0.889872, Training Accuracy= 0.71875\n", |
144 | | - "Iter 6400, Minibatch Loss= 0.681855, Training Accuracy= 0.76562\n", |
145 | | - "Iter 7680, Minibatch Loss= 0.987207, Training Accuracy= 0.69531\n", |
146 | | - "Iter 8960, Minibatch Loss= 0.759543, Training Accuracy= 0.71094\n", |
147 | | - "Iter 10240, Minibatch Loss= 0.557055, Training Accuracy= 0.80469\n", |
148 | | - "Iter 11520, Minibatch Loss= 0.371352, Training Accuracy= 0.89844\n", |
149 | | - "Iter 12800, Minibatch Loss= 0.661293, Training Accuracy= 0.80469\n", |
150 | | - "Iter 14080, Minibatch Loss= 0.474259, Training Accuracy= 0.86719\n", |
151 | | - "Iter 15360, Minibatch Loss= 0.328436, Training Accuracy= 0.88281\n", |
152 | | - "Iter 16640, Minibatch Loss= 0.348017, Training Accuracy= 0.93750\n", |
153 | | - "Iter 17920, Minibatch Loss= 0.340086, Training Accuracy= 0.88281\n", |
154 | | - "Iter 19200, Minibatch Loss= 0.261532, Training Accuracy= 0.89844\n", |
155 | | - "Iter 20480, Minibatch Loss= 0.161785, Training Accuracy= 0.94531\n", |
156 | | - "Iter 21760, Minibatch Loss= 0.419619, Training Accuracy= 0.83594\n", |
157 | | - "Iter 23040, Minibatch Loss= 0.120714, Training Accuracy= 0.95312\n", |
158 | | - "Iter 24320, Minibatch Loss= 0.339519, Training Accuracy= 0.89062\n", |
159 | | - "Iter 25600, Minibatch Loss= 0.405463, Training Accuracy= 0.88281\n", |
160 | | - "Iter 26880, Minibatch Loss= 0.172193, Training Accuracy= 0.95312\n", |
161 | | - "Iter 28160, Minibatch Loss= 0.256769, Training Accuracy= 0.91406\n", |
162 | | - "Iter 29440, Minibatch Loss= 0.247753, Training Accuracy= 0.91406\n", |
163 | | - "Iter 30720, Minibatch Loss= 0.230820, Training Accuracy= 0.91406\n", |
164 | | - "Iter 32000, Minibatch Loss= 0.216861, Training Accuracy= 0.93750\n", |
165 | | - "Iter 33280, Minibatch Loss= 0.236337, Training Accuracy= 0.89062\n", |
166 | | - "Iter 34560, Minibatch Loss= 0.252351, Training Accuracy= 0.93750\n", |
167 | | - "Iter 35840, Minibatch Loss= 0.180090, Training Accuracy= 0.92188\n", |
168 | | - "Iter 37120, Minibatch Loss= 0.304125, Training Accuracy= 0.91406\n", |
169 | | - "Iter 38400, Minibatch Loss= 0.114474, Training Accuracy= 0.96094\n", |
170 | | - "Iter 39680, Minibatch Loss= 0.158405, Training Accuracy= 0.96875\n", |
171 | | - "Iter 40960, Minibatch Loss= 0.285858, Training Accuracy= 0.92188\n", |
172 | | - "Iter 42240, Minibatch Loss= 0.134199, Training Accuracy= 0.96094\n", |
173 | | - "Iter 43520, Minibatch Loss= 0.235847, Training Accuracy= 0.92969\n", |
174 | | - "Iter 44800, Minibatch Loss= 0.155971, Training Accuracy= 0.94531\n", |
175 | | - "Iter 46080, Minibatch Loss= 0.061549, Training Accuracy= 0.99219\n", |
176 | | - "Iter 47360, Minibatch Loss= 0.232569, Training Accuracy= 0.94531\n", |
177 | | - "Iter 48640, Minibatch Loss= 0.270348, Training Accuracy= 0.91406\n", |
178 | | - "Iter 49920, Minibatch Loss= 0.202416, Training Accuracy= 0.92188\n", |
179 | | - "Iter 51200, Minibatch Loss= 0.113857, Training Accuracy= 0.96094\n", |
180 | | - "Iter 52480, Minibatch Loss= 0.137900, Training Accuracy= 0.94531\n", |
181 | | - "Iter 53760, Minibatch Loss= 0.052416, Training Accuracy= 0.98438\n", |
182 | | - "Iter 55040, Minibatch Loss= 0.312064, Training Accuracy= 0.91406\n", |
183 | | - "Iter 56320, Minibatch Loss= 0.144335, Training Accuracy= 0.93750\n", |
184 | | - "Iter 57600, Minibatch Loss= 0.114723, Training Accuracy= 0.96875\n", |
185 | | - "Iter 58880, Minibatch Loss= 0.193597, Training Accuracy= 0.96094\n", |
186 | | - "Iter 60160, Minibatch Loss= 0.110877, Training Accuracy= 0.95312\n", |
187 | | - "Iter 61440, Minibatch Loss= 0.119864, Training Accuracy= 0.96094\n", |
188 | | - "Iter 62720, Minibatch Loss= 0.118780, Training Accuracy= 0.94531\n", |
189 | | - "Iter 64000, Minibatch Loss= 0.082259, Training Accuracy= 0.97656\n", |
190 | | - "Iter 65280, Minibatch Loss= 0.087364, Training Accuracy= 0.97656\n", |
191 | | - "Iter 66560, Minibatch Loss= 0.207975, Training Accuracy= 0.92969\n", |
192 | | - "Iter 67840, Minibatch Loss= 0.120612, Training Accuracy= 0.96875\n", |
193 | | - "Iter 69120, Minibatch Loss= 0.070608, Training Accuracy= 0.96875\n", |
194 | | - "Iter 70400, Minibatch Loss= 0.100786, Training Accuracy= 0.96094\n", |
195 | | - "Iter 71680, Minibatch Loss= 0.114746, Training Accuracy= 0.94531\n", |
196 | | - "Iter 72960, Minibatch Loss= 0.083427, Training Accuracy= 0.96875\n", |
197 | | - "Iter 74240, Minibatch Loss= 0.089978, Training Accuracy= 0.96094\n", |
198 | | - "Iter 75520, Minibatch Loss= 0.195322, Training Accuracy= 0.94531\n", |
199 | | - "Iter 76800, Minibatch Loss= 0.161109, Training Accuracy= 0.96094\n", |
200 | | - "Iter 78080, Minibatch Loss= 0.169762, Training Accuracy= 0.94531\n", |
201 | | - "Iter 79360, Minibatch Loss= 0.054240, Training Accuracy= 0.98438\n", |
202 | | - "Iter 80640, Minibatch Loss= 0.160100, Training Accuracy= 0.95312\n", |
203 | | - "Iter 81920, Minibatch Loss= 0.110728, Training Accuracy= 0.96875\n", |
204 | | - "Iter 83200, Minibatch Loss= 0.054918, Training Accuracy= 0.98438\n", |
205 | | - "Iter 84480, Minibatch Loss= 0.104170, Training Accuracy= 0.96875\n", |
206 | | - "Iter 85760, Minibatch Loss= 0.071871, Training Accuracy= 0.97656\n", |
207 | | - "Iter 87040, Minibatch Loss= 0.170529, Training Accuracy= 0.96094\n", |
208 | | - "Iter 88320, Minibatch Loss= 0.087350, Training Accuracy= 0.96875\n", |
209 | | - "Iter 89600, Minibatch Loss= 0.079943, Training Accuracy= 0.96875\n", |
210 | | - "Iter 90880, Minibatch Loss= 0.128451, Training Accuracy= 0.92969\n", |
211 | | - "Iter 92160, Minibatch Loss= 0.046963, Training Accuracy= 0.98438\n", |
212 | | - "Iter 93440, Minibatch Loss= 0.162998, Training Accuracy= 0.96875\n", |
213 | | - "Iter 94720, Minibatch Loss= 0.122588, Training Accuracy= 0.96094\n", |
214 | | - "Iter 96000, Minibatch Loss= 0.073954, Training Accuracy= 0.97656\n", |
215 | | - "Iter 97280, Minibatch Loss= 0.130790, Training Accuracy= 0.96094\n", |
216 | | - "Iter 98560, Minibatch Loss= 0.067689, Training Accuracy= 0.97656\n", |
217 | | - "Iter 99840, Minibatch Loss= 0.186411, Training Accuracy= 0.92188\n", |
218 | | - "Optimization Finished!\n", |
219 | | - "Testing Accuracy: 0.976562\n" |
220 | | - ] |
221 | | - } |
222 | | - ], |
| 128 | + "execution_count": null, |
| 129 | + "metadata": { |
| 130 | + "collapsed": false |
| 131 | + }, |
| 132 | + "outputs": [], |
223 | 133 | "source": [ |
224 | 134 | "# Launch the graph\n", |
225 | 135 | "with tf.Session() as sess:\n", |
|
250 | 160 | " print \"Testing Accuracy:\", \\\n", |
251 | 161 | " sess.run(accuracy, feed_dict={x: test_data, y: test_label})" |
252 | 162 | ] |
| 163 | + }, |
| 164 | + { |
| 165 | + "cell_type": "code", |
| 166 | + "execution_count": null, |
| 167 | + "metadata": { |
| 168 | + "collapsed": true |
| 169 | + }, |
| 170 | + "outputs": [], |
| 171 | + "source": [] |
253 | 172 | } |
254 | 173 | ], |
255 | 174 | "metadata": { |
|
261 | 180 | "language_info": { |
262 | 181 | "codemirror_mode": { |
263 | 182 | "name": "ipython", |
264 | | - "version": 2.0 |
| 183 | + "version": 2 |
265 | 184 | }, |
266 | 185 | "file_extension": ".py", |
267 | 186 | "mimetype": "text/x-python", |
268 | 187 | "name": "python", |
269 | 188 | "nbconvert_exporter": "python", |
270 | 189 | "pygments_lexer": "ipython2", |
271 | | - "version": "2.7.11" |
| 190 | + "version": "2.7.13" |
272 | 191 | } |
273 | 192 | }, |
274 | 193 | "nbformat": 4, |
|
0 commit comments