diff --git a/README.md b/README.md
index 6784530d..c94991fb 100644
--- a/README.md
+++ b/README.md
@@ -70,3 +70,6 @@ For more details about TensorFlow installation, you can check [Setup_TensorFlow.
 
 ## Dataset
 Some examples require MNIST dataset for training and testing. Don't worry, this dataset will automatically be downloaded when running examples (with input_data.py). MNIST is a database of handwritten digits, with 60,000 examples for training and 10,000 examples for testing. (Website: [http://yann.lecun.com/exdb/mnist/](http://yann.lecun.com/exdb/mnist/))
+
+
+## TODO : Python 3.5 compatibility
\ No newline at end of file
diff --git a/examples/1 - Introduction/basic_operations.py b/examples/1 - Introduction/basic_operations.py
index afdef20b..47c07857 100644
--- a/examples/1 - Introduction/basic_operations.py
+++ b/examples/1 - Introduction/basic_operations.py
@@ -15,9 +15,9 @@
 
 # Launch the default graph.
 with tf.Session() as sess:
-    print "a=2, b=3"
-    print "Addition with constants: %i" % sess.run(a+b)
-    print "Multiplication with constants: %i" % sess.run(a*b)
+    print ("a=2, b=3")
+    print ("Addition with constants: %i" % sess.run(a+b))
+    print ("Multiplication with constants: %i" % sess.run(a*b))
 
 # Basic Operations with variable as graph input
 # The value returned by the constructor represents the output
@@ -33,8 +33,8 @@
 # Launch the default graph.
 with tf.Session() as sess:
     # Run every operation with variable input
-    print "Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})
-    print "Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})
+    print ("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3}))
+    print ("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3}))
 
 
 # ----------------
@@ -69,5 +69,5 @@
 # The output of the op is returned in 'result' as a numpy `ndarray` object.
 with tf.Session() as sess:
     result = sess.run(product)
-    print result
+    print (result)
     # ==> [[ 12.]]
diff --git a/examples/1 - Introduction/helloworld.py b/examples/1 - Introduction/helloworld.py
index 554cbed4..5e222775 100644
--- a/examples/1 - Introduction/helloworld.py
+++ b/examples/1 - Introduction/helloworld.py
@@ -19,4 +19,4 @@
 
 # Start tf session
 sess = tf.Session()
-print sess.run(hello)
\ No newline at end of file
+print (sess.run(hello))
\ No newline at end of file
diff --git a/examples/2 - Basic Classifiers/input_data.py b/examples/2 - Basic Classifiers/input_data.py
index d1d0d28e..e94625b6 100644
--- a/examples/2 - Basic Classifiers/input_data.py
+++ b/examples/2 - Basic Classifiers/input_data.py
@@ -4,6 +4,8 @@
 import os
 import urllib
 import numpy
+from urllib import request
+
 SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
 def maybe_download(filename, work_directory):
   """Download the data from Yann's website, unless it's already here."""
@@ -11,7 +13,7 @@ def maybe_download(filename, work_directory):
     os.mkdir(work_directory)
   filepath = os.path.join(work_directory, filename)
   if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
+    filepath, _ = request.urlretrieve(SOURCE_URL + filename, filepath)
     statinfo = os.stat(filepath)
     print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
   return filepath
diff --git a/examples/2 - Basic Classifiers/linear_regression.py b/examples/2 - Basic Classifiers/linear_regression.py
index a11b851d..c9eadd83 100644
--- a/examples/2 - Basic Classifiers/linear_regression.py
+++ b/examples/2 - Basic Classifiers/linear_regression.py
@@ -51,23 +51,23 @@
 
         #Display logs per epoch step
        if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
-                "W=", sess.run(W), "b=", sess.run(b)
+            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
+                "W=", sess.run(W), "b=", sess.run(b))
 
-    print "Optimization Finished!"
+    print ("Optimization Finished!")
     training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
-    print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
+    print ("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
 
 
     # Testing example, as requested (Issue #2)
     test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
     test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
 
-    print "Testing... (L2 loss Comparison)"
+    print ("Testing... (L2 loss Comparison)")
     testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
                             feed_dict={X: test_X, Y: test_Y}) #same function as cost above
-    print "Testing cost=", testing_cost
-    print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
+    print ("Testing cost=", testing_cost)
+    print ("Absolute l2 loss difference:", abs(training_cost - testing_cost))
 
     #Graphic display
     plt.plot(train_X, train_Y, 'ro', label='Original data')
diff --git a/examples/2 - Basic Classifiers/logistic_regression.py b/examples/2 - Basic Classifiers/logistic_regression.py
index 0a06a9a9..f366d737 100644
--- a/examples/2 - Basic Classifiers/logistic_regression.py
+++ b/examples/2 - Basic Classifiers/logistic_regression.py
@@ -55,12 +55,12 @@
             avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
         # Display logs per epoch step
         if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
+            print ("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
 
-    print "Optimization Finished!"
+    print ("Optimization Finished!")
 
     # Test model
     correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
     # Calculate accuracy
     accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
-    print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
+    print ("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
diff --git a/examples/2 - Basic Classifiers/nearest_neighbor.py b/examples/2 - Basic Classifiers/nearest_neighbor.py
index 6a1f726d..981a0db8 100644
--- a/examples/2 - Basic Classifiers/nearest_neighbor.py
+++ b/examples/2 - Basic Classifiers/nearest_neighbor.py
@@ -45,10 +45,10 @@
         # Get nearest neighbor
         nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i,:]})
         # Get nearest neighbor class label and compare it to its true label
-        print "Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i])
+        print ("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), "True Class:", np.argmax(Yte[i]))
         # Calculate accuracy
         if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
             accuracy += 1./len(Xte)
 
-    print "Done!"
-    print "Accuracy:", accuracy
+    print ("Done!")
+    print ("Accuracy:", accuracy)
diff --git a/examples/3 - Neural Networks/bidirectional_rnn.py b/examples/3 - Neural Networks/bidirectional_rnn.py
index 3c9e4bfb..cef2f735 100644
--- a/examples/3 - Neural Networks/bidirectional_rnn.py
+++ b/examples/3 - Neural Networks/bidirectional_rnn.py
@@ -150,14 +150,14 @@ def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases, _batch_size, _seq_len):
             loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys,
                                              istate_fw: np.zeros((batch_size, 2*n_hidden)),
                                              istate_bw: np.zeros((batch_size, 2*n_hidden))})
-            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
-                ", Training Accuracy= " + "{:.5f}".format(acc)
+            print ("Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + \
+                ", Training Accuracy= " + "{:.5f}".format(acc))
         step += 1
-    print "Optimization Finished!"
+    print ("Optimization Finished!")
     # Calculate accuracy for 128 mnist test images
     test_len = 128
     test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
     test_label = mnist.test.labels[:test_len]
-    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
+    print ("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label,
                                                              istate_fw: np.zeros((test_len, 2*n_hidden)),
-                                                             istate_bw: np.zeros((test_len, 2*n_hidden))})
+                                                             istate_bw: np.zeros((test_len, 2*n_hidden))}))
diff --git a/examples/3 - Neural Networks/input_data.py b/examples/3 - Neural Networks/input_data.py
index d1d0d28e..08df3129 100644
--- a/examples/3 - Neural Networks/input_data.py
+++ b/examples/3 - Neural Networks/input_data.py
@@ -2,7 +2,7 @@
 from __future__ import print_function
 import gzip
 import os
-import urllib
+from urllib import request
 import numpy
 SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
 def maybe_download(filename, work_directory):
@@ -11,7 +11,7 @@ def maybe_download(filename, work_directory):
     os.mkdir(work_directory)
   filepath = os.path.join(work_directory, filename)
   if not os.path.exists(filepath):
-    filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
+    filepath, _ = request.urlretrieve(SOURCE_URL + filename, filepath)
     statinfo = os.stat(filepath)
     print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
   return filepath
diff --git a/examples/4 - Multi GPU/multigpu_basics.py b/examples/4 - Multi GPU/multigpu_basics.py
index 5727dfe2..3fedbcf0 100644
--- a/examples/4 - Multi GPU/multigpu_basics.py
+++ b/examples/4 - Multi GPU/multigpu_basics.py
@@ -87,5 +87,5 @@ def matpow(M, n):
 
 t2_2 = datetime.datetime.now()
 
-print "Single GPU computation time: " + str(t2_1-t1_1)
-print "Multi GPU computation time: " + str(t2_2-t1_2)
\ No newline at end of file
+print ("Single GPU computation time: " + str(t2_1-t1_1))
+print ("Multi GPU computation time: " + str(t2_2-t1_2))
\ No newline at end of file