From 450f7880108687e3db1f6fbdb48c423e833a0ada Mon Sep 17 00:00:00 2001
From: Raphael Freudiger
Date: Sat, 30 Jan 2016 16:03:13 +0100
Subject: [PATCH 1/2] feat(linear_regression): add summary writer

---
 .../linear_regression.py | 56 +++++++++++++------
 1 file changed, 38 insertions(+), 18 deletions(-)

diff --git a/examples/2 - Basic Classifiers/linear_regression.py b/examples/2 - Basic Classifiers/linear_regression.py
index a11b851d..d0714d40 100644
--- a/examples/2 - Basic Classifiers/linear_regression.py
+++ b/examples/2 - Basic Classifiers/linear_regression.py
@@ -12,7 +12,7 @@
 
 # Parameters
 learning_rate = 0.01
-training_epochs = 2000
+training_epochs = 5000
 display_step = 50
 
 # Training Data
@@ -20,9 +20,13 @@
 train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3])
 n_samples = train_X.shape[0]
 
+# Testing example, as requested (Issue #2)
+test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
+test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
+
 # tf Graph Input
-X = tf.placeholder("float")
-Y = tf.placeholder("float")
+x = tf.placeholder("float", name='x')
+y_ = tf.placeholder("float", name='y_')
 
 # Create Model
 
@@ -31,41 +35,57 @@
 b = tf.Variable(rng.randn(), name="bias")
 
 # Construct a linear model
-activation = tf.add(tf.mul(X, W), b)
+with tf.name_scope('Wx_b') as scope:
+    y = tf.add(tf.mul(x, W), b)
+
+# Add summary ops to collect data
+_ = tf.histogram_summary('weights', W)
+_ = tf.histogram_summary('biases', b)
+_ = tf.histogram_summary('y', y)
+
+#_ = tf.scalar_summary('biases', b)
+#_ = tf.scalar_summary('weights', W)
 
 # Minimize the squared errors
-cost = tf.reduce_sum(tf.pow(activation-Y, 2))/(2*n_samples) #L2 loss
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
+with tf.name_scope('cost') as scope:
+    cost = tf.reduce_sum(tf.pow(y-y_, 2))/(2*n_samples) #L2 loss
+    _ = tf.scalar_summary('cost', cost)
+
+with tf.name_scope('train') as scope:
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) #Gradient descent
+
+with tf.name_scope('test') as scope:
+    accuracy = tf.reduce_sum(tf.pow(y-y_, 2))/(2*n_samples) #L2 loss
+    _ = tf.scalar_summary('accuracy', accuracy)
 
 # Initializing the variables
+merged = tf.merge_all_summaries()
 init = tf.initialize_all_variables()
 
 # Launch the graph
 with tf.Session() as sess:
+    writer = tf.train.SummaryWriter('/tmp/tf_logs', sess.graph_def)
     sess.run(init)
 
     # Fit all training data
    for epoch in range(training_epochs):
-        for (x, y) in zip(train_X, train_Y):
-            sess.run(optimizer, feed_dict={X: x, Y: y})
+        for (xval, yval) in zip(train_X, train_Y):
+            sess.run(optimizer, feed_dict={x: xval, y_: yval})
 
         #Display logs per epoch step
         if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y:train_Y})), \
+            result = sess.run([merged, accuracy], feed_dict={x: test_X, y_:test_Y})
+            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(result[1]), \
                 "W=", sess.run(W), "b=", sess.run(b)
+            writer.add_summary(result[0], epoch)
 
     print "Optimization Finished!"
 
-    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
+    training_cost = sess.run(cost, feed_dict={x: train_X, y_: train_Y})
     print "Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n'
 
-    # Testing example, as requested (Issue #2)
-    test_X = numpy.asarray([6.83,4.668,8.9,7.91,5.7,8.7,3.1,2.1])
-    test_Y = numpy.asarray([1.84,2.273,3.2,2.831,2.92,3.24,1.35,1.03])
-
     print "Testing... (L2 loss Comparison)"
-    testing_cost = sess.run(tf.reduce_sum(tf.pow(activation-Y, 2))/(2*test_X.shape[0]),
-                            feed_dict={X: test_X, Y: test_Y}) #same function as cost above
+    testing_cost = sess.run(accuracy,
+                            feed_dict={x: test_X, y_: test_Y}) #same function as cost above
     print "Testing cost=", testing_cost
     print "Absolute l2 loss difference:", abs(training_cost - testing_cost)
 
@@ -74,4 +94,4 @@
     plt.plot(test_X, test_Y, 'bo', label='Testing data')
     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
     plt.legend()
-    plt.show()
\ No newline at end of file
+    plt.show()
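
Note on the pattern this first patch introduces (the second patch below follows the same one): summary ops are attached to tensors, merged into a single op with tf.merge_all_summaries, and the merged result is written to an event file once per display step. The sketch below is a minimal, self-contained illustration of that flow using the same TensorFlow 0.x API the patches target; the log directory /tmp/tf_logs matches the patches, while the toy variable, its name and the step count are made up purely for illustration.

    import tensorflow as tf

    LOG_DIR = '/tmp/tf_logs'  # same directory the patches write to

    # A toy value to log; 'step_var' is an illustrative name, not from the patches.
    step_var = tf.Variable(0.0, name='step_var')
    increment = step_var.assign_add(1.0)
    _ = tf.scalar_summary('step_var', step_var)   # attach a scalar summary op

    merged = tf.merge_all_summaries()             # one op that evaluates every summary
    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        writer = tf.train.SummaryWriter(LOG_DIR, sess.graph_def)
        sess.run(init)
        for step in range(10):
            sess.run(increment)
            summary = sess.run(merged)
            writer.add_summary(summary, step)     # tag each record with its step
        writer.flush()

The resulting event files can then be inspected with TensorBoard, for example by running tensorboard --logdir=/tmp/tf_logs and opening the reported address in a browser.
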
From 5d40f1ab19728608c779fb85b22101b79df31aac Mon Sep 17 00:00:00 2001
From: Raphael Freudiger
Date: Sat, 30 Jan 2016 16:03:30 +0100
Subject: [PATCH 2/2] feat(logistic_regression): add summary writer

---
 .../logistic_regression.py | 49 +++++++++++++------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/examples/2 - Basic Classifiers/logistic_regression.py b/examples/2 - Basic Classifiers/logistic_regression.py
index 0a06a9a9..8a74ae3c 100644
--- a/examples/2 - Basic Classifiers/logistic_regression.py
+++ b/examples/2 - Basic Classifiers/logistic_regression.py
@@ -19,48 +19,69 @@
 display_step = 1
 
 # tf Graph Input
-x = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
-y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
+x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
+y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
 
 # Create model
 
 # Set model weights
-W = tf.Variable(tf.zeros([784, 10]))
-b = tf.Variable(tf.zeros([10]))
+W = tf.Variable(tf.zeros([784, 10]),name='W')
+b = tf.Variable(tf.zeros([10]),name='b')
 
 # Construct model
-activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+with tf.name_scope('Wx_b') as scope:
+    activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
+
+# export some variables
+_ = tf.histogram_summary('weights', W)
+_ = tf.histogram_summary('biases', b)
+_ = tf.histogram_summary('activation', activation)
 
 # Minimize error using cross entropy
-cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(activation), reduction_indices=1)) # Cross entropy
-optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
+with tf.name_scope('cost') as scope:
+    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(activation), reduction_indices=1)) # Cross entropy
+    _ = tf.scalar_summary('cost', cost)
+
+avg_cost = tf.Variable( 0.,name='avg_cost')
+with tf.name_scope('train') as scope:
+    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
+    tf_total_batch = tf.placeholder("float", None, name='total_batch')
+    avg_add = avg_cost.assign_add(tf.div(cost,tf_total_batch))
+    avg_reset = avg_cost.assign(0.)
+    _ = tf.scalar_summary('avg_cost', avg_cost)
 
 # Initializing the variables
+merged = tf.merge_all_summaries()
 init = tf.initialize_all_variables()
 
 # Launch the graph
 with tf.Session() as sess:
+    writer = tf.train.SummaryWriter('/tmp/tf_logs', sess.graph_def)
     sess.run(init)
 
     # Training cycle
     for epoch in range(training_epochs):
-        avg_cost = 0.
+        sess.run(avg_reset)
         total_batch = int(mnist.train.num_examples/batch_size)
         # Loop over all batches
         for i in range(total_batch):
             batch_xs, batch_ys = mnist.train.next_batch(batch_size)
             # Fit training using batch data
             sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
-            # Compute average loss
-            avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
+            current_cost, avg = sess.run([cost, avg_add], feed_dict={x: batch_xs, y: batch_ys, tf_total_batch: total_batch})
+            #avg = sess.run(avg_add, feed_dict={x: batch_xs, y: batch_ys})
         # Display logs per epoch step
         if epoch % display_step == 0:
-            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
+            # Compute average loss
+            result = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys})
+            print "Epoch:", '%04d' % (epoch+1), "cost=", avg
+            writer.add_summary(result, epoch)
 
     print "Optimization Finished!"
 
     # Test model
-    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
-    # Calculate accuracy
-    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
+    with tf.name_scope('test') as scope:
+        correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
+        # Calculate accuracy
+        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
     print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
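
A side note on the avg_cost change in this second patch: the plain Python accumulator is replaced by a graph variable that is reset at the start of each epoch and incremented by cost/total_batch on every batch, so that merge_all_summaries can pick it up as the 'avg_cost' scalar. Below is a minimal sketch of that accumulator pattern on its own, again against the TF 0.x API; the names running_avg, value_ph and n_ph and the toy cost values are illustrative and do not appear in the patch.

    import tensorflow as tf

    # Graph-side accumulator; running_avg, value_ph and n_ph are illustrative names.
    running_avg = tf.Variable(0., name='running_avg')
    value_ph = tf.placeholder("float", name='value')
    n_ph = tf.placeholder("float", name='n')

    add_fraction = running_avg.assign_add(tf.div(value_ph, n_ph))  # avg += value / n
    reset = running_avg.assign(0.)                                 # run at the start of each epoch

    _ = tf.scalar_summary('running_avg', running_avg)
    init = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(2):
            sess.run(reset)
            batch_costs = [3.0, 5.0, 4.0]   # stand-in for per-batch cost values
            for c in batch_costs:
                avg = sess.run(add_fraction,
                               feed_dict={value_ph: c, n_ph: len(batch_costs)})
            print "epoch", epoch, "avg cost =", avg   # 4.0 for this toy data

Keeping the average in a graph variable is what allows it to be logged as a summary at all; the plain Python float it replaces was invisible to merge_all_summaries.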