|
import tensorflow as tf
import numpy as np

# Train a two-layer fully connected network with manually written SGD
# updates on a fixed random batch, printing the loss at every step.
#
# NOTE(review): this uses the TensorFlow 1.x graph API
# (tf.placeholder / tf.Session / tf.random_normal). Under TF 2.x it must
# be run through tf.compat.v1 with eager execution disabled.

# Batch size, input dim, hidden dim, output dim.
N, D_in, H, D_out = 64, 1000, 100, 10

# Graph inputs; the batch dimension is left dynamic (None).
x = tf.placeholder(tf.float32, shape=(None, D_in))
y = tf.placeholder(tf.float32, shape=(None, D_out))

# Trainable weights, initialized from a standard normal distribution.
w1 = tf.Variable(tf.random_normal((D_in, H)))
w2 = tf.Variable(tf.random_normal((H, D_out)))

# Forward pass: linear -> ReLU -> linear.
h = tf.matmul(x, w1)
# Idiomatic ReLU; the original tf.maximum(h, tf.zeros(1)) relied on
# broadcasting a shape-(1,) zeros tensor for the same effect.
h_relu = tf.nn.relu(h)
y_pred = tf.matmul(h_relu, w2)
# Sum-of-squared-errors loss over the whole batch.
loss = tf.reduce_sum((y - y_pred) ** 2.0)

# Manual gradient-descent update ops: running new_w1 / new_w2 applies
# one SGD step in-place on the variables.
learning_rate = 1e-6
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])
new_w1 = w1.assign(w1 - learning_rate * grad_w1)
new_w2 = w2.assign(w2 - learning_rate * grad_w2)


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # One fixed random batch; the network simply overfits it.
    x_value = np.random.randn(N, D_in)
    y_value = np.random.randn(N, D_out)
    for _ in range(500):
        # Fetching the assign ops alongside the loss performs the update.
        loss_value, _, _ = sess.run(
            [loss, new_w1, new_w2],
            feed_dict={x: x_value, y: y_value})
        print(loss_value)