|
11 | 11 | import data_generation |
12 | 12 | import models |
13 | 13 |
14 | | -batch_size = 32 |
| 14 | +batch_size = 16 |
15 | 15 | seq_len = 100 # This is equivalent to time steps of the sequence in keras |
16 | 16 | input_size = 1 |
17 | 17 | hidden_size = 51 |
18 | | -nb_layers = 1 |
19 | 18 | target_size = 1 |
20 | 19 | nb_samples = 1000 |
21 | 20 | nb_epochs = 20 |
|
36 | 35 | val_loss = 0 |
37 | 36 | rnn.train(True) |
38 | 37 | for batch, i in enumerate(range(0, X_train.size(0) - 1, batch_size)): |
39 | | - data, targets = data_generation.get_batch(X_train, y_train, i) |
| 38 | + # print "batch = ", batch, " -- i = ", i |
| 39 | + data, targets = data_generation.get_batch(X_train, y_train, i, batch_size=batch_size) |
| 40 | + # print ("X_train.size() = {}, y_train.size() = {} \n data.size() = {}, targets.size() = {}" |
| 41 | + # .format(X_train.size(), y_train.size(), data.size(), targets.size())) |
40 | 42 | output = rnn(data) |
41 | 43 | optimizer.zero_grad() |
42 | 44 | loss = loss_fn(output, targets) |
43 | 45 | loss.backward() |
44 | 46 | optimizer.step() |
45 | 47 | training_loss += loss.data[0] |
| 48 | + # print "-"*30 |
| 49 | + # exit() |
46 | 50 | training_loss /= batch |
47 | 51 | rnn.train(False) |
48 | 52 | for batch, i in enumerate(range(0, X_val.size(0) - 1, batch_size)): |
49 | | - data, targets = data_generation.get_batch(X_val, y_val, i) |
| 53 | + data, targets = data_generation.get_batch(X_val, y_val, i, batch_size=batch_size) |
50 | 54 | output = rnn(data) |
51 | 55 | loss = loss_fn(output, targets) |
52 | 56 | val_loss += loss.data[0] |
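The substantive change in this hunk is that get_batch now receives the batch size explicitly instead of relying on a value fixed inside data_generation.py. That module is not part of this diff, so the following is only a plausible sketch of what such a helper does, namely slicing the next chunk of samples out of the full tensors:

def get_batch(X, y, i, batch_size=16):
    # Take up to batch_size consecutive samples starting at row i;
    # clamping keeps the final, shorter batch from indexing past the end.
    batch_len = min(batch_size, X.size(0) - i)
    data = X[i:i + batch_len]
    targets = y[i:i + batch_len]
    return data, targets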
|
63 | 67 | list2 = [] |
64 | 68 | for batch, i in enumerate(range(0, X_test.size(0) - 1, batch_size)): |
65 | 69 | print i |
66 | | - data, targets = data_generation.get_batch(X_test, y_test, i) |
| 70 | + data, targets = data_generation.get_batch(X_test, y_test, i, batch_size=batch_size) |
67 | 71 | output = rnn(data) |
68 | 72 | loss = loss_fn(output, targets) |
69 | 73 | test_loss += loss.data[0] |
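The test loop accumulates per-batch losses into test_loss exactly as the training and validation loops do. If the number of mini-batches is needed explicitly (for averaging or logging), it can be read off the same range expression the loop iterates over, for example:

nb_test_batches = len(range(0, X_test.size(0) - 1, batch_size))
mean_test_loss = test_loss / nb_test_batches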
|
86 | 90 | output = torch.squeeze(output).data.cpu().numpy() |
87 | 91 | plt.figure() |
88 | 92 | plt.plot(output) |
| 93 | +plt.xlabel("Time step") |
| 94 | +plt.ylabel("Signal amplitude") |
89 | 95 | plt.show() |
90 | 96 | """ |
91 | 97 | Generating sequences - attempt 2 --> Concatenating the output with the input, and feed the new data point to the model. |
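The closed-loop generation described in this comment works by seeding the model with a window of real data, predicting one step ahead, appending the prediction to the window, and repeating. Below is a minimal sketch of that loop, assuming the model maps a (1, seq_len, 1) window to a single next value; the seed window, horizon, and shapes are placeholders rather than code from this repository:

n_generate = 200                                  # arbitrary generation horizon
window = X_test[:1].clone()                       # assumed (1, seq_len, 1) seed window of real data
generated = []
for _ in range(n_generate):
    next_value = rnn(window)                      # one-step-ahead prediction from the current window
    generated.append(float(next_value))
    # Drop the oldest time step and append the prediction as the newest input.
    window = torch.cat([window[:, 1:, :], next_value.view(1, 1, 1)], dim=1)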