From 4afdda2075d031efb2d7a736a829c901f1e695ea Mon Sep 17 00:00:00 2001
From: ChenQuan
Date: Sat, 28 Sep 2019 18:58:27 +0800
Subject: [PATCH 1/2] Change the network input size

---
 src/Main.py  | 10 +++++-----
 src/utils.py |  9 ++++++---
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/Main.py b/src/Main.py
index 1fe61c5..404f892 100644
--- a/src/Main.py
+++ b/src/Main.py
@@ -6,7 +6,7 @@
 from sklearn.model_selection import train_test_split
 from sklearn.utils import shuffle

-from src.utils import get_data
+from src.utils import get_data, image_h_w
 from src.UNetKeras import UNetKeras
 import tensorflow as tf

@@ -19,7 +19,7 @@
     del X, y

     print("========= Build model =========")
-    model = UNetKeras()
+    model = UNetKeras(height=image_h_w, width=image_h_w)
     model.compile()

     print("========= Start train model =========")
@@ -38,8 +38,8 @@
     import numpy as np
     import os

-    images_test = np.reshape(np.argmax(y_test, axis=-1), (-1, 512, 512))
-    images_pred = np.reshape(pred, (-1, 512, 512))
+    images_test = np.reshape(np.argmax(y_test, axis=-1), (-1, image_h_w, image_h_w))
+    images_pred = np.reshape(pred, (-1, image_h_w, image_h_w))
     predict_file_path = ".predict_images_last_saved_model/"
     if not os.path.exists(predict_file_path):
         os.mkdir(predict_file_path)
@@ -61,7 +61,7 @@
     val_best_model = tf.keras.models.load_model("model/val_best_model.h5")
     val_best_model.evaluate(X_test, y_test, batch_size=32)
     pred = val_best_model.predict(X_test)
-    images_pred = np.reshape(np.argmax(pred, axis=-1), (-1, 512, 512))
+    images_pred = np.reshape(np.argmax(pred, axis=-1), (-1, image_h_w, image_h_w))

     for i in range(len(images_pred)):
         # plt.subplot(1, 2, 1)
diff --git a/src/utils.py b/src/utils.py
index 1d90b4b..8d1234c 100644
--- a/src/utils.py
+++ b/src/utils.py
@@ -8,6 +8,9 @@
 import numpy as np
 import h5py
 from scipy import ndimage
+import scipy
+
+image_h_w = 256


 # Load the data
@@ -24,7 +27,8 @@ def _load():
         for i in range(1, 2201):
             now_file_path = "../data/Image/IM" + str(i) + ".png"
             image = np.array(ndimage.imread(now_file_path, flatten=False))
-            images.append(image)  # images shape=(m,64,64,3)
+            image = scipy.misc.imresize(image, size=(image_h_w, image_h_w))
+            images.append(image)
         images = np.array(images, copy=True)
         file = h5py.File('../data/images.h5', 'w')  # Create the HDF5 file
         file.create_dataset('images', data=images)  # Write the images
@@ -50,7 +54,7 @@ def _load():
     labels = flie.get("labels")
     labels = np.array(labels, dtype=np.float32)

-    images = images/255.
+    images = images / 255.
     train_image = np.expand_dims(images, -1)
     print(train_image.shape)
     train_label = np.expand_dims(labels, -1)
@@ -89,4 +93,3 @@ def encode_one_hot(x, classes_num=3):
     else:
         raise IndexError("The last dimension is not 1")
     return x_tiled
-

From 867e323698b7bdcba6f4207c0c375c0da79fa033 Mon Sep 17 00:00:00 2001
From: Chen Quan
Date: Sun, 29 Sep 2019 09:25:17 +0800
Subject: [PATCH 2/2] Update BATCH_SIZE var

---
 src/Main.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/Main.py b/src/Main.py
index 404f892..effe030 100644
--- a/src/Main.py
+++ b/src/Main.py
@@ -9,7 +9,7 @@
 from src.utils import get_data, image_h_w
 from src.UNetKeras import UNetKeras
 import tensorflow as tf
-
+BATCH_SIZE = 1
 if __name__ == "__main__":
     print("========= Get data =========")
     X, y = get_data()
@@ -25,15 +25,15 @@
     print("========= Start train model =========")
     ModelCheckpoint = tf.keras.callbacks.ModelCheckpoint("model/val_best_model.h5", monitor="val_loss", verbose=1,
                                                          save_best_only=True)
-    model.fit(X_train, y_train, batch_size=1, epochs=10, validation_split=0.01, callbacks=[ModelCheckpoint])
+    model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=10, validation_split=0.01, callbacks=[ModelCheckpoint])

     print("========= Save last time model =========")
     model.model.save("model/model_final_time.h5")

     print("========= Model evaluate Start =========")
     print("========= Test the last saved model =========")
-    model.model.evaluate(X_test, y_test, batch_size=32)
-    pred = model.predict(X_test)
+    model.model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
+    pred = model.predict(X_test, batch_size=BATCH_SIZE)
     import matplotlib.pyplot as plt
     import numpy as np
     import os
@@ -59,8 +59,8 @@
         os.mkdir(predict_file_path)

     val_best_model = tf.keras.models.load_model("model/val_best_model.h5")
-    val_best_model.evaluate(X_test, y_test, batch_size=32)
-    pred = val_best_model.predict(X_test)
+    val_best_model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
+    pred = val_best_model.predict(X_test, batch_size=BATCH_SIZE)
     images_pred = np.reshape(np.argmax(pred, axis=-1), (-1, image_h_w, image_h_w))

     for i in range(len(images_pred)):
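
Note on PATCH 1/2: the resize added in _load() relies on scipy.ndimage.imread and scipy.misc.imresize, both deprecated in SciPy 1.0 and removed from recent SciPy releases, and depending on the SciPy version `import scipy` alone may not expose scipy.misc at all. The snippet below is only a sketch of an equivalent loader built on Pillow (an extra dependency not used in this series); the paths, the 1..2200 range and image_h_w come from the patch, while the grayscale conversion ("L") is an assumption based on the later np.expand_dims(images, -1).

# Sketch of a loader that avoids the removed SciPy image helpers (assumes Pillow).
import numpy as np
from PIL import Image

from src.utils import image_h_w

images = []
for i in range(1, 2201):
    now_file_path = "../data/Image/IM" + str(i) + ".png"
    with Image.open(now_file_path) as img:
        # "L" (grayscale) is an assumption; bilinear roughly matches imresize's default.
        img = img.convert("L").resize((image_h_w, image_h_w), Image.BILINEAR)
        images.append(np.asarray(img))
images = np.array(images, copy=True)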
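
Note on PATCH 2/2: the sketch below is not part of the series; it is a minimal smoke test, assuming only the interfaces these patches already use (get_data, image_h_w, UNetKeras(height=..., width=...), compile, predict), to check that the resized inputs, the model and BATCH_SIZE agree on the new spatial size before a full training run.

# Minimal smoke test; BATCH_SIZE mirrors the constant introduced in PATCH 2/2.
from src.utils import get_data, image_h_w
from src.UNetKeras import UNetKeras

BATCH_SIZE = 1

X, y = get_data()
assert X.shape[1:3] == (image_h_w, image_h_w), X.shape

model = UNetKeras(height=image_h_w, width=image_h_w)
model.compile()
pred = model.predict(X[:BATCH_SIZE], batch_size=BATCH_SIZE)
print(pred.shape)  # spatial dimensions should follow image_h_w, not the old 512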