"""
Created on Sat Mar 24 11:25:41 2018

@author: Muhammed Buyukkinaci
"""
#Importing libraries
import tensorflow as tf # ML library for graphs
import cv2 # image processing
import numpy as np # mathematical operations
import os # working with directories
from random import shuffle # shuffling ordered data that would otherwise bias training
from tqdm import tqdm # a nice progress bar for loops

IMG_SIZE = 80 # our images are 80x80x3

#These directories are not defined in the original snippet; adjust them to
#wherever your training/testing images live (assumed layout).
TRAIN_DIR = os.path.join(os.getcwd(), 'training')
TEST_DIR = os.path.join(os.getcwd(), 'testing')

print(os.getcwd())

| 18 | +""" |
| 19 | +#Converting the output into one-hot format |
| 20 | +def label_img(img): |
| 21 | + word_label = img.split('_')[0] |
| 22 | + if word_label == 'chair': return [1,0,0,0] |
| 23 | + elif word_label == 'kitchen': return [0,1,0,0] |
| 24 | + elif word_label == 'knife': return [0,0,1,0] |
| 25 | + elif word_label == 'saucepan': return [0,0,0,1] |
| 26 | +""" |
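#Example of the mapping above: assuming filenames follow a '<class>_<id>.jpg'
#convention (implied by the split on '_'), label_img('chair_001.jpg')
#would return the one-hot vector [1,0,0,0].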
| 27 | +""" |
| 28 | +#A function to read images from training folder |
| 29 | +def create_train_data(): |
| 30 | + training_data = [] |
| 31 | + for img in tqdm(os.listdir(TRAIN_DIR)): |
| 32 | + label = label_img(img) |
| 33 | + path = TRAIN_DIR + '\\' + img |
| 34 | + img = cv2.imread(path,1) |
| 35 | + img = cv2.resize(img, (IMG_SIZE,IMG_SIZE)) |
| 36 | + training_data.append([np.array(img),np.array(label)]) |
| 37 | + shuffle(training_data) |
| 38 | + np.save('train_data_mc.npy', training_data) |
| 39 | + return training_data |
| 40 | +""" |
| 41 | +#If you are reading images, make the line below uncomment |
| 42 | +#train_data = create_train_data() |
| 43 | +#If you are reading .npy file, use the line below. |
| 44 | +train_data = np.load('train_data_mc.npy') |
| 45 | + |
"""
#A function to read images from testing folder
def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        label = label_img(img)
        path = os.path.join(TEST_DIR, img)
        img = cv2.imread(path,1)
        img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))
        testing_data.append([np.array(img),np.array(label)])
    shuffle(testing_data)
    np.save('test_data_mc.npy', testing_data)
    return testing_data
"""
#If you are reading images, uncomment the line below.

#test_data = process_test_data()

#If you are reading the .npy file, use the line below.
test_data = np.load('test_data_mc.npy', allow_pickle=True)

#For visualizations
import matplotlib.pyplot as plt

#cv2.imread loads images in BGR order, so convert to RGB before displaying;
#otherwise the colors look swapped.

#A CHAIR
plt.imshow(cv2.cvtColor(train_data[180][0], cv2.COLOR_BGR2RGB))
plt.show()

#KITCHEN
plt.imshow(cv2.cvtColor(train_data[100][0], cv2.COLOR_BGR2RGB))
plt.show()

#KNIFE
plt.imshow(cv2.cvtColor(train_data[111][0], cv2.COLOR_BGR2RGB))
plt.show()

#SAUCEPAN
plt.imshow(cv2.cvtColor(train_data[154][0], cv2.COLOR_BGR2RGB))
plt.show()

#Splitting train and CV data
train = train_data[:4800]
cv = train_data[4800:]

X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
Y = np.array([i[1] for i in train])

cv_x = np.array([i[0] for i in cv]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
cv_y = np.array([i[1] for i in cv])

test_x = np.array([i[0] for i in test_data]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
test_y = np.array([i[1] for i in test_data])

print(X.shape)
print(Y[2])
print(cv_x.shape)
print(test_x.shape)

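#Optional and not part of the original pipeline: scaling pixels to [0, 1]
#often speeds up convergence. A minimal sketch, assuming float32 inputs:
#X = X.astype(np.float32) / 255.0
#cv_x = cv_x.astype(np.float32) / 255.0
#test_x = test_x.astype(np.float32) / 255.0
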
#HYPERPARAMETERS
steps = 4800
epochs = 60
step_size = 16
total_batch = int(steps/step_size)
LR = 0.00001

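#With these settings, each epoch runs 4800 / 16 = 300 mini-batches,
#so the whole run performs 60 * 300 = 18,000 parameter updates.
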
#FUNCTIONS TO CODE LAYERS
def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)

def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2by2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def convolutional_layer(input_x, shape):
    W = init_weights(shape)
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)

def normal_full_layer(input_layer, size):
    input_size = int(input_layer.get_shape()[1])
    W = init_weights([input_size, size])
    b = init_bias([size])
    return tf.matmul(input_layer, W) + b

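#Shape sanity check (the arithmetic the model below relies on): the 'SAME'-padded
#stride-1 convolutions preserve spatial size, while each 2x2 max-pool with
#stride 2 halves it, so the feature maps shrink 80 -> 40 -> 20 -> 10 -> 5 over
#four pool layers. After the last pool the tensor is 5 x 5 x 128, which is why
#the flatten in the graph reshapes to 5*5*128 = 3200 features.
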
print(X[5].shape)

#GRAPH
#Resetting the default graph before building the model; this does not need a Session.
tf.reset_default_graph()

#Defining Placeholders
x = tf.placeholder(tf.float32,shape=[None,IMG_SIZE,IMG_SIZE,3])
y_true = tf.placeholder(tf.float32,shape=[None,4])

#Defining GRAPH
with tf.name_scope('Model'):
    convo_1 = convolutional_layer(x,shape=[6,6,3,32])
    convo_1_pooling = max_pool_2by2(convo_1)
    convo_2 = convolutional_layer(convo_1_pooling,shape=[6,6,32,64])
    convo_2_pooling = max_pool_2by2(convo_2)
    convo_3 = convolutional_layer(convo_2_pooling,shape=[6,6,64,64])
    convo_3_pooling = max_pool_2by2(convo_3)
    convo_4 = convolutional_layer(convo_3_pooling,shape=[6,6,64,128])
    convo_4_pooling = max_pool_2by2(convo_4)
    #Renamed from convo_2_flat: this flattens the fourth pooling layer.
    convo_4_flat = tf.reshape(convo_4_pooling,[-1,5*5*128])

    full_layer_one = tf.nn.relu(normal_full_layer(convo_4_flat,4096))
    hold_prob1 = tf.placeholder(tf.float32)
    full_one_dropout = tf.nn.dropout(full_layer_one,keep_prob=hold_prob1)

    full_layer_two = tf.nn.relu(normal_full_layer(full_one_dropout,2048))
    hold_prob2 = tf.placeholder(tf.float32)
    full_two_dropout = tf.nn.dropout(full_layer_two,keep_prob=hold_prob2)

    full_layer_three = tf.nn.relu(normal_full_layer(full_two_dropout,1024))
    hold_prob3 = tf.placeholder(tf.float32)
    full_three_dropout = tf.nn.dropout(full_layer_three,keep_prob=hold_prob3)

    full_layer_four = tf.nn.relu(normal_full_layer(full_three_dropout,512))
    hold_prob4 = tf.placeholder(tf.float32)
    full_four_dropout = tf.nn.dropout(full_layer_four,keep_prob=hold_prob4)

    y_pred = normal_full_layer(full_four_dropout,4)

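#Note: y_pred holds raw logits, not probabilities; softmax is applied inside
#the cross-entropy loss below, so no explicit softmax layer is needed here.
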
#Writing Loss and Accuracy
with tf.name_scope('Loss'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true,logits=y_pred))

with tf.name_scope('ADAM'):
    #Named train_op so it does not shadow the `train` data split above.
    train_op = tf.train.AdamOptimizer(learning_rate=LR).minimize(cross_entropy)

with tf.name_scope('Accuracy'):
    matches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1))
    acc = tf.reduce_mean(tf.cast(matches,tf.float32))

init = tf.global_variables_initializer()

tf.summary.scalar("loss", cross_entropy)
tf.summary.scalar("accuracy", acc)
merged_summary_op = tf.summary.merge_all()

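#To view the logged loss/accuracy curves, point TensorBoard at the directory
#passed to tf.summary.FileWriter below (TRAIN_DIR here), e.g.:
#  tensorboard --logdir <that directory>
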
acc_list = []
cross_entropy_list = []
acc_train = []

saver = tf.train.Saver()

#If you are training on a CPU, the plain `with tf.Session() as sess:` below is all you need.
#Starting Session

with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter(TRAIN_DIR, graph=tf.get_default_graph())
    for i in range(epochs):
        for j in range(0,steps,step_size):
            _ , c , summary , d = sess.run([train_op,cross_entropy,merged_summary_op,acc],
                                           feed_dict={x:X[j:j+step_size], y_true:Y[j:j+step_size],
                                                      hold_prob1:0.5, hold_prob2:0.5, hold_prob3:0.5, hold_prob4:0.5})
            #j advances in steps of step_size, so divide to get the global batch index.
            summary_writer.add_summary(summary, i * total_batch + j // step_size)
            acc_train.append(d)
        mean_of_cross_entropy = sess.run(cross_entropy,feed_dict={x:cv_x, y_true:cv_y,
                                                                  hold_prob1:1.0, hold_prob2:1.0, hold_prob3:1.0, hold_prob4:1.0})
        mean_of_acc = sess.run(acc,feed_dict={x:cv_x, y_true:cv_y,
                                              hold_prob1:1.0, hold_prob2:1.0, hold_prob3:1.0, hold_prob4:1.0})
        cross_entropy_list.append(mean_of_cross_entropy)
        acc_list.append(mean_of_acc)
        print(i,mean_of_cross_entropy,mean_of_acc)
    saver.save(sess, os.path.join(os.getcwd(), "CNN_MC.ckpt"))
    #Evaluating the test set in three chunks to keep memory usage manageable.
    test_slices = [slice(0, 400), slice(400, 800), slice(800, None)]
    def test_feed(s):
        return {x:test_x[s], y_true:test_y[s],
                hold_prob1:1.0, hold_prob2:1.0, hold_prob3:1.0, hold_prob4:1.0}
    print("test accuracy = ", np.mean([sess.run(acc, feed_dict=test_feed(s)) for s in test_slices]))
    print("cross_entropy loss = ", np.mean([sess.run(cross_entropy, feed_dict=test_feed(s)) for s in test_slices]))

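#Caveat: averaging the three chunk means above is exact only if the chunks are
#equal in size; if the test set is not a multiple of 400, the last chunk is
#weighted slightly differently than a true whole-set mean.
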
import pandas as pd

#plt.show() after each plot keeps the two curves in separate figures.
pd.Series(acc_list).plot(kind='line',title='Accuracy on CV data')
plt.show()

pd.Series(cross_entropy_list).plot(kind='line',title='CV ERROR')
plt.show()

#RESTORING and MAKING PREDICTIONS FOR FIRST 64 IMAGES
with tf.Session() as session:
    #Using the same path the checkpoint was saved to above.
    saver.restore(session, os.path.join(os.getcwd(), "CNN_MC.ckpt"))
    print("Model restored.")
    print('Initialized')
    k = session.run([y_pred], feed_dict={x:test_x[0:64], hold_prob1:1.0, hold_prob2:1.0, hold_prob3:1.0, hold_prob4:1.0})

print(np.array(k).shape)

#Reshaping k (session.run returned a one-element list holding a (64, 4) logit array)
k = np.array(k).reshape(64,4)

class_names = ['chair', 'kitchen', 'knife', 'saucepan']
pred_labels = []

for i in range(64):
    probs = np.exp(k[i]) / np.sum(np.exp(k[i])) # softmax over the 4 logits
    pred_labels.append(class_names[probs.argmax()])

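#Since softmax is monotonic, probs.argmax() equals k[i].argmax(); the explicit
#softmax is only needed if you also want probability-like confidence scores.
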
#Multiple images parameters
columns = 8
rows = 8

images = test_x[:64]

print(images.shape)

fig = plt.figure(figsize=(20, 20))
for m in range(1, columns*rows +1):
    img = cv2.cvtColor(images[m-1], cv2.COLOR_BGR2RGB) # BGR -> RGB for correct colors
    fig.add_subplot(rows, columns, m)
    plt.imshow(img)
    plt.title("Pred: " + pred_labels[m-1])
    plt.axis('off')
plt.show()
