Commit ecbfe8c

Add files via upload
1 parent dc3306c commit ecbfe8c

File tree

2 files changed: +456, -0 lines changed


multiclass_classification_cpu.py

Lines changed: 227 additions & 0 deletions
@@ -0,0 +1,227 @@
"""
Created on Sat Mar 24 11:25:41 2018

@author: Muhammed Buyukkinaci
"""

import tensorflow as tf  # ML library for graphs

# Importing libraries
import cv2  # image processing
import numpy as np  # mathematical operations
import os  # working with directories
from random import shuffle  # mixing up our currently ordered data that might lead our network astray in training
from tqdm import tqdm  # a nice progress bar for loops

# WORKING DIRECTORIES
TRAIN_DIR = 'C:\\Users\\Muhammed Buyukkinaci\\python\\images\\multiclass_classification\\training'
TEST_DIR = 'C:\\Users\\Muhammed Buyukkinaci\\python\\images\\multiclass_classification\\testing'
IMG_SIZE = 80  # our images are 80x80x3

print(os.getcwd())

# Converting the output into one-hot format
def label_img(img):
    word_label = img.split('_')[0]
    if word_label == 'chair': return [1,0,0,0]
    elif word_label == 'kitchen': return [0,1,0,0]
    elif word_label == 'knife': return [0,0,1,0]
    elif word_label == 'saucepan': return [0,0,0,1]
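# Example (assuming filenames start with the class name followed by an underscore,
# e.g. a hypothetical 'chair_001.jpg'): label_img('chair_001.jpg') returns [1,0,0,0].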

# A function to read images from the training folder
def create_train_data():
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = TRAIN_DIR + '\\' + img
        img = cv2.imread(path, 1)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        training_data.append([np.array(img), np.array(label)])
    shuffle(training_data)
    np.save('train_data_mc.npy', training_data)
    return training_data

# If you are reading the raw images, uncomment the line below.
#train_data = create_train_data()
# If you are reading the .npy file, use the line below.
train_data = np.load('train_data_mc.npy')
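# Note: NumPy 1.16.3+ defaults np.load to allow_pickle=False; since this .npy file
# stores object arrays, you may need np.load('train_data_mc.npy', allow_pickle=True)
# here and for the test-data load below.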


# A function to read images from the testing folder
def process_test_data():
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        label = label_img(img)
        path = TEST_DIR + '\\' + img
        img = cv2.imread(path, 1)
        img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
        testing_data.append([np.array(img), label])
    shuffle(testing_data)
    np.save('test_data_mc.npy', testing_data)
    return testing_data

# If you are reading the raw images, uncomment the line below.
#test_data = process_test_data()

# If you are reading the .npy file, use the line below.
test_data = np.load('test_data_mc.npy')

# For visualizations
import matplotlib.pyplot as plt

# A CHAIR
plt.imshow(np.array(train_data[180][0]))

# KITCHEN
plt.imshow(np.array(train_data[100][0]))

# KNIFE
plt.imshow(np.array(train_data[111][0]))

# SAUCEPAN
plt.imshow(np.array(train_data[154][0]))

# Splitting train and CV data
train = train_data[:4800]
cv = train_data[4800:]

X = np.array([i[0] for i in train]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
Y = np.array([i[1] for i in train])

cv_x = np.array([i[0] for i in cv]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
cv_y = np.array([i[1] for i in cv])

test_x = np.array([i[0] for i in test_data]).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
test_y = np.array([i[1] for i in test_data])

print(X.shape)
print(Y[2])
print(cv_x.shape)
print(test_x.shape)
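# Sanity check: with 4800 training images of size 80x80x3 and 4 classes,
# X should come out as (4800, 80, 80, 3) and Y as (4800, 4).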

# HYPERPARAMETERS
steps = 4800
epochs = 60
step_size = 16
total_batch = int(steps / step_size)
LR = 0.00001
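# steps is the number of training images and step_size is the mini-batch size,
# so total_batch = 4800 / 16 = 300 mini-batches per epoch.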

# FUNCTIONS TO CODE LAYERS
def init_weights(shape):
    init_random_dist = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_random_dist)

def init_bias(shape):
    init_bias_vals = tf.constant(0.1, shape=shape)
    return tf.Variable(init_bias_vals)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2by2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def convolutional_layer(input_x, shape):
    W = init_weights(shape)
    b = init_bias([shape[3]])
    return tf.nn.relu(conv2d(input_x, W) + b)

def normal_full_layer(input_layer, size):
    input_size = int(input_layer.get_shape()[1])
    W = init_weights([input_size, size])
    b = init_bias([size])
    return tf.matmul(input_layer, W) + b

print(X[5].shape)
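# The `shape` passed to convolutional_layer follows tf.nn.conv2d's filter layout
# [filter_height, filter_width, in_channels, out_channels]; e.g. [6,6,3,32]
# means 32 filters of size 6x6 applied to a 3-channel input.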

# GRAPH
tf.reset_default_graph()

# Defining placeholders
x = tf.placeholder(tf.float32, shape=[None, 80, 80, 3])
y_true = tf.placeholder(tf.float32, shape=[None, 4])

# Defining GRAPH
with tf.name_scope('Model'):
    convo_1 = convolutional_layer(x, shape=[6, 6, 3, 32])
    convo_1_pooling = max_pool_2by2(convo_1)
    convo_2 = convolutional_layer(convo_1_pooling, shape=[6, 6, 32, 64])
    convo_2_pooling = max_pool_2by2(convo_2)
    convo_3 = convolutional_layer(convo_2_pooling, shape=[6, 6, 64, 64])
    convo_3_pooling = max_pool_2by2(convo_3)
    convo_4 = convolutional_layer(convo_3_pooling, shape=[6, 6, 64, 128])
    convo_4_pooling = max_pool_2by2(convo_4)
    convo_2_flat = tf.reshape(convo_4_pooling, [-1, 5 * 5 * 128])
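
    # Each 2x2 SAME pooling halves the spatial size: 80 -> 40 -> 20 -> 10 -> 5,
    # so the last feature map is 5x5x128 and flattens to 5*5*128 = 3200 features.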
    full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat, 4096))
    hold_prob1 = tf.placeholder(tf.float32)
    full_one_dropout = tf.nn.dropout(full_layer_one, keep_prob=hold_prob1)

    full_layer_two = tf.nn.relu(normal_full_layer(full_one_dropout, 2048))
    hold_prob2 = tf.placeholder(tf.float32)
    full_two_dropout = tf.nn.dropout(full_layer_two, keep_prob=hold_prob2)

    full_layer_three = tf.nn.relu(normal_full_layer(full_two_dropout, 1024))
    hold_prob3 = tf.placeholder(tf.float32)
    full_three_dropout = tf.nn.dropout(full_layer_three, keep_prob=hold_prob3)

    full_layer_four = tf.nn.relu(normal_full_layer(full_three_dropout, 512))
    hold_prob4 = tf.placeholder(tf.float32)
    full_four_dropout = tf.nn.dropout(full_layer_four, keep_prob=hold_prob4)

    y_pred = normal_full_layer(full_four_dropout, 4)
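    # y_pred holds raw logits; the softmax is applied inside the loss below,
    # so no activation is added on this final layer.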

# Writing Loss and Accuracy
with tf.name_scope('Loss'):
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_pred))

with tf.name_scope('ADAM'):
    train = tf.train.AdamOptimizer(learning_rate=LR).minimize(cross_entropy)

with tf.name_scope('Accuracy'):
    matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    acc = tf.reduce_mean(tf.cast(matches, tf.float32))

init = tf.global_variables_initializer()

tf.summary.scalar("loss", cross_entropy)
tf.summary.scalar("accuracy", acc)
merged_summary_op = tf.summary.merge_all()

acc_list = []
logloss_list = []
acc_train = []

saver = tf.train.Saver()

# If you are using a CPU, just use with tf.Session() as sess: as below.
# Starting Session
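# Training: each epoch sweeps the 4800 training images in mini-batches of 16
# (j indexes the start of each slice). Dropout keep probabilities are 0.5 while
# training and 1.0 whenever the CV or test sets are evaluated.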

with tf.Session() as sess:
    sess.run(init)
    summary_writer = tf.summary.FileWriter(TRAIN_DIR, graph=tf.get_default_graph())
    for i in range(epochs):
        for j in range(0, steps, step_size):
            _, c, summary, d = sess.run(
                [train, cross_entropy, merged_summary_op, acc],
                feed_dict={x: X[j:j+step_size], y_true: Y[j:j+step_size],
                           hold_prob1: 0.5, hold_prob2: 0.5, hold_prob3: 0.5, hold_prob4: 0.5})
            summary_writer.add_summary(summary, i * total_batch + j)
            acc_train.append(d)
        mean_of_ll = sess.run(cross_entropy,
                              feed_dict={x: cv_x, y_true: cv_y,
                                         hold_prob1: 1.0, hold_prob2: 1.0, hold_prob3: 1.0, hold_prob4: 1.0})
        mean_of_acc = sess.run(acc,
                               feed_dict={x: cv_x, y_true: cv_y,
                                          hold_prob1: 1.0, hold_prob2: 1.0, hold_prob3: 1.0, hold_prob4: 1.0})
        logloss_list.append(mean_of_ll)
        acc_list.append(mean_of_acc)
        print(i, mean_of_ll, mean_of_acc)
    saver.save(sess, os.getcwd() + "\\CNN_MC.ckpt")
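    # The test set is evaluated in three slices ([:400], [400:800], [800:]) and the
    # slice results averaged, which keeps the memory footprint of each forward pass
    # small on the CPU.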
print("test accuracy = ",np.mean([sess.run(acc,feed_dict={x:test_x[:400],y_true:test_y[:400] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0}),sess.run(acc,feed_dict={x:test_x[400:800],y_true:test_y[400:800] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0}),sess.run(acc,feed_dict={x:test_x[800:],y_true:test_y[800:] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0})]))
218+
print("cross_entropy loss = ",np.mean([sess.run(cross_entropy,feed_dict={x:test_x[:400],y_true:test_y[:400] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0}),sess.run(cross_entropy,feed_dict={x:test_x[400:800],y_true:test_y[400:800] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0}),sess.run(cross_entropy,feed_dict={x:test_x[800:],y_true:test_y[800:] ,hold_prob1:1.0,hold_prob2:1.0,hold_prob3:1.0,hold_prob4:1.0})]))
219+

import pandas as pd

pd.Series(acc_list).plot(kind='line', title='Accuracy on CV data')

pd.Series(logloss_list).plot(kind='line', title='CV ERROR')
