Commit 1fe46b3

Added tuner file
1 parent 2ce33e3 commit 1fe46b3

File tree

1 file changed: +162 −0


Tutorials/divorce_tuner.py

Lines changed: 162 additions & 0 deletions
@@ -0,0 +1,162 @@
# Import Libraries
import tensorflow
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from rarfile import RarFile
from urllib.request import urlretrieve
# kerastuner is the pre-1.0 package name; newer releases ship as keras_tuner
from kerastuner import HyperModel
from kerastuner.tuners import RandomSearch, Hyperband, BayesianOptimization
from tensorflow import keras
from sklearn.metrics import confusion_matrix


sns.set(rc={'figure.figsize': (20, 10)})
# Download the compressed divorce file
urlretrieve('https://archive.ics.uci.edu/ml/machine-learning-databases/00497/divorce.rar', 'divorce_file.rar')

# Extract the rar file (rarfile needs an unrar backend installed on the system)
with RarFile('divorce_file.rar', mode='r') as rf:
    rf.extractall()

# Read the divorce data
df = pd.read_excel('divorce.xlsx')

# Clean the column names
clean_cols = [x.lower() for x in df.columns.to_list()]
df.columns = clean_cols

# Separate the features and the target into their own dataframes
X = df.drop('class', axis=1)
y = df[['class']].astype('int')

# Stratified split based on the distribution of the target vector, y
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    stratify=y,
                                                    test_size=0.20,
                                                    random_state=30)

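# Optional sketch (not in the original commit): the StandardScaler imported above
# could standardise the 0-4 survey answers; fit on the training split only to
# avoid leaking test-set statistics. The rest of the script keeps the raw values.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
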
class MyHyperModel(HyperModel):

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def build(self, hp):

        # Specify the model
        model = keras.Sequential()

        # Tunable number of hidden layers, each with a tunable width
        for i in range(hp.Int('num_layers', 2, 20)):

            model.add(keras.layers.Dense(units=hp.Int('units_' + str(i),
                                                      min_value=32,
                                                      max_value=512,
                                                      step=32),
                                         activation='relu'))

        # Single sigmoid output for the binary divorce / no-divorce label
        model.add(keras.layers.Dense(self.num_classes, activation='sigmoid'))

        # Tunable learning rate for the Adam optimiser
        model.compile(
            optimizer=keras.optimizers.Adam(
                hp.Choice('learning_rate',
                          values=[1e-2, 1e-3, 1e-4])),
            loss='binary_crossentropy',
            metrics=['accuracy'])

        return model

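# Optional sketch (not in the original commit): sanity-check that build() returns
# a usable model by handing it an empty HyperParameters container. Assumes the
# kerastuner release used above exposes HyperParameters at the package level.
from kerastuner import HyperParameters

check_model = MyHyperModel(num_classes=1).build(HyperParameters())
check_model.build(input_shape=(None, X_train.shape[1]))
check_model.summary()
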
hypermodel = MyHyperModel(num_classes=1)

# Hyperband tuner: successive-halving style search over the model space
tuner = Hyperband(
    hypermodel,
    objective='accuracy',
    max_epochs=10,
    seed=10,
    project_name='divorce test')


tuner.search(X_train.values, y_train.values.flatten(),
             epochs=10,
             validation_data=(X_test.values, y_test.values.flatten()))

# Best hyperparameters found by Hyperband
params = tuner.get_best_hyperparameters()[0]

# Rebuild the model with those hyperparameters and refit on the full dataset
model = tuner.hypermodel.build(params)

model.fit(X.values, y.values.flatten(), epochs=20)

hyperband_accuracy_df = pd.DataFrame(model.history.history)

hyperband_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

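# Optional sketch (not in the original commit): inspect what Hyperband settled on.
# params.values is a plain dict of the chosen hyperparameter values, and
# results_summary() prints the top trials the tuner recorded.
print(params.values)
tuner.results_summary(num_trials=3)
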
# Random search over the same hypermodel, capped at 10 trials
random_tuner = RandomSearch(
    hypermodel,
    objective='accuracy',
    max_trials=10,
    seed=10,
    project_name='divorce test random')  # separate project_name keeps these trials apart from the Hyperband run


random_tuner.search(X_train.values, y_train.values.flatten(),
                    epochs=10,
                    validation_data=(X_test.values, y_test.values.flatten()))

random_params = random_tuner.get_best_hyperparameters()[0]

random_model = random_tuner.hypermodel.build(random_params)

random_model.fit(X.values, y.values.flatten(), epochs=15)

random_accuracy_df = pd.DataFrame(random_model.history.history)

random_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Random Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

# Bayesian optimisation over the same hypermodel, capped at 10 trials
bayesian_tuner = BayesianOptimization(
    hypermodel,
    objective='accuracy',
    max_trials=10,
    seed=10,
    project_name='divorce test bayesian')  # separate project_name keeps these trials apart from the other tuners

bayesian_tuner.search(X_train.values, y_train.values.flatten(),
                      epochs=10,
                      validation_data=(X_test.values, y_test.values.flatten()))

bayesian_params = bayesian_tuner.get_best_hyperparameters()[0]

bayesian_model = bayesian_tuner.hypermodel.build(bayesian_params)

bayesian_model.fit(X.values, y.values.flatten(), epochs=15)

bayesian_accuracy_df = pd.DataFrame(bayesian_model.history.history)

bayesian_accuracy_df[['loss', 'accuracy']].plot()
plt.title('Loss & Accuracy Per EPOCH For Bayesian Optimisation Model')
plt.xlabel('EPOCH')
plt.ylabel('Accuracy')
plt.show()

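# Optional sketch (not in the original commit): score the three refitted models on
# the held-out split with the confusion_matrix import from above. Because the
# models were refit on all of X, these numbers will be optimistic.
for name, m in [('hyperband', model), ('random', random_model), ('bayesian', bayesian_model)]:
    preds = (m.predict(X_test.values) > 0.5).astype('int').flatten()
    print(name)
    print(confusion_matrix(y_test.values.flatten(), preds))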
