Commit e6f1ecf

Create Sensitivity check with Iris Data.ipynb
1 parent 6d98635 commit e6f1ecf

1 file changed: 346 additions, 0 deletions

Sensitivity check with Iris Data.ipynb
@@ -0,0 +1,346 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Demo - Sensitivity check with Iris Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn import datasets\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "import torchbnn as bnn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. Load Iris Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "iris = datasets.load_iris()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "X = iris.data\n",
    "Y = iris.target"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([150, 4]), torch.Size([150]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x, y = torch.from_numpy(X).float(), torch.from_numpy(Y).long()\n",
    "x.shape, y.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. Define Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "ce_loss = nn.CrossEntropyLoss()\n",
    "kl_loss = bnn.BKLLoss(reduction='mean', last_layer_only=False)"
   ]
  },
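  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For each Bayesian layer, `bnn.BKLLoss` is presumably based on the standard closed-form KL divergence between the variational weight posterior $q = \\mathcal{N}(\\mu, \\sigma^2)$ and the Gaussian prior $p = \\mathcal{N}(\\mu_p, \\sigma_p^2)$:\n",
    "\n",
    "$$\\mathrm{KL}(q \\parallel p) = \\log\\frac{\\sigma_p}{\\sigma} + \\frac{\\sigma^2 + (\\mu - \\mu_p)^2}{2\\sigma_p^2} - \\frac{1}{2}$$\n",
    "\n",
    "With `reduction='mean'` the result is averaged over parameters, and `last_layer_only=False` includes every Bayesian layer rather than only the last one."
   ]
  },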
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. Train Model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.1. Sensitivity to KL loss"
   ]
  },
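  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The training loops below minimize the weighted sum\n",
    "\n",
    "$$\\mathrm{cost} = \\mathrm{CE} + w_{KL} \\cdot \\mathrm{KL},$$\n",
    "\n",
    "so `kl_weight` ($w_{KL}$) sets the trade-off: a larger weight keeps the weight posteriors closer to the prior, a smaller one lets the cross-entropy term dominate. The same model is trained twice, with `kl_weight = 0.1` and `kl_weight = 0.01`, to check how sensitive the result is to this choice."
   ]
  },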
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = nn.Sequential(\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=4, out_features=100),\n",
    "    nn.ReLU(),\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=100, out_features=3),\n",
    ")\n",
    "\n",
    "\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "kl_weight = 0.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "- Accuracy: 98.666667 %\n",
      "- CE : 0.21, KL : 2.36\n"
     ]
    }
   ],
   "source": [
    "for step in range(3000):\n",
    "    pre = model(x)\n",
    "    ce = ce_loss(pre, y)\n",
    "    kl = kl_loss(model)\n",
    "    cost = ce + kl_weight*kl\n",
    "    \n",
    "    optimizer.zero_grad()\n",
    "    cost.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "_, predicted = torch.max(pre.data, 1)\n",
    "total = y.size(0)\n",
    "correct = (predicted == y).sum()\n",
    "print('- Accuracy: %f %%' % (100 * float(correct) / total))\n",
    "print('- CE : %2.2f, KL : %2.2f' % (ce.item(), kl.item()))"
   ]
  },
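  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Because the Bayesian layers resample their weights on every forward pass, repeated predictions on the same input differ. The next cell is a minimal sketch, assuming this default sampling behaviour, of how the induced predictive uncertainty could be inspected by averaging several stochastic forward passes (the names `n_samples`, `mean_prob`, `std_prob` are illustrative)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch: assumes each forward pass through the Bayesian layers samples\n",
    "# a fresh set of weights, so stacking several passes gives a Monte Carlo view\n",
    "# of the predictive distribution and its spread.\n",
    "n_samples = 30\n",
    "with torch.no_grad():\n",
    "    probs = torch.stack([torch.softmax(model(x), dim=-1) for _ in range(n_samples)])\n",
    "\n",
    "mean_prob = probs.mean(dim=0)  # averaged class probabilities per example\n",
    "std_prob = probs.std(dim=0)    # spread across weight samples (uncertainty)\n",
    "mean_prob.shape, std_prob.shape"
   ]
  },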
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = nn.Sequential(\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=4, out_features=100),\n",
    "    nn.ReLU(),\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=100, out_features=3),\n",
    ")\n",
    "\n",
    "\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "kl_weight = 0.01"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "- Accuracy: 98.666667 %\n",
      "- CE : 0.07, KL : 6.41\n"
     ]
    }
   ],
   "source": [
    "for step in range(3000):\n",
    "    pre = model(x)\n",
    "    ce = ce_loss(pre, y)\n",
    "    kl = kl_loss(model)\n",
    "    cost = ce + kl_weight*kl\n",
    "    \n",
    "    optimizer.zero_grad()\n",
    "    cost.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "_, predicted = torch.max(pre.data, 1)\n",
    "total = y.size(0)\n",
    "correct = (predicted == y).sum()\n",
    "print('- Accuracy: %f %%' % (100 * float(correct) / total))\n",
    "print('- CE : %2.2f, KL : %2.2f' % (ce.item(), kl.item()))"
   ]
  },
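  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Both settings reach the same 98.67 % training accuracy, but the balance of the two terms shifts as expected: with `kl_weight = 0.1` the run ends at CE 0.21 / KL 2.36, while with `kl_weight = 0.01` the weaker penalty lets the posterior move further from the prior and fit the data more tightly (CE 0.07 / KL 6.41)."
   ]
  },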
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.2. Custom KL loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = nn.Sequential(\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=4, out_features=100),\n",
    "    nn.ReLU(),\n",
    "    bnn.BayesLinear(prior_mu=0, prior_sigma=0.05, in_features=100, out_features=3),\n",
    ")\n",
    "\n",
    "\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.01)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def custom_kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1) :\n",
    "    kl = log_sigma_1 - log_sigma_0 + \\\n",
    "        (log_sigma_0**2 + (mu_0-mu_1)**2)/(2*log_sigma_1**2) - 0.5\n",
    "    return kl.sum()"
   ]
  },
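  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, the standard closed-form KL between $\\mathcal{N}(\\mu_0, \\sigma_0^2)$ and $\\mathcal{N}(\\mu_1, \\sigma_1^2)$ exponentiates the log standard deviations before squaring and is therefore never negative; the custom variant above applies the quadratic terms to the log-sigmas directly, which is why the KL reported below can go negative. The next cell sketches the standard form, assuming (as in the calls below) that the posterior statistics are tensors while the prior statistics are plain scalars; `standard_kl_loss` is only an illustrative name."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "\n",
    "# Reference sketch of the standard closed-form Gaussian KL (always >= 0).\n",
    "# Assumes mu_0 / log_sigma_0 are tensors (posterior) and mu_1 / log_sigma_1\n",
    "# are plain Python scalars (prior), matching how custom_kl_loss is called below.\n",
    "def standard_kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1):\n",
    "    kl = log_sigma_1 - log_sigma_0 + \\\n",
    "        (torch.exp(log_sigma_0)**2 + (mu_0 - mu_1)**2) / (2 * math.exp(log_sigma_1)**2) - 0.5\n",
    "    return kl.sum()"
   ]
  },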
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "- Accuracy: 96.000000 %\n",
      "- CE : 0.13, KL : -10.83\n"
     ]
    }
   ],
   "source": [
    "for step in range(3000):\n",
    "    pre = model(x)\n",
    "    ce = ce_loss(pre, y)\n",
    "    \n",
    "    # custom kl loss\n",
    "    ckl = 0\n",
    "    n = 0\n",
    "    \n",
    "    for m in model.modules() :\n",
    "        if isinstance(m, (bnn.BayesLinear, bnn.BayesConv2d)):\n",
    "            kl = custom_kl_loss(m.weight_mu, m.weight_log_sigma,\n",
    "                                m.prior_mu, m.prior_log_sigma)\n",
    "            ckl += kl\n",
    "            n += len(m.weight_mu.view(-1))\n",
    "\n",
    "            if m.bias :\n",
    "                kl = custom_kl_loss(m.bias_mu, m.bias_log_sigma,\n",
    "                                    m.prior_mu, m.prior_log_sigma)\n",
    "                ckl += kl\n",
    "                n += len(m.bias_mu.view(-1))\n",
    "\n",
    "        if isinstance(m, bnn.BayesBatchNorm2d):\n",
    "            if m.affine :\n",
    "                kl = custom_kl_loss(m.weight_mu, m.weight_log_sigma,\n",
    "                                    m.prior_mu, m.prior_log_sigma)\n",
    "                ckl += kl\n",
    "                n += len(m.weight_mu.view(-1))\n",
    "\n",
    "                kl = custom_kl_loss(m.bias_mu, m.bias_log_sigma,\n",
    "                                    m.prior_mu, m.prior_log_sigma)\n",
    "                ckl += kl\n",
    "                n += len(m.bias_mu.view(-1))\n",
    "    \n",
    "    cost = ce + kl_weight*ckl\n",
    "    \n",
    "    optimizer.zero_grad()\n",
    "    cost.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "_, predicted = torch.max(pre.data, 1)\n",
    "total = y.size(0)\n",
    "correct = (predicted == y).sum()\n",
    "print('- Accuracy: %f %%' % (100 * float(correct) / total))\n",
    "print('- CE : %2.2f, KL : %2.2f' % (ce.item(), kl.item()))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
