Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit d07d42b

Browse files
Re-updated Ch5 and Ch6 to Python3
1 parent 3c2a5db commit d07d42b

File tree

5 files changed

+183
-186
lines changed

5 files changed

+183
-186
lines changed

‎Chapter5_LossFunctions/Chapter5.ipynb‎

Lines changed: 119 additions & 121 deletions
Large diffs are not rendered by default.

‎Chapter5_LossFunctions/DarkWorldsMetric.py‎

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
5858
for perm in it.permutations(a[num_halos-2],num_halos):
5959
which_true_halos=[]
6060
which_predicted_halos=[]
61-
for j in xrange(num_halos): #loop through all the true halos with the
61+
for j in range(num_halos): #loop through all the true halos with the
6262

6363
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
6464
+(y_true[j]-y_predicted[int(perm[j])])**2)
@@ -141,7 +141,7 @@ def convert_to_360(angle, x_in, y_in):
141141
theta: the angle in the range 0:2pi
142142
"""
143143
n = len(x_in)
144-
for i in xrange(n):
144+
for i in range(n):
145145
if x_in[i] < 0 and y_in[i] > 0:
146146
angle[i] = angle[i]+mt.pi
147147
elif x_in[i] < 0 and y_in[i] < 0:
@@ -204,7 +204,7 @@ def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_pre
204204

205205
x_predicted=np.array([],dtype=float)
206206
y_predicted=np.array([],dtype=float)
207-
for i in xrange(nhalo):
207+
for i in range(nhalo):
208208
x_predicted=np.append(x_predicted,float(sky[0])) #get the predicted values
209209
y_predicted=np.append(y_predicted,float(sky[1]))
210210
#The solution file for the test data provides masses
@@ -271,9 +271,9 @@ def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_pre
271271
W1=1./1000. #Weight the av_r such that < 1 is a good score > 1 is not so good.
272272
W2=1.
273273
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
274-
print'Your average distance in pixels you are away from the true halo is', av_r
275-
print'Your average angular vector is', angle_vec
276-
print'Your score for the training data is', metric
274+
print('Your average distance in pixels you are away from the true halo is', av_r)
275+
print('Your average angular vector is', angle_vec)
276+
print('Your score for the training data is', metric)
277277
return metric
278278

279279

@@ -316,10 +316,9 @@ def main(user_fname, fname):
316316
#first input would be
317317
#a float, if succeed it
318318
#is not a header
319-
print'THE INPUT FILE DOES NOT APPEAR TO HAVE A HEADER'
319+
print('THE INPUT FILE DOES NOT APPEAR TO HAVE A HEADER')
320320
except :
321-
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
322-
321+
print('THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE')
323322
skip_header = sky_prediction.next()
324323

325324

@@ -331,7 +330,7 @@ def main(user_fname, fname):
331330
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
332331
selectskyinsolutions=true_sky_id.index(sky_id)-1
333332
else: #Otherwise exit
334-
print'Sky_id does not exist, formatting problem: ',sky_id
333+
print('Sky_id does not exist, formatting problem: ',sky_id)
335334
sys.exit(2)
336335

337336

@@ -342,7 +341,7 @@ def main(user_fname, fname):
342341

343342
x_predicted=np.array([],dtype=float)
344343
y_predicted=np.array([],dtype=float)
345-
for i in xrange(nhalo):
344+
for i in range(nhalo):
346345
x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predicted values
347346
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
348347
#The solution file for the test data provides masses
@@ -409,9 +408,9 @@ def main(user_fname, fname):
409408
W1=1./1000. #Weight the av_r such that < 1 is a good score > 1 is not so good.
410409
W2=1.
411410
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
412-
print'Your average distance in pixels you are away from the true halo is', av_r
413-
print'Your average angular vector is', angle_vec
414-
print'Your score for the training data is', metric
411+
print('Your average distance in pixels you are away from the true halo is', av_r)
412+
print('Your average angular vector is', angle_vec)
413+
print('Your score for the training data is', metric)
415414

416415

417416
if __name__ == "__main__":

‎Chapter5_LossFunctions/draw_sky2.py‎

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,14 @@
22
from matplotlib.patches import Ellipse
33
import numpy as np
44

5-
def draw_sky(galaxies):
5+
def draw_sky(galaxies):
66
"""adapted from Vishal Goklani"""
77
size_multiplier = 45
88
fig = plt.figure(figsize=(10,10))
99
#fig.patch.set_facecolor("blue")
1010
ax = fig.add_subplot(111, aspect='equal')
1111
n = galaxies.shape[0]
12-
for i in xrange(n):
12+
for i in range(n):
1313
_g = galaxies[i,:]
1414
x,y = _g[0], _g[1]
1515
d = np.sqrt( _g[2]**2 + _g[3]**2 )

‎Chapter6_Priorities/Chapter6.ipynb‎

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
"cell_type": "markdown",
1212
"metadata": {},
1313
"source": [
14-
"#Chapter 6\n",
14+
"#Chapter 6\n",
1515
"\n",
1616
"____\n",
1717
"\n",
@@ -27,7 +27,7 @@
2727
"\n",
2828
"Up until now, we have mostly ignored our choice of priors. This is unfortunate as we can be very expressive with our priors, but we also must be careful about choosing them. This is especially true if we want to be objective, that is, not to express any personal beliefs in the priors. \n",
2929
"\n",
30-
"###Subjective vs Objective priors\n",
30+
"###Subjective vs Objective priors\n",
3131
"\n",
3232
"Bayesian priors can be classified into two classes: *objective* priors, which aim to allow the data to influence the posterior the most, and *subjective* priors, which allow the practitioner to express his or her views into the prior. \n",
3333
"\n",
@@ -926,7 +926,7 @@
926926
"figsize(12.0, 8)\n",
927927
"beta = stats.beta\n",
928928
"hidden_prob = beta.rvs(1, 13, size=35)\n",
929-
"printhidden_prob\n",
929+
"print(hidden_prob)\n",
930930
"bandits = Bandits(hidden_prob)\n",
931931
"bayesian_strat = BayesianStrategy(bandits)\n",
932932
"\n",
@@ -1042,8 +1042,8 @@
10421042
" \"AMZN\": (0.03, 0.02),\n",
10431043
" }\n",
10441044
"\n",
1045-
"for i, (name, params) in enumerate(expert_prior_params.iteritems()):\n",
1046-
" plt.subplot(2, 2, i)\n",
1045+
"for i, (name, params) in enumerate(expert_prior_params.items()):\n",
1046+
" plt.subplot(2, 2, i + 1)\n",
10471047
" y = normal.pdf(x, params[0], scale=params[1])\n",
10481048
" #plt.plot( x, y, c = colors[i] )\n",
10491049
" plt.fill_between(x, 0, y, color=colors[i], linewidth=2,\n",
@@ -1103,7 +1103,7 @@
11031103
"\n",
11041104
"stocks = [\"AAPL\", \"GOOG\", \"TSLA\", \"AMZN\"]\n",
11051105
"\n",
1106-
"enddate = datetime.datetime.now().strftime(\"%Y-%m-%d\") # today's date.\n",
1106+
"enddate = \"2015-04-27\"\n",
11071107
"startdate = \"2012-09-01\"\n",
11081108
"\n",
11091109
"stock_closes = {}\n",
@@ -1120,7 +1120,7 @@
11201120
" _previous_day = np.roll(stock_closes[stock], -1)\n",
11211121
" stock_returns[stock] = ((stock_closes[stock] - _previous_day) / _previous_day)[:n_observations]\n",
11221122
"\n",
1123-
"dates = map(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\"), x[1:n_observations + 1, 0])"
1123+
"dates = list(map(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\"), x[1:n_observations + 1, 0]))"
11241124
]
11251125
},
11261126
{
@@ -1144,12 +1144,12 @@
11441144
"source": [
11451145
"figsize(12.5, 4)\n",
11461146
"\n",
1147-
"for _stock, _returns in stock_returns.iteritems():\n",
1147+
"for _stock, _returns in stock_returns.items():\n",
11481148
" p = plt.plot((1 + _returns)[::-1].cumprod() - 1, '-o', label=\"%s\" % _stock,\n",
11491149
" markersize=4, markeredgecolor=\"none\")\n",
11501150
"\n",
11511151
"plt.xticks(np.arange(100)[::-8],\n",
1152-
" map(lambda x: datetime.datetime.strftime(x, \"%Y-%m-%d\"), dates[::8]),\n",
1152+
" list(map(lambda x: datetime.datetime.strftime(x, \"%Y-%m-%d\"), dates[::8])),\n",
11531153
" rotation=60);\n",
11541154
"\n",
11551155
"plt.legend(loc=\"upper left\")\n",
@@ -1179,9 +1179,9 @@
11791179
"figsize(11., 5)\n",
11801180
"returns = np.zeros((n_observations, 4))\n",
11811181
"\n",
1182-
"for i, (_stock, _returns) in enumerate(stock_returns.iteritems()):\n",
1182+
"for i, (_stock, _returns) in enumerate(stock_returns.items()):\n",
11831183
" returns[:, i] = _returns\n",
1184-
" plt.subplot(2, 2, i)\n",
1184+
" plt.subplot(2, 2, i+1)\n",
11851185
" plt.hist(_returns, bins=20,\n",
11861186
" normed=True, histtype=\"stepfilled\",\n",
11871187
" color=colors[i], alpha=0.7)\n",
@@ -1258,7 +1258,7 @@
12581258
"for i in range(4):\n",
12591259
" plt.hist(mu_samples[:, i], alpha=0.8 - 0.05 * i, bins=30,\n",
12601260
" histtype=\"stepfilled\", normed=True,\n",
1261-
" label=\"%s\" % stock_returns.keys()[i])\n",
1261+
" label=\"%s\" % list(stock_returns.keys())[i])\n",
12621262
"\n",
12631263
"plt.vlines(mu_samples.mean(axis=0), 0, 500, linestyle=\"--\", linewidth=.5)\n",
12641264
"\n",
@@ -1302,8 +1302,8 @@
13021302
" plt.subplot(2, 2, i + 1)\n",
13031303
" plt.hist(mu_samples[:, i], alpha=0.8 - 0.05 * i, bins=30,\n",
13041304
" histtype=\"stepfilled\", normed=True, color=colors[i],\n",
1305-
" label=\"%s\" % stock_returns.keys()[i])\n",
1306-
" plt.title(\"%s\" % stock_returns.keys()[i])\n",
1305+
" label=\"%s\" % list(stock_returns.keys())[i])\n",
1306+
" plt.title(\"%s\" % list(stock_returns.keys())[i])\n",
13071307
" plt.xlim(-0.15, 0.15)\n",
13081308
"\n",
13091309
"plt.suptitle(\"Posterior distribution of daily stock returns\")\n",
@@ -2512,21 +2512,21 @@
25122512
"metadata": {
25132513
"anaconda-cloud": {},
25142514
"kernelspec": {
2515-
"display_name": "Python 2",
2515+
"display_name": "Python [conda env:bayes]",
25162516
"language": "python",
2517-
"name": "python2"
2517+
"name": "conda-env-bayes-py"
25182518
},
25192519
"language_info": {
25202520
"codemirror_mode": {
25212521
"name": "ipython",
2522-
"version": 2
2522+
"version": 3
25232523
},
25242524
"file_extension": ".py",
25252525
"mimetype": "text/x-python",
25262526
"name": "python",
25272527
"nbconvert_exporter": "python",
2528-
"pygments_lexer": "ipython2",
2529-
"version": "2.7.10"
2528+
"pygments_lexer": "ipython3",
2529+
"version": "3.5.2"
25302530
}
25312531
},
25322532
"nbformat": 4,

‎Chapter6_Priorities/other_strats.py‎

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,13 @@
33

44
import scipy.stats as stats
55
import numpy as np
6-
from pymc import rbeta
6+
#from pymc import rbeta
77

88
rand = np.random.rand
99
beta = stats.beta
1010

1111

12-
class GeneralBanditStrat(object):
12+
class GeneralBanditStrat(object):
1313

1414
"""
1515
Implements a online, learning strategy to solve
@@ -32,72 +32,72 @@ class GeneralBanditStrat( object ):
3232
def __init__(self, bandits, choice_function):
3333

3434
self.bandits = bandits
35-
n_bandits = len(self.bandits)
36-
self.wins = np.zeros(n_bandits)
37-
self.trials = np.zeros(n_bandits)
35+
n_bandits = len(self.bandits)
36+
self.wins = np.zeros(n_bandits)
37+
self.trials = np.zeros(n_bandits)
3838
self.N = 0
3939
self.choices = []
4040
self.score = []
4141
self.choice_function = choice_function
4242

43-
def sample_bandits(self, n=1):
43+
def sample_bandits(self, n=1):
4444

45-
score = np.zeros(n)
46-
choices = np.zeros(n)
45+
score = np.zeros(n)
46+
choices = np.zeros(n)
4747

4848
for k in range(n):
4949
#sample from the bandits's priors, and select the largest sample
5050
choice = self.choice_function(self)
5151

5252
#sample the chosen bandit
53-
result = self.bandits.pull(choice)
53+
result = self.bandits.pull(choice)
5454

5555
#update priors and score
56-
self.wins[choice] += result
57-
self.trials[choice] += 1
58-
score[k] = result
56+
self.wins[choice] += result
57+
self.trials[choice] += 1
58+
score[k] = result
5959
self.N += 1
60-
choices[k] = choice
60+
choices[k] = choice
6161

62-
self.score = np.r_[self.score, score]
63-
self.choices = np.r_[self.choices, choices]
62+
self.score = np.r_[self.score, score]
63+
self.choices = np.r_[self.choices, choices]
6464
return
6565

6666

6767
def bayesian_bandit_choice(self):
68-
return np.argmax(rbeta( 1 + self.wins, 1 + self.trials - self.wins))
68+
return np.argmax(np.random.beta(1 + self.wins, 1 + self.trials - self.wins))
6969

70-
def max_mean(self):
70+
def max_mean(self):
7171
"""pick the bandit with the current best observed proportion of winning """
72-
return np.argmax(self.wins / (self.trials +1 ) )
72+
return np.argmax(self.wins / (self.trials +1))
7373

7474
def lower_credible_choice( self ):
7575
"""pick the bandit with the best LOWER BOUND. See chapter 5"""
7676
def lb(a,b):
77-
return a/(a+b) - 1.65*np.sqrt((a*b)/( (a+b)**2*(a+b+1) ) )
77+
return a/(a+b) - 1.65*np.sqrt((a*b)/( (a+b)**2*(a+b+1)))
7878
a = self.wins + 1
7979
b = self.trials - self.wins + 1
80-
return np.argmax(lb(a,b))
80+
return np.argmax(lb(a,b))
8181

82-
def upper_credible_choice(self):
82+
def upper_credible_choice(self):
8383
"""pick the bandit with the best LOWER BOUND. See chapter 5"""
8484
def lb(a,b):
85-
return a/(a+b) + 1.65*np.sqrt((a*b)/((a+b)**2*(a+b+1) ) )
85+
return a/(a+b) + 1.65*np.sqrt((a*b)/((a+b)**2*(a+b+1)))
8686
a = self.wins + 1
8787
b = self.trials - self.wins + 1
88-
return np.argmax(lb(a,b))
88+
return np.argmax(lb(a,b))
8989

90-
def random_choice(self):
91-
return np.random.randint(0, len(self.wins ) )
90+
def random_choice(self):
91+
return np.random.randint(0, len(self.wins))
9292

9393

94-
def ucb_bayes(self):
94+
def ucb_bayes(self):
9595
C = 0
9696
n = 10000
97-
alpha =1 - 1./((self.N+1))
98-
return np.argmax(beta.ppf(alpha,
97+
alpha =1 - 1./((self.N+1))
98+
return np.argmax(beta.ppf(alpha,
9999
1 + self.wins,
100-
1 + self.trials - self.wins ) )
100+
1 + self.trials - self.wins))
101101

102102

103103

@@ -117,7 +117,7 @@ def __init__(self, p_array):
117117
self.p = p_array
118118
self.optimal = np.argmax(p_array)
119119

120-
def pull(self, i):
120+
def pull(self, i):
121121
#i is which arm to pull
122122
return rand() < self.p[i]
123123

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /