Following *Data Science from Scratch* by Joel Grus, I wrote a simple batch gradient descent solver in Python 2.7. I know this isn't the most efficient way to solve this problem, but this code should be running faster than it does. How can I speed it up? My gut is telling me that `mse_grad2` is the problem...
from __future__ import division
import random
# Paired observations for a simple (one-feature) linear regression.
# NOTE(review): values match the friends-count / minutes-online example
# from "Data Science from Scratch" -- TODO confirm.
x = [49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
y = [68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]
# Design matrix: prepend a constant 1 to each observation so that
# theta[0] acts as the intercept and theta[1] as the slope.
x_data = [[1] + [ind_var_i] for ind_var_i in x]
y_data = y
# Initial guess for [intercept, slope].
theta_0 = [0,0]
def dot(v, w):
    """Return the dot product of two equal-length vectors v and w."""
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
def step(v, direction, step_size):
    """Return the point reached by moving step_size along direction from v."""
    moved = []
    for v_i, d_i in zip(v, direction):
        moved.append(v_i + step_size * d_i)
    return moved
def mse_cost(theta):
    """Sum of squared residuals of the linear model theta over the
    module-level data set (x_data, y_data).

    Note: despite the name this is the *sum* of squared errors, not the
    mean -- the minimizer only compares values, so the scale is harmless.
    """
    total = 0
    for x_i, y_i in zip(x_data, y_data):
        residual = y_i - dot(theta, x_i)
        total += residual ** 2
    return total
def mse_grad2(theta):
    """Gradient of the sum-of-squared-errors cost at theta over the
    module-level data set (x_data, y_data).

    Returns a list the same length as theta where component j is
    -2 * sum_i (y_i - theta . x_i) * x_i[j].

    Performance: the original looped over the coordinates of theta and
    re-scanned the whole data set (one dot() call per point) for each
    coordinate.  Here each residual is computed exactly once per data
    point, halving the number of dot() calls for this two-parameter model.
    """
    grad = [0] * len(theta)
    for x_i, y_i in zip(x_data, y_data):
        residual = y_i - dot(theta, x_i)
        for j in range(len(theta)):
            grad[j] += residual * x_i[j]
    return [-2 * g for g in grad]
def safe(f):
    """Wrap f so that any ordinary exception yields float('inf') instead
    of propagating.

    Used so that wildly divergent theta candidates (whose cost raises
    e.g. OverflowError) are treated as infinitely bad by the minimizer
    rather than crashing it.
    """
    def safe_f(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        # A bare `except:` would also swallow KeyboardInterrupt and
        # SystemExit, making the program hard to stop; catch only
        # ordinary errors.
        except Exception:
            return float('inf')
    return safe_f
def minimize_batch(mse_cost, mse_grad2, theta_0, tolerance=0.00000001):
    """Minimize mse_cost by batch gradient descent.

    Each iteration evaluates the gradient once, tries several step sizes
    along the negative gradient, and keeps the cheapest candidate.  Stops
    when the cost improves by less than `tolerance`, returning the last
    accepted theta.

    Performance: the original selected the best candidate with
    min(next_thetas, key=mse_cost) and then called mse_cost(next_theta)
    again, so the winner's (expensive) cost was computed twice per
    iteration.  Here every candidate's cost is evaluated exactly once.
    """
    step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
    theta = theta_0
    # Divergent candidates whose cost overflows count as infinitely bad.
    mse_cost = safe(mse_cost)
    value = mse_cost(theta)
    while True:
        gradient2 = mse_grad2(theta)
        candidates = [step(theta, gradient2, -step_size)
                      for step_size in step_sizes]
        # Pair each candidate with its cost so min() needs one cost
        # evaluation per candidate and none afterwards.
        next_value, next_theta = min(
            (mse_cost(candidate), candidate) for candidate in candidates)
        if abs(value - next_value) < tolerance:
            return theta
        theta, value = next_theta, next_value
# Fit the two-parameter linear model to the module-level data and print
# the fitted coefficients [intercept, slope].
new_theta = minimize_batch(mse_cost, mse_grad2, theta_0, tolerance = 0.000001)
# Python 2 print statement (file is Python 2.7).
print new_theta
1 Answer
dot() gets called three million times.
I changed it from:
def dot(v,w):
return sum(v_i * w_i for v_i, w_i in zip(v,w))
to:
def dot(v,w):
return (v[0] * w[0]) + (v[1] * w[1])
And the runtime dropped from 7.09 seconds to 2.89 seconds, with the same results.
-
\$\begingroup\$ Thanks @TessellatingHeckler. I don't know how to formally debug programs yet, but I did use `time.time()` to get a quick and dirty estimate of how long the code takes to run. I realized (just as you did) that `dot` was the culprit. Could you tell me (or tell me where I could find) why that particular code is inefficient? Many thanks. \$\endgroup\$ — razalfuhl, Jun 26, 2015 at 16:08
-
\$\begingroup\$ "Formally debug", haha, no I just put a global variable in it that incremented every time `dot` was called, and printed the count at the end. I suspect there's lots more improvements possible; that's the first thing I looked at — partly because it was at the top of the file, and partly because it looks like it gets called inside several loops, so improving it would improve all of those. I think it's inefficient because it creates a zip iterator, builds a new list in memory, iterates over that list with `sum()`, then deletes it. The replacement does much less — property access and arithmetic. \$\endgroup\$ — TessellatingHeckler, Jun 26, 2015 at 17:08
-
\$\begingroup\$ Out of curiosity, I've now had a look at the rest, and at best I can bring the ~2.89 seconds down to ~2.25 seconds, and only by making the code much uglier. The next biggest change is changing `x_data` from a list of two-item lists into two separate lists, `x0_data` and `x1_data`. That saves a lot of `x_data_i[0]` dereferencing and a quarter second or so. If there are other huge savings, they're above my skill. I imagine further improvements might come from numerical libraries (numpy?). \$\endgroup\$ — TessellatingHeckler, Jun 26, 2015 at 18:25
Explore related questions
See similar questions with these tags.
\$\begingroup\$ Why does `theta_0` need to be in the call to `minimize_batch`? \$\endgroup\$