[Python-checkins] benchmarks: Improve the precision and reproducibility of benchmark results:

antoine.pitrou python-checkins at python.org
Sat Oct 25 22:17:34 CEST 2014


https://hg.python.org/benchmarks/rev/dc7d29be5a9e
changeset: 217:dc7d29be5a9e
user: Antoine Pitrou <solipsis at pitrou.net>
date: Sat Oct 25 22:17:25 2014 +0200
summary:
 Improve the precision and reproducibility of benchmark results:
- use time.perf_counter() if both interpreters support it
- force deterministic hash() results
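
For readers skimming the diff, a minimal sketch of why the timer choice matters; the measure() helper below is purely illustrative and not part of perf.py:

    import time

    def measure(workload, timer=time.time):
        # Time a single run of *workload* with whichever timer was chosen.
        t0 = timer()
        workload()
        return timer() - t0

    # time.time() follows the wall clock, which can be adjusted (NTP, DST)
    # while a benchmark runs; time.perf_counter() (Python 3.3+) is monotonic
    # and uses the highest-resolution clock available, so perf.py now prefers
    # it whenever both interpreters support it.
    timer = getattr(time, 'perf_counter', time.time)
    print(measure(lambda: sum(range(10 ** 6)), timer))
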
files:
 perf.py | 37 ++++++++++++++-
 performance/bm_call_method.py | 8 +-
 performance/bm_call_method_slots.py | 8 +-
 performance/bm_call_method_unknown.py | 8 +-
 performance/bm_call_simple.py | 8 +-
 performance/bm_chameleon.py | 7 +-
 performance/bm_chaos.py | 10 ++--
 performance/bm_django.py | 6 +-
 performance/bm_django_v2.py | 6 +-
 performance/bm_elementtree.py | 6 +-
 performance/bm_fannkuch.py | 6 +-
 performance/bm_float.py | 6 +-
 performance/bm_go.py | 6 +-
 performance/bm_hexiom2.py | 6 +-
 performance/bm_html5lib.py | 6 +-
 performance/bm_json.py | 12 ++--
 performance/bm_json_v2.py | 6 +-
 performance/bm_logging.py | 18 +++---
 performance/bm_mako.py | 6 +-
 performance/bm_mako_v2.py | 6 +-
 performance/bm_meteor_contest.py | 6 +-
 performance/bm_nbody.py | 6 +-
 performance/bm_nqueens.py | 6 +-
 performance/bm_pathlib.py | 8 +-
 performance/bm_pickle.py | 30 ++++++------
 performance/bm_pidigits.py | 6 +-
 performance/bm_raytrace.py | 7 +-
 performance/bm_regex_compile.py | 12 ++--
 performance/bm_regex_effbot.py | 6 +-
 performance/bm_regex_v8.py | 6 +-
 performance/bm_richards.py | 6 +-
 performance/bm_rietveld.py | 6 +-
 performance/bm_spambayes.py | 6 +-
 performance/bm_spectral_norm.py | 6 +-
 performance/bm_spitfire.py | 10 ++--
 performance/bm_telco.py | 13 ++--
 performance/bm_threading.py | 12 ++--
 performance/bm_tornado_http.py | 6 +-
 performance/bm_unpack_sequence.py | 20 ++++----
 performance/util.py | 8 ++-
 40 files changed, 202 insertions(+), 166 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -120,6 +120,29 @@
 return 'lib'
 
 
+def supported_timers(python):
+    """Return a list of supported timers by the given Python interpreter,
+    in decreasing order of priority.
+    """
+    version = interpreter_version(python)
+    if version >= '3.3':
+        return ['perf_counter', 'time']
+    else:
+        return ['time']
+
+
+def choose_timer(base_python, changed_python):
+    """Choose the best timer supported by both *base_python* and
+    *changed_python*.
+    """
+    u = supported_timers(base_python)
+    v = set(supported_timers(changed_python))
+    for timer in u:
+        if timer in v:
+            return timer
+    assert 0   # At least time.time() should always be available
+
+
 def avg(seq):
 return sum(seq) / float(len(seq))
 
@@ -969,6 +992,9 @@
 for k in ("COMSPEC", "SystemRoot"):
 if k in os.environ and k not in fixed_env:
 fixed_env[k] = os.environ[k]
+ # Make hashing deterministic (this may make some benchmarks more
+ # reproducible).
+ fixed_env["PYTHONHASHSEED"] = "1"
 return fixed_env
 
 
@@ -1130,7 +1156,8 @@
 trials = max(1, int(trials * iteration_scaling))
 
 RemovePycs()
- command = python + [bm_path, "-n", trials] + extra_args
+ bench_args = [bm_path, "-n", trials, "--timer", options.timer]
+ command = python + bench_args + extra_args
 output = CallAndCaptureOutput(command, bm_env,
 track_memory=options.track_memory,
 inherit_env=options.inherit_env)
@@ -2401,6 +2428,7 @@
 usage="%prog [options] baseline_python changed_python",
 description=("Compares the performance of baseline_python with" +
 " changed_python and prints a report."))
+
 parser.add_option("-r", "--rigorous", action="store_true",
 help=("Spend longer running tests to get more" +
 " accurate results"))
@@ -2410,6 +2438,9 @@
 help="Print more output")
 parser.add_option("-m", "--track_memory", action="store_true",
 help="Track memory usage. This only works on Linux.")
+ parser.add_option("--timer", action="store",
+ help="Override timer function.")
+
 parser.add_option("-a", "--args", default="",
 help=("Pass extra arguments to the python binaries."
 " If there is a comma in this option's value, the"
@@ -2488,6 +2519,10 @@
 if options.diff_instrumentation:
 info("Suppressing performance data due to --diff_instrumentation")
 
+ if not options.timer:
+ options.timer = choose_timer(base_cmd_prefix, changed_cmd_prefix)
+ info("Automatically selected timer: %s", options.timer)
+
 should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
 options.fast)
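
To summarize the driver-side changes in one place, here is a hedged sketch of how the chosen timer and the hash seed reach each child benchmark process; run_one_benchmark() is an illustrative name, not an actual perf.py function:

    import os
    import subprocess
    import sys

    def run_one_benchmark(python, bm_path, trials, timer_name):
        # As in the benchmark-running code above: the benchmark script is
        # told which timer to use via the new --timer argument.
        command = [python, bm_path, "-n", str(trials), "--timer", timer_name]

        # As in the environment fixup above: pinning PYTHONHASHSEED makes
        # hash() (and therefore dict/set iteration order in hash-randomized
        # interpreters) identical across runs and interpreters.
        env = dict(os.environ)
        env["PYTHONHASHSEED"] = "1"

        return subprocess.check_output(command, env=env)

    # Hypothetical invocation:
    # run_one_benchmark(sys.executable, "performance/bm_call_simple.py",
    #                   5, "perf_counter")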
 
diff --git a/performance/bm_call_method.py b/performance/bm_call_method.py
--- a/performance/bm_call_method.py
+++ b/performance/bm_call_method.py
@@ -113,11 +113,11 @@
 pass
 
 
-def test_calls(iterations):
+def test_calls(iterations, timer):
 times = []
 f = Foo()
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 # 20 calls
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
@@ -139,7 +139,7 @@
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -153,6 +153,6 @@
 options, _ = parser.parse_args()
 
 # Priming run.
- test_calls(1)
+ test_calls(1, time.time)
 
 util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_method_slots.py b/performance/bm_call_method_slots.py
--- a/performance/bm_call_method_slots.py
+++ b/performance/bm_call_method_slots.py
@@ -117,13 +117,13 @@
 pass
 
 
-def test_calls(iterations):
+def test_calls(iterations, timer):
 times = []
 f = Foo()
 if hasattr(f, '__dict__'):
 raise Exception("f has a __dict__ attribute!")
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 # 20 calls
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
@@ -145,7 +145,7 @@
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
 f.foo(1, 2, 3, 4)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -159,6 +159,6 @@
 options, _ = parser.parse_args()
 
 # Priming run.
- test_calls(1)
+ test_calls(1, time.time)
 
 util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_method_unknown.py b/performance/bm_call_method_unknown.py
--- a/performance/bm_call_method_unknown.py
+++ b/performance/bm_call_method_unknown.py
@@ -313,13 +313,13 @@
 pass
 
 
-def test_calls(iterations):
+def test_calls(iterations, timer):
 times = []
 a = Foo()
 b = Bar()
 c = Baz()
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 # 18 calls
 a.foo(b, c)
 b.foo(c, a)
@@ -339,7 +339,7 @@
 a.foo(b, c)
 b.foo(c, a)
 c.foo(a, b)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -353,6 +353,6 @@
 options, _ = parser.parse_args()
 
 # Priming run.
- test_calls(1)
+ test_calls(1, time.time)
 
 util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_call_simple.py b/performance/bm_call_simple.py
--- a/performance/bm_call_simple.py
+++ b/performance/bm_call_simple.py
@@ -115,10 +115,10 @@
 pass
 
 
-def test_calls(iterations):
+def test_calls(iterations, timer):
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 # 20 calls
 foo(1, 2, 3, 4)
 foo(1, 2, 3, 4)
@@ -140,7 +140,7 @@
 foo(1, 2, 3, 4)
 foo(1, 2, 3, 4)
 foo(1, 2, 3, 4)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -154,6 +154,6 @@
 options, _ = parser.parse_args()
 
 # Priming run.
- test_calls(1)
+ test_calls(1, time.time)
 
 util.run_benchmark(options, options.num_runs, test_calls)
diff --git a/performance/bm_chameleon.py b/performance/bm_chameleon.py
--- a/performance/bm_chameleon.py
+++ b/performance/bm_chameleon.py
@@ -15,16 +15,15 @@
 </tr>
 </table>""" % compat.unicode.__name__
 
-def main(n):
+def main(n, timer):
 tmpl = PageTemplate(BIGTABLE_ZPT)
 options = {'table': [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10)
 for x in range(1000)]}
- import time
 l = []
 for k in range(n):
- t0 = time.time()
+ t0 = timer()
 tmpl(options=options)
- l.append(time.time() - t0)
+ l.append(timer() - t0)
 return l
 
 if __name__ == '__main__':
diff --git a/performance/bm_chaos.py b/performance/bm_chaos.py
--- a/performance/bm_chaos.py
+++ b/performance/bm_chaos.py
@@ -199,14 +199,14 @@
 if point.y < self.miny:
 point.y = self.miny
 
- def create_image_chaos(self, w, h, n):
+ def create_image_chaos(self, timer, w, h, n):
 im = [[1] * h for i in range(w)]
 point = GVector((self.maxx + self.minx) / 2,
 (self.maxy + self.miny) / 2, 0)
 colored = 0
 times = []
 for _ in range(n):
- t1 = time.time()
+ t1 = timer()
 for i in xrange(5000):
 point = self.transform_point(point)
 x = (point.x - self.minx) / self.width * w
@@ -218,12 +218,12 @@
 if y == h:
 y -= 1
 im[x][h - y - 1] = 0
- t2 = time.time()
+ t2 = timer()
 times.append(t2 - t1)
 return times
 
 
-def main(n):
+def main(n, timer):
 splines = [
 Spline([
 GVector(1.597350, 3.304460, 0.000000),
@@ -248,7 +248,7 @@
 3, [0, 0, 0, 1, 1, 1])
 ]
 c = Chaosgame(splines, 0.25)
- return c.create_image_chaos(1000, 1200, n)
+ return c.create_image_chaos(timer, 1000, 1200, n)
 
 
 
diff --git a/performance/bm_django.py b/performance/bm_django.py
--- a/performance/bm_django.py
+++ b/performance/bm_django.py
@@ -32,7 +32,7 @@
 </table>
 """)
 
-def test_django(count):
+def test_django(count, timer):
 table = [xrange(150) for _ in xrange(150)]
 context = Context({"table": table})
 
@@ -42,9 +42,9 @@
 
 times = []
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 data = DJANGO_TMPL.render(context)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_django_v2.py b/performance/bm_django_v2.py
--- a/performance/bm_django_v2.py
+++ b/performance/bm_django_v2.py
@@ -28,7 +28,7 @@
 </table>
 """)
 
-def test_django(count):
+def test_django(count, timer):
 table = [xrange(150) for _ in xrange(150)]
 context = Context({"table": table})
 
@@ -38,9 +38,9 @@
 
 times = []
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 data = DJANGO_TMPL.render(context)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_elementtree.py b/performance/bm_elementtree.py
--- a/performance/bm_elementtree.py
+++ b/performance/bm_elementtree.py
@@ -179,7 +179,7 @@
 raise RuntimeError("unexpected output detected")
 
 
-def run_etree_benchmark(iterations, etree, bench_func):
+def run_etree_benchmark(iterations, timer, etree, bench_func):
 times = []
 
 xml_root = build_xml_tree(etree)
@@ -195,9 +195,9 @@
 bench_func(etree, file_path, xml_data, xml_root)
 
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 bench_func(etree, file_path, xml_data, xml_root)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 finally:
 try:
diff --git a/performance/bm_fannkuch.py b/performance/bm_fannkuch.py
--- a/performance/bm_fannkuch.py
+++ b/performance/bm_fannkuch.py
@@ -54,12 +54,12 @@
 
 DEFAULT_ARG = 9
 
-def main(n):
+def main(n, timer):
 times = []
 for i in xrange(n):
- t0 = time.time()
+ t0 = timer()
 fannkuch(DEFAULT_ARG)
- tk = time.time()
+ tk = timer()
 times.append(tk - t0)
 return times
 
diff --git a/performance/bm_float.py b/performance/bm_float.py
--- a/performance/bm_float.py
+++ b/performance/bm_float.py
@@ -47,14 +47,14 @@
 
 POINTS = 100000
 
-def main(arg):
+def main(arg, timer):
 # XXX warmup
 
 times = []
 for i in xrange(arg):
- t0 = time.time()
+ t0 = timer()
 o = benchmark(POINTS)
- tk = time.time()
+ tk = timer()
 times.append(tk - t0)
 return times
 
diff --git a/performance/bm_go.py b/performance/bm_go.py
--- a/performance/bm_go.py
+++ b/performance/bm_go.py
@@ -424,14 +424,14 @@
 board = Board()
 pos = computer_move(board)
 
-def main(n):
+def main(n, timer):
 times = []
 for i in range(5):
 versus_cpu() # warmup
 for i in range(n):
- t1 = time.time()
+ t1 = timer()
 versus_cpu()
- t2 = time.time()
+ t2 = timer()
 times.append(t2 - t1)
 return times
 
diff --git a/performance/bm_hexiom2.py b/performance/bm_hexiom2.py
--- a/performance/bm_hexiom2.py
+++ b/performance/bm_hexiom2.py
@@ -518,14 +518,14 @@
 if output.getvalue() != expected:
 raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
 
-def main(n):
+def main(n, timer):
 # only run 1/25th of the requested number of iterations.
 # with the default n=50 from runner.py, this means twice.
 l = []
 for i in xrange(n):
- t0 = time.time()
+ t0 = timer()
 run_level36()
- time_elapsed = time.time() - t0
+ time_elapsed = timer() - t0
 l.append(time_elapsed)
 return l
 
diff --git a/performance/bm_html5lib.py b/performance/bm_html5lib.py
--- a/performance/bm_html5lib.py
+++ b/performance/bm_html5lib.py
@@ -23,16 +23,16 @@
 import html5lib
 
 
-def test_html5lib(count, spec_data):
+def test_html5lib(count, timer, spec_data):
 # No warm-up runs for this benchmark; in real life, the parser doesn't get
 # to warm up (this isn't a daemon process).
 
 times = []
 for _ in xrange(count):
 spec_data.seek(0)
- t0 = time.time()
+ t0 = timer()
 html5lib.parse(spec_data)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_json.py b/performance/bm_json.py
--- a/performance/bm_json.py
+++ b/performance/bm_json.py
@@ -76,7 +76,7 @@
 DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]
 
 
-def test_json_dump(num_obj_copies, json, options):
+def test_json_dump(num_obj_copies, timer, json, options):
 # Warm-up runs.
 json.dumps(DICT)
 json.dumps(TUPLE)
@@ -85,7 +85,7 @@
 loops = num_obj_copies // 20 # We do 20 runs per loop.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 json.dumps(DICT)
 json.dumps(DICT)
@@ -147,12 +147,12 @@
 json.dumps(DICT_GROUP)
 json.dumps(DICT_GROUP)
 json.dumps(DICT_GROUP)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_json_load(num_obj_copies, json, options):
+def test_json_load(num_obj_copies, timer, json, options):
 json_dict = json.dumps(DICT)
 json_tuple = json.dumps(TUPLE)
 json_dict_group = json.dumps(DICT_GROUP)
@@ -165,7 +165,7 @@
 loops = num_obj_copies // 20 # We do 20 runs per loop.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 json.loads(json_dict)
 json.loads(json_dict)
@@ -227,7 +227,7 @@
 json.loads(json_dict_group)
 json.loads(json_dict_group)
 json.loads(json_dict_group)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_json_v2.py b/performance/bm_json_v2.py
--- a/performance/bm_json_v2.py
+++ b/performance/bm_json_v2.py
@@ -17,15 +17,15 @@
 
 cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']
 
-def main(n):
+def main(n, timer):
 l = []
 for i in xrange(n):
- t0 = time.time()
+ t0 = timer()
 for case in cases:
 data, count = globals()[case]
 for i in xrange(count):
 json.dumps(data)
- l.append(time.time() - t0)
+ l.append(timer() - t0)
 return l
 
 if __name__ == '__main__':
diff --git a/performance/bm_logging.py b/performance/bm_logging.py
--- a/performance/bm_logging.py
+++ b/performance/bm_logging.py
@@ -19,11 +19,11 @@
 MESSAGE = 'some important information to be logged'
 
 
-def test_no_output(iterations, logger):
+def test_no_output(iterations, timer, logger):
 times = []
 m = MESSAGE
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(10000):
 logger.debug(m)
 logger.debug(m)
@@ -35,16 +35,16 @@
 logger.debug(m)
 logger.debug(m)
 logger.debug(m)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_simple_output(iterations, logger):
+def test_simple_output(iterations, timer, logger):
 times = []
 m = MESSAGE
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(1000):
 logger.warn(m)
 logger.warn(m)
@@ -56,17 +56,17 @@
 logger.warn(m)
 logger.warn(m)
 logger.warn(m)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_formatted_output(iterations, logger):
+def test_formatted_output(iterations, timer, logger):
 times = []
 f = FORMAT
 m = MESSAGE
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(1000):
 logger.warn(f, m)
 logger.warn(f, m)
@@ -78,7 +78,7 @@
 logger.warn(f, m)
 logger.warn(f, m)
 logger.warn(f, m)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_mako.py b/performance/bm_mako.py
--- a/performance/bm_mako.py
+++ b/performance/bm_mako.py
@@ -25,7 +25,7 @@
 </table>
 """)
 
-def test_mako(count):
+def test_mako(count, timer):
 table = [xrange(150) for _ in xrange(150)]
 
 # Warm up Mako.
@@ -34,9 +34,9 @@
 
 times = []
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 MAKO_TMPL.render(table = table)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_mako_v2.py b/performance/bm_mako_v2.py
--- a/performance/bm_mako_v2.py
+++ b/performance/bm_mako_v2.py
@@ -118,7 +118,7 @@
 """
 
 
-def test_mako(count):
+def test_mako(count, timer):
 
 lookup = TemplateLookup()
 lookup.put_string('base.mako', BASE_TEMPLATE)
@@ -132,11 +132,11 @@
 
 times = []
 for i in xrange(count):
- t0 = time.time()
+ t0 = timer()
 data = template.render(table=table, paragraphs=paragraphs,
 lorem=LOREM_IPSUM, title=title,
 img_count=50, xrange=xrange)
- t1 = time.time()
+ t1 = timer()
 times.append(t1-t0)
 return times
 
diff --git a/performance/bm_meteor_contest.py b/performance/bm_meteor_contest.py
--- a/performance/bm_meteor_contest.py
+++ b/performance/bm_meteor_contest.py
@@ -131,10 +131,10 @@
 
 SOLVE_ARG = 60
 
-def main(n):
+def main(n, timer):
 times = []
 for i in xrange(n):
- t0 = time.time()
+ t0 = timer()
 free = frozenset(xrange(len(board)))
 curr_board = [-1] * len(board)
 pieces_left = list(range(len(pieces)))
@@ -142,7 +142,7 @@
 solve(SOLVE_ARG, 0, free, curr_board, pieces_left, solutions)
 #print len(solutions), 'solutions found\n'
 #for i in (0, -1): print_board(solutions[i])
- tk = time.time()
+ tk = timer()
 times.append(tk - t0)
 return times
 
diff --git a/performance/bm_nbody.py b/performance/bm_nbody.py
--- a/performance/bm_nbody.py
+++ b/performance/bm_nbody.py
@@ -124,7 +124,7 @@
 v[2] = pz / m
 
 
-def test_nbody(iterations):
+def test_nbody(iterations, timer):
 # Warm-up runs.
 report_energy()
 advance(0.01, 20000)
@@ -132,11 +132,11 @@
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 report_energy()
 advance(0.01, 20000)
 report_energy()
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_nqueens.py b/performance/bm_nqueens.py
--- a/performance/bm_nqueens.py
+++ b/performance/bm_nqueens.py
@@ -60,16 +60,16 @@
 yield vec
 
 
-def test_n_queens(iterations):
+def test_n_queens(iterations, timer):
 # Warm-up runs.
 list(n_queens(8))
 list(n_queens(8))
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 list(n_queens(8))
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_pathlib.py b/performance/bm_pathlib.py
--- a/performance/bm_pathlib.py
+++ b/performance/bm_pathlib.py
@@ -43,7 +43,7 @@
 shutil.rmtree(TMP_PATH)
 
 
-def test_pathlib(count):
+def test_pathlib(count, timer):
 base_path = Path(TMP_PATH)
 
 # Warm up the filesystem cache and keep some objects in memory.
@@ -53,8 +53,8 @@
 assert len(path_objects) == NUM_FILES
 
 times = []
- for _ in xrange(count // 2):
- t0 = time.time()
+ for _ in xrange(max(1, count // 2)):
+ t0 = timer()
 # Do something simple with each path.
 for p in base_path:
 p.st_mtime
@@ -64,7 +64,7 @@
 p.st_mtime
 for p in base_path.glob("*.py"):
 p.st_mtime
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_pickle.py b/performance/bm_pickle.py
--- a/performance/bm_pickle.py
+++ b/performance/bm_pickle.py
@@ -86,7 +86,7 @@
 DICT_GROUP = [mutate_dict(DICT, random_source) for _ in range(3)]
 
 
-def test_pickle(num_obj_copies, pickle, options):
+def test_pickle(num_obj_copies, timer, pickle, options):
 # Warm-up runs.
 pickle.dumps(DICT, options.protocol)
 pickle.dumps(TUPLE, options.protocol)
@@ -95,7 +95,7 @@
 loops = num_obj_copies // 20 # We do 20 runs per loop.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 pickle.dumps(DICT, options.protocol)
 pickle.dumps(DICT, options.protocol)
@@ -157,12 +157,12 @@
 pickle.dumps(DICT_GROUP, options.protocol)
 pickle.dumps(DICT_GROUP, options.protocol)
 pickle.dumps(DICT_GROUP, options.protocol)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_unpickle(num_obj_copies, pickle, options):
+def test_unpickle(num_obj_copies, timer, pickle, options):
 pickled_dict = pickle.dumps(DICT, options.protocol)
 pickled_tuple = pickle.dumps(TUPLE, options.protocol)
 pickled_dict_group = pickle.dumps(DICT_GROUP, options.protocol)
@@ -175,7 +175,7 @@
 loops = num_obj_copies // 20 # We do 20 runs per loop.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 pickle.loads(pickled_dict)
 pickle.loads(pickled_dict)
@@ -237,7 +237,7 @@
 pickle.loads(pickled_dict_group)
 pickle.loads(pickled_dict_group)
 pickle.loads(pickled_dict_group)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -245,7 +245,7 @@
 LIST = [[list(range(10)), list(range(10))] for _ in xrange(10)]
 
 
-def test_pickle_list(loops, pickle, options):
+def test_pickle_list(loops, timer, pickle, options):
 # Warm-up runs.
 pickle.dumps(LIST, options.protocol)
 pickle.dumps(LIST, options.protocol)
@@ -253,7 +253,7 @@
 loops = loops // 5 # Scale to compensate for the workload.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 pickle.dumps(LIST, options.protocol)
 pickle.dumps(LIST, options.protocol)
@@ -265,12 +265,12 @@
 pickle.dumps(LIST, options.protocol)
 pickle.dumps(LIST, options.protocol)
 pickle.dumps(LIST, options.protocol)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_unpickle_list(loops, pickle, options):
+def test_unpickle_list(loops, timer, pickle, options):
 pickled_list = pickle.dumps(LIST, options.protocol)
 
 # Warm-up runs.
@@ -280,7 +280,7 @@
 loops = loops // 5 # Scale to compensate for the workload.
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 pickle.loads(pickled_list)
 pickle.loads(pickled_list)
@@ -292,14 +292,14 @@
 pickle.loads(pickled_list)
 pickle.loads(pickled_list)
 pickle.loads(pickled_list)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
 MICRO_DICT = dict((key, dict.fromkeys(range(10))) for key in xrange(100))
 
-def test_pickle_dict(loops, pickle, options):
+def test_pickle_dict(loops, timer, pickle, options):
 # Warm-up runs.
 pickle.dumps(MICRO_DICT, options.protocol)
 pickle.dumps(MICRO_DICT, options.protocol)
@@ -307,14 +307,14 @@
 loops = max(1, loops // 10)
 times = []
 for _ in xrange(options.num_runs):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(loops):
 pickle.dumps(MICRO_DICT, options.protocol)
 pickle.dumps(MICRO_DICT, options.protocol)
 pickle.dumps(MICRO_DICT, options.protocol)
 pickle.dumps(MICRO_DICT, options.protocol)
 pickle.dumps(MICRO_DICT, options.protocol)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_pidigits.py b/performance/bm_pidigits.py
--- a/performance/bm_pidigits.py
+++ b/performance/bm_pidigits.py
@@ -16,7 +16,7 @@
 
 NDIGITS = 2000
 
-def test_pidgits(iterations):
+def test_pidgits(iterations, timer):
 _map = imap
 _count = itertools.count
 _islice = itertools.islice
@@ -57,9 +57,9 @@
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 calc_ndigits(NDIGITS)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_raytrace.py b/performance/bm_raytrace.py
--- a/performance/bm_raytrace.py
+++ b/performance/bm_raytrace.py
@@ -354,15 +354,14 @@
 s.addObject(Halfspace(Point(0,0,0), Vector.UP), CheckerboardSurface())
 s.render(c)
 
-def main(n):
- import time
+def main(n, timer):
 times = []
 for i in range(5):
 _main() # warmup
 for i in range(n):
- t1 = time.time()
+ t1 = timer()
 _main()
- t2 = time.time()
+ t2 = timer()
 times.append(t2 - t1)
 return times
 
diff --git a/performance/bm_regex_compile.py b/performance/bm_regex_compile.py
--- a/performance/bm_regex_compile.py
+++ b/performance/bm_regex_compile.py
@@ -41,10 +41,10 @@
 re.sub = capture_sub
 try:
 import bm_regex_effbot
- bm_regex_effbot.test_regex_effbot(1)
+ bm_regex_effbot.test_regex_effbot(1, time.time)
 
 import bm_regex_v8
- bm_regex_v8.test_regex_v8(1)
+ bm_regex_v8.test_regex_v8(1, time.time)
 finally:
 re.compile = real_compile
 re.search = real_search
@@ -52,7 +52,7 @@
 return regexes
 
 
-def test_regex_compile(count):
+def test_regex_compile(count, timer):
 try:
 clear_cache = re._cache.clear
 except AttributeError:
@@ -67,11 +67,11 @@
 times = []
 
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 for regex, flags in regexes:
 clear_cache()
 re.compile(regex, flags)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
@@ -83,4 +83,4 @@
 util.add_standard_options_to(parser)
 options, args = parser.parse_args()
 
- util.run_benchmark(options, options.num_runs, test_regex_compile)
\ No newline at end of file
+ util.run_benchmark(options, options.num_runs, test_regex_compile)
diff --git a/performance/bm_regex_effbot.py b/performance/bm_regex_effbot.py
--- a/performance/bm_regex_effbot.py
+++ b/performance/bm_regex_effbot.py
@@ -136,7 +136,7 @@
 re.search(regexs[id], string_tables[n][id])
 
 
-def test_regex_effbot(iterations):
+def test_regex_effbot(iterations, timer):
 sizes = init_benchmarks()
 
 # Warm up.
@@ -145,10 +145,10 @@
 
 times = []
 for i in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for size in sizes:
 run_benchmarks(size)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_regex_v8.py b/performance/bm_regex_v8.py
--- a/performance/bm_regex_v8.py
+++ b/performance/bm_regex_v8.py
@@ -1658,10 +1658,10 @@
 re.search(r'jvaqbjf', strings[63])
 
 
-def test_regex_v8(count):
+def test_regex_v8(count, timer):
 times = []
 for i in xrange(count):
- t0 = time.time()
+ t0 = timer()
 block0()
 block1()
 block2()
@@ -1674,7 +1674,7 @@
 block9()
 block10()
 block11()
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_richards.py b/performance/bm_richards.py
--- a/performance/bm_richards.py
+++ b/performance/bm_richards.py
@@ -19,16 +19,16 @@
 from compat import xrange
 
 
-def test_richards(iterations):
+def test_richards(iterations, timer):
 # Warm-up
 r = richards.Richards()
 r.run(iterations=2)
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 r.run(iterations=1)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_rietveld.py b/performance/bm_rietveld.py
--- a/performance/bm_rietveld.py
+++ b/performance/bm_rietveld.py
@@ -87,14 +87,14 @@
 return tmpl, context
 
 
-def test_rietveld(count, tmpl, context):
+def test_rietveld(count, timer, tmpl, context):
 # Warm up Django.
 tmpl.render(context)
 tmpl.render(context)
 
 times = []
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 # 30 calls to render, so that we don't measure loop overhead.
 tmpl.render(context)
 tmpl.render(context)
@@ -126,7 +126,7 @@
 tmpl.render(context)
 tmpl.render(context)
 tmpl.render(context)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_spambayes.py b/performance/bm_spambayes.py
--- a/performance/bm_spambayes.py
+++ b/performance/bm_spambayes.py
@@ -21,7 +21,7 @@
 import util
 
 
-def test_spambayes(iterations, messages, ham_classifier):
+def test_spambayes(iterations, timer, messages, ham_classifier):
 # Prime the pump. This still leaves some hot functions uncompiled; these
 # will be noticed as hot during the timed loops below.
 for msg in messages:
@@ -29,10 +29,10 @@
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for msg in messages:
 ham_classifier.score(msg)
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_spectral_norm.py b/performance/bm_spectral_norm.py
--- a/performance/bm_spectral_norm.py
+++ b/performance/bm_spectral_norm.py
@@ -40,10 +40,10 @@
 
 DEFAULT_N = 130
 
-def main(n):
+def main(n, timer):
 times = []
 for i in xrange(n):
- t0 = time.time()
+ t0 = timer()
 u = [1] * DEFAULT_N
 
 for dummy in xrange(10):
@@ -55,7 +55,7 @@
 for ue, ve in izip(u, v):
 vBv += ue * ve
 vv += ve * ve
- tk = time.time()
+ tk = timer()
 times.append(tk - t0)
 return times
 
diff --git a/performance/bm_spitfire.py b/performance/bm_spitfire.py
--- a/performance/bm_spitfire.py
+++ b/performance/bm_spitfire.py
@@ -36,7 +36,7 @@
 </table>
 """
 
-def test_spitfire(count):
+def test_spitfire(count, timer):
 # Activate the most aggressive Spitfire optimizations. While it might
 # conceivably be interesting to stress Spitfire's lower optimization
 # levels, we assume no-one will be running a production system with those
@@ -55,20 +55,20 @@
 
 times = []
 for _ in xrange(count):
- t0 = time.time()
+ t0 = timer()
 data = spitfire_tmpl_o4(search_list=[{"table": table}]).main()
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_spitfire_without_psyco(count):
+def test_spitfire_without_psyco(count, timer):
 class FakePsyco(object):
 def bind(self, *args, **kwargs):
 pass
 sys.modules["psyco"] = FakePsyco()
 
- return test_spitfire(count)
+ return test_spitfire(count, timer)
 
 
 if __name__ == "__main__":
diff --git a/performance/bm_telco.py b/performance/bm_telco.py
--- a/performance/bm_telco.py
+++ b/performance/bm_telco.py
@@ -20,7 +20,6 @@
 import os
 from struct import unpack
 import sys
-from time import clock as time
 
 from compat import xrange
 
@@ -32,7 +31,7 @@
 
 filename = rel_path("data", "telco-bench.b")
 
-def run():
+def run(timer):
 getcontext().rounding = ROUND_DOWN
 rates = list(map(Decimal, ('0.0013', '0.00894')))
 twodig = Decimal('0.01')
@@ -41,7 +40,7 @@
 disttax = Decimal("0.0341")
 
 infil = open(filename, "rb")
- start = time()
+ start = timer()
 
 sumT = Decimal("0") # sum of total prices
 sumB = Decimal("0") # sum of basic tax
@@ -73,14 +72,14 @@
 sumT += t
 
 infil.close()
- end = time()
+ end = timer()
 return end - start
 
-def main(n):
- run() # warmup
+def main(n, timer):
+ run(timer) # warmup
 times = []
 for i in xrange(n):
- times.append(run())
+ times.append(run(timer))
 return times
 
 
diff --git a/performance/bm_threading.py b/performance/bm_threading.py
--- a/performance/bm_threading.py
+++ b/performance/bm_threading.py
@@ -29,33 +29,33 @@
 iterations -= 1
 
 
-def test_iterative_count(iterations, num_threads):
+def test_iterative_count(iterations, timer, num_threads):
 # Warm up.
 count(1000)
 
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 for _ in xrange(num_threads):
 count()
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_threaded_count(iterations, num_threads):
+def test_threaded_count(iterations, timer, num_threads):
 # Warm up.
 count(1000)
 
 times = []
 for _ in xrange(iterations):
 threads = [threading.Thread(target=count) for _ in xrange(num_threads)]
- t0 = time.time()
+ t0 = timer()
 for thread in threads:
 thread.start()
 for thread in threads:
 thread.join()
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
diff --git a/performance/bm_tornado_http.py b/performance/bm_tornado_http.py
--- a/performance/bm_tornado_http.py
+++ b/performance/bm_tornado_http.py
@@ -57,7 +57,7 @@
 return sockets[0].getsockname()
 
 
-def test_tornado(count):
+def test_tornado(count, timer):
 loop = IOLoop.instance()
 host, port = make_http_server(loop, make_application())
 url = "http://%s:%s/" % (host, port)
@@ -67,14 +67,14 @@
 def main():
 client = AsyncHTTPClient()
 for i in xrange(count):
- t0 = time.time()
+ t0 = timer()
 futures = [client.fetch(url) for j in xrange(CONCURRENCY)]
 for fut in futures:
 resp = yield fut
 buf = resp.buffer
 buf.seek(0, 2)
 assert buf.tell() == len(CHUNK) * NCHUNKS
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 
 loop.run_sync(main)
diff --git a/performance/bm_unpack_sequence.py b/performance/bm_unpack_sequence.py
--- a/performance/bm_unpack_sequence.py
+++ b/performance/bm_unpack_sequence.py
@@ -11,10 +11,10 @@
 from compat import xrange
 
 
-def do_unpacking(iterations, to_unpack):
+def do_unpacking(iterations, timer, to_unpack):
 times = []
 for _ in xrange(iterations):
- t0 = time.time()
+ t0 = timer()
 # 400 unpackings
 a, b, c, d, e, f, g, h, i, j = to_unpack
 a, b, c, d, e, f, g, h, i, j = to_unpack
@@ -416,26 +416,26 @@
 a, b, c, d, e, f, g, h, i, j = to_unpack
 a, b, c, d, e, f, g, h, i, j = to_unpack
 a, b, c, d, e, f, g, h, i, j = to_unpack
- t1 = time.time()
+ t1 = timer()
 times.append(t1 - t0)
 return times
 
 
-def test_tuple_unpacking(iterations):
+def test_tuple_unpacking(iterations, timer):
 x = tuple(range(10))
 
- return do_unpacking(iterations, x)
+ return do_unpacking(iterations, timer, x)
 
 
-def test_list_unpacking(iterations):
+def test_list_unpacking(iterations, timer):
 x = list(range(10))
 
- return do_unpacking(iterations, x)
+ return do_unpacking(iterations, timer, x)
 
 
-def test_all(iterations):
- tuple_data = test_tuple_unpacking(iterations)
- list_data = test_list_unpacking(iterations)
+def test_all(iterations, timer):
+ tuple_data = test_tuple_unpacking(iterations, timer)
+ list_data = test_list_unpacking(iterations, timer)
 return [x + y for (x, y) in zip(tuple_data, list_data)]
 
 
diff --git a/performance/util.py b/performance/util.py
--- a/performance/util.py
+++ b/performance/util.py
@@ -6,6 +6,7 @@
 
 import math
 import operator
+import time
 
 # Local imports
 from compat import reduce, print_
@@ -21,13 +22,14 @@
 function. This should return a list of floats (benchmark execution
 times).
 """
+ timer = getattr(time, options.timer)
 if options.profile:
 import cProfile
 prof = cProfile.Profile()
- prof.runcall(bench_func, num_runs, *args)
+ prof.runcall(bench_func, num_runs, timer, *args)
 prof.print_stats(sort=options.profile_sort)
 else:
- data = bench_func(num_runs, *args)
+ data = bench_func(num_runs, timer, *args)
 if options.take_geo_mean:
 product = reduce(operator.mul, data, 1)
 print_(math.pow(product, 1.0 / len(data)))
@@ -52,3 +54,5 @@
 default="time", help="Column to sort cProfile output by.")
 parser.add_option("--take_geo_mean", action="store_true",
 help="Return the geo mean, rather than individual data.")
+ parser.add_option("--timer", action="store", default="time",
+ help="Timing function from the time module.")
-- 
Repository URL: https://hg.python.org/benchmarks

