[Python-checkins] bpo-35513, unittest: TextTestRunner uses time.perf_counter() (GH-11180)

Victor Stinner webhook-mailer at python.org
Mon Dec 17 05:30:44 EST 2018


https://github.com/python/cpython/commit/8db5b54463118e5eb60cb3582e3108623f01f5df
commit: 8db5b54463118e5eb60cb3582e3108623f01f5df
branch: master
author: Victor Stinner <vstinner at redhat.com>
committer: GitHub <noreply at github.com>
date: 2018-12-17T11:30:34+01:00
summary:
bpo-35513, unittest: TextTestRunner uses time.perf_counter() (GH-11180)

TextTestRunner of unittest.runner now uses time.perf_counter() rather
than time.time() to measure the execution time of a test: time.time()
can go backwards, whereas time.perf_counter() is monotonic.

Similar changes were made in libregrtest, pprint, random and
test.time_hashlib.
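For illustration only (not part of the commit), here is a minimal sketch
of the timing pattern this change adopts; the timed_call() helper and the
sum() workload are made up for the example:

    import time

    def timed_call(func, *args):
        # perf_counter() is monotonic, so the difference below cannot go
        # negative if the system clock is adjusted (e.g. by NTP) while
        # func runs -- which can happen with time.time().
        start = time.perf_counter()
        result = func(*args)
        return result, time.perf_counter() - start

    result, elapsed = timed_call(sum, range(10**6))
    print('sum took %.6f seconds' % elapsed)
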
files:
A Misc/NEWS.d/next/Library/2018-12-16-23-28-49.bpo-35513.pn-Zh3.rst
M Lib/pprint.py
M Lib/random.py
M Lib/test/libregrtest/runtest.py
M Lib/test/time_hashlib.py
M Lib/unittest/runner.py
diff --git a/Lib/pprint.py b/Lib/pprint.py
index bcf2eedebe6b..f2a117864e5e 100644
--- a/Lib/pprint.py
+++ b/Lib/pprint.py
@@ -568,11 +568,11 @@ def _perfcheck(object=None):
     if object is None:
         object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
     p = PrettyPrinter()
-    t1 = time.time()
+    t1 = time.perf_counter()
     _safe_repr(object, {}, None, 0)
-    t2 = time.time()
+    t2 = time.perf_counter()
     p.pformat(object)
-    t3 = time.time()
+    t3 = time.perf_counter()
     print("_safe_repr:", t2 - t1)
     print("pformat:", t3 - t2)
 
diff --git a/Lib/random.py b/Lib/random.py
index 03c058a39d6e..9c2904cfd2f9 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -740,14 +740,14 @@ def _test_generator(n, func, args):
     sqsum = 0.0
     smallest = 1e10
     largest = -1e10
-    t0 = time.time()
+    t0 = time.perf_counter()
     for i in range(n):
         x = func(*args)
         total += x
         sqsum = sqsum + x*x
         smallest = min(x, smallest)
         largest = max(x, largest)
-    t1 = time.time()
+    t1 = time.perf_counter()
     print(round(t1-t0, 3), 'sec,', end=' ')
     avg = total/n
     stddev = _sqrt(sqsum/n - avg*avg)
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
index 466b522db9ed..dc2abf237bc0 100644
--- a/Lib/test/libregrtest/runtest.py
+++ b/Lib/test/libregrtest/runtest.py
@@ -162,7 +162,7 @@ def runtest_inner(ns, test, display_failure=True):
         abstest = get_abs_module(ns, test)
         clear_caches()
         with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
-            start_time = time.time()
+            start_time = time.perf_counter()
             the_module = importlib.import_module(abstest)
             # If the test has a test_main, that will run the appropriate
             # tests. If not, use normal unittest test loading.
@@ -180,7 +180,7 @@ def test_runner():
                 refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
             else:
                 test_runner()
-            test_time = time.time() - start_time
+            test_time = time.perf_counter() - start_time
         post_test_cleanup()
     except support.ResourceDenied as msg:
         if not ns.quiet and not ns.pgo:
diff --git a/Lib/test/time_hashlib.py b/Lib/test/time_hashlib.py
index 2585ecb78207..55ebac62912f 100644
--- a/Lib/test/time_hashlib.py
+++ b/Lib/test/time_hashlib.py
@@ -14,26 +14,26 @@ def test_scaled_msg(scale, name):
     longStr = b'Z'*scale
 
     localCF = creatorFunc
-    start = time.time()
+    start = time.perf_counter()
     for f in range(iterations):
         x = localCF(longStr).digest()
-    end = time.time()
+    end = time.perf_counter()
 
     print(('%2.2f' % (end-start)), "seconds", iterations, "x", len(longStr), "bytes", name)
 
 def test_create():
-    start = time.time()
+    start = time.perf_counter()
     for f in range(20000):
         d = creatorFunc()
-    end = time.time()
+    end = time.perf_counter()
 
     print(('%2.2f' % (end-start)), "seconds", '[20000 creations]')
 
 def test_zero():
-    start = time.time()
+    start = time.perf_counter()
     for f in range(20000):
         x = creatorFunc().digest()
-    end = time.time()
+    end = time.perf_counter()
 
     print(('%2.2f' % (end-start)), "seconds", '[20000 "" digests]')
 
diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py
index 2c5ea4ab079d..45e7e4c0458d 100644
--- a/Lib/unittest/runner.py
+++ b/Lib/unittest/runner.py
@@ -168,7 +168,7 @@ def run(self, test):
                     warnings.filterwarnings('module',
                             category=DeprecationWarning,
                             message=r'Please use assert\w+ instead.')
-            startTime = time.time()
+            startTime = time.perf_counter()
             startTestRun = getattr(result, 'startTestRun', None)
             if startTestRun is not None:
                 startTestRun()
@@ -178,7 +178,7 @@ def run(self, test):
                 stopTestRun = getattr(result, 'stopTestRun', None)
                 if stopTestRun is not None:
                     stopTestRun()
-            stopTime = time.time()
+            stopTime = time.perf_counter()
             timeTaken = stopTime - startTime
             result.printErrors()
             if hasattr(result, 'separator2'):
diff --git a/Misc/NEWS.d/next/Library/2018-12-16-23-28-49.bpo-35513.pn-Zh3.rst b/Misc/NEWS.d/next/Library/2018-12-16-23-28-49.bpo-35513.pn-Zh3.rst
new file mode 100644
index 000000000000..f1436a718de2
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2018-12-16-23-28-49.bpo-35513.pn-Zh3.rst
@@ -0,0 +1,4 @@
+:class:`~unittest.runner.TextTestRunner` of :mod:`unittest.runner` now uses
+:func:`time.perf_counter` rather than :func:`time.time` to measure the
+execution time of a test: :func:`time.time` can go backwards, whereas
+:func:`time.perf_counter` is monotonic.
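
As a side note (not part of the commit), the monotonic property can be
checked with time.get_clock_info(), which reports each clock's metadata:

    import time

    # 'time' follows the (adjustable) system clock; 'perf_counter' is
    # monotonic and intended for measuring short durations.
    for name in ('time', 'perf_counter'):
        info = time.get_clock_info(name)
        print(name, 'monotonic=%s' % info.monotonic,
              'adjustable=%s' % info.adjustable)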


