[Python-checkins] bpo-33338: [tokenize] Minor code cleanup (#6573)

Łukasz Langa webhook-mailer at python.org
Mon Apr 23 04:07:20 EDT 2018


https://github.com/python/cpython/commit/c2d384dbd7c6ed9bdfaac45f05b463263c743ee7
commit: c2d384dbd7c6ed9bdfaac45f05b463263c743ee7
branch: master
author: Łukasz Langa <lukasz at langa.pl>
committer: GitHub <noreply at github.com>
date: 2018-04-23T01:07:11-07:00
summary:

bpo-33338: [tokenize] Minor code cleanup (#6573)

This change contains minor things that make diffing between Lib/tokenize.py and
Lib/lib2to3/pgen2/tokenize.py cleaner.
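
The consolidation is mostly about imports: the patch removes the top-level
"from itertools import chain" and the function-local "from itertools import
chain, repeat", routing both call sites through the module-level "import
itertools as _itertools" alias the file already has. A minimal, runnable
sketch of the two aliased calls involved (the sample values are illustrative,
not from the patch):

    import itertools as _itertools

    # chain() stitches several iterables into one stream;
    # repeat() yields the same value indefinitely.
    merged = _itertools.chain([b"first\n"], [b"second\n"])
    padding = _itertools.repeat(b"")

    print(list(merged))    # [b'first\n', b'second\n']
    print(next(padding))   # b''
    print(next(padding))   # b'' -- repeat() never exhausts
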
files:
M Lib/tokenize.py
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 6528b9006128..40e6a8b9297b 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -28,7 +28,6 @@
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-from itertools import chain
 import itertools as _itertools
 import re
 import sys
@@ -278,7 +277,7 @@ def compat(self, token, iterable):
         startline = token[0] in (NEWLINE, NL)
         prevstring = False
 
-        for tok in chain([token], iterable):
+        for tok in _itertools.chain([token], iterable):
             toknum, tokval = tok[:2]
             if toknum == ENCODING:
                 self.encoding = tokval
@@ -475,13 +474,10 @@ def tokenize(readline):
     The first token sequence will always be an ENCODING token
     which tells you which encoding was used to decode the bytes stream.
     """
-    # This import is here to avoid problems when the itertools module is not
-    # built yet and tokenize is imported.
-    from itertools import chain, repeat
     encoding, consumed = detect_encoding(readline)
-    rl_gen = iter(readline, b"")
-    empty = repeat(b"")
-    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+    empty = _itertools.repeat(b"")
+    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
+    return _tokenize(rl_gen.__next__, encoding)
 
 
 def _tokenize(readline, encoding):
@@ -496,7 +492,7 @@ def _tokenize(readline, encoding):
             # BOM will already have been stripped.
             encoding = "utf-8"
         yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
-    while True:             # loop over lines in stream
+    while True:                                # loop over lines in stream
         try:
             line = readline()
         except StopIteration:
@@ -581,7 +577,7 @@ def _tokenize(readline, encoding):
                     continue
                 token, initial = line[start:end], line[start]
 
-                if (initial in numchars or      # ordinary number
+                if (initial in numchars or                 # ordinary number
                         (initial == '.' and token != '.' and token != '...')):
                     yield TokenInfo(NUMBER, token, spos, epos, line)
                 elif initial in '\r\n':
@@ -667,7 +663,8 @@ def main():
 
     # Helper error handling routines
     def perror(message):
-        print(message, file=sys.stderr)
+        sys.stderr.write(message)
+        sys.stderr.write('\n')
 
     def error(message, filename=None, location=None):
         if location:
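
For reference, the rewritten tokenize() builds its line source once: the
lines detect_encoding() already consumed are replayed first, the remaining
stream is drained via iter(readline, b""), and the trailing repeat(b"")
keeps the chained iterator returning empty lines at EOF instead of raising
StopIteration. A standalone sketch of that wiring (the input bytes and the
consumed list are made up for illustration):

    import io
    import itertools as _itertools

    readline = io.BytesIO(b"x = 1\ny = 2\n").readline
    consumed = [b"# a line detect_encoding() already read\n"]  # illustrative
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)

    print(rl_gen.__next__())   # b'# a line detect_encoding() already read\n'
    print(rl_gen.__next__())   # b'x = 1\n'
    print(rl_gen.__next__())   # b'y = 2\n'
    print(rl_gen.__next__())   # b'' -- and on every later call, via repeat()

Passing rl_gen.__next__ into _tokenize() is behaviorally the same as the old
chain(consumed, rl_gen, empty).__next__; the patch only renames and reorders
the intermediate values, per the commit's stated goal of making diffs against
Lib/lib2to3/pgen2/tokenize.py cleaner.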


More information about the Python-checkins mailing list
