[Python-checkins] cpython (3.4): Issue #23681: Fixed Python 2 to 3 porting bugs.

serhiy.storchaka python-checkins at python.org
Fri Mar 20 15:56:25 CET 2015


https://hg.python.org/cpython/rev/104c55bc2276
changeset: 95099:104c55bc2276
branch: 3.4
parent: 95097:a90ec6b96af2
user: Serhiy Storchaka <storchaka at gmail.com>
date: Fri Mar 20 16:46:19 2015 +0200
summary:
 Issue #23681: Fixed Python 2 to 3 porting bugs.
Indexing bytes returns an integer, not bytes.
files:
 Lib/poplib.py             |  2 +-
 Lib/quopri.py             |  2 +-
 Lib/sunau.py              |  7 ++-----
 Lib/test/test_buffer.py   |  8 ++++----
 Lib/test/test_tokenize.py |  7 ++++---
 5 files changed, 12 insertions(+), 14 deletions(-)
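
The common thread behind these fixes: in Python 3, indexing a bytes object
yields an int, while slicing yields a bytes object (in Python 2 both returned
a one-character str). A minimal interactive sketch of the difference:

    >>> data = b'CRLF'
    >>> data[0]            # indexing bytes gives an int in Python 3
    67
    >>> data[0:1]          # slicing gives a length-1 bytes object
    b'C'
    >>> data[0] == b'C'    # int compared to bytes is always False
    False
    >>> data[0:1] == b'C'
    True
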
diff --git a/Lib/poplib.py b/Lib/poplib.py
--- a/Lib/poplib.py
+++ b/Lib/poplib.py
@@ -136,7 +136,7 @@
         # so only possibilities are ...LF, ...CRLF, CR...LF
         if line[-2:] == CRLF:
             return line[:-2], octets
-        if line[0] == CR:
+        if line[:1] == CR:
             return line[1:-1], octets
         return line[:-1], octets
 
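Why the poplib change matters: with CR defined as b'\r' (as it is in poplib),
the old test compared an int to a bytes object, which is always False in
Python 3, so a leading CR was silently left in the returned line. The quopri
hunk below fixes the same pattern for the ESCAPE byte. An illustrative check:

    >>> CR = b'\r'
    >>> line = b'\rresponse line\n'
    >>> line[0] == CR     # old test: int vs bytes, never True
    False
    >>> line[:1] == CR    # new test: bytes vs bytes
    True
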
diff --git a/Lib/quopri.py b/Lib/quopri.py
--- a/Lib/quopri.py
+++ b/Lib/quopri.py
@@ -145,7 +145,7 @@
             new = new + c; i = i+1
         elif i+1 == n and not partial:
             partial = 1; break
-        elif i+1 < n and line[i+1] == ESCAPE:
+        elif i+1 < n and line[i+1:i+2] == ESCAPE:
             new = new + ESCAPE; i = i+2
         elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
             new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
diff --git a/Lib/sunau.py b/Lib/sunau.py
--- a/Lib/sunau.py
+++ b/Lib/sunau.py
@@ -210,12 +210,9 @@
         self._framesize = self._framesize * self._nchannels
         if self._hdr_size > 24:
             self._info = file.read(self._hdr_size - 24)
-            for i in range(len(self._info)):
-                if self._info[i] == b'0円':
-                    self._info = self._info[:i]
-                    break
+            self._info, _, _ = self._info.partition(b'0円')
         else:
-            self._info = ''
+            self._info = b''
         try:
             self._data_pos = file.tell()
         except (AttributeError, OSError):
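
In the old sunau loop, self._info[i] is an int, so it could never equal b'0円'
and the info field was never truncated at the NUL byte; bytes.partition()
keeps only the part before the first NUL. A small sketch:

    >>> info = b'created by sox0円0円0円'
    >>> info.partition(b'0円')[0]    # everything before the first NUL byte
    b'created by sox'
    >>> nul_index = info.index(b'0円')
    >>> info[nul_index], info[nul_index] == b'0円'   # why the old loop never matched
    (0, False)
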
diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py
--- a/Lib/test/test_buffer.py
+++ b/Lib/test/test_buffer.py
@@ -149,15 +149,15 @@
 format character."""
 x = randrange(*fmtdict[mode][char])
 if char == 'c':
- x = bytes(chr(x), 'latin1')
+ x = bytes([x])
+ if obj == 'numpy' and x == b'\x00':
+ # http://projects.scipy.org/numpy/ticket/1925
+ x = b'\x01'
 if char == '?':
 x = bool(x)
 if char == 'f' or char == 'd':
 x = struct.pack(char, x)
 x = struct.unpack(char, x)[0]
- if obj == 'numpy' and x == b'\x00':
- # http://projects.scipy.org/numpy/ticket/1925
- x = b'\x01'
 return x
 
 def gen_item(fmt, obj):
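
For the struct 'c' format the test needs a length-1 bytes object; bytes([x])
builds it directly from the integer and matches the old
bytes(chr(x), 'latin1') round-trip through str for values 0-255:

    >>> import struct
    >>> x = 0xE9
    >>> bytes([x])                     # one-byte bytes object from an int
    b'\xe9'
    >>> bytes(chr(x), 'latin1')        # the old round-trip through str
    b'\xe9'
    >>> struct.pack('c', bytes([x]))   # 'c' expects bytes of length 1
    b'\xe9'
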
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1066,7 +1066,7 @@
         encoding = object()
         encoding_used = None
         def mock_detect_encoding(readline):
-            return encoding, ['first', 'second']
+            return encoding, [b'first', b'second']
 
         def mock__tokenize(readline, encoding):
             nonlocal encoding_used
@@ -1085,7 +1085,7 @@
             counter += 1
             if counter == 5:
                 return b''
-            return counter
+            return str(counter).encode()
 
         orig_detect_encoding = tokenize_module.detect_encoding
         orig__tokenize = tokenize_module._tokenize
@@ -1093,7 +1093,8 @@
         tokenize_module._tokenize = mock__tokenize
         try:
             results = tokenize(mock_readline)
-            self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
+            self.assertEqual(list(results),
+                             [b'first', b'second', b'1', b'2', b'3', b'4'])
         finally:
             tokenize_module.detect_encoding = orig_detect_encoding
             tokenize_module._tokenize = orig__tokenize
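
The tokenize mocks have to produce bytes because tokenize.tokenize() works on
a readline callable that returns bytes (it detects the source encoding itself).
A minimal sketch of the real API that mock_readline stands in for:

    >>> import io, tokenize
    >>> readline = io.BytesIO(b'x = 1\n').readline   # must yield bytes
    >>> [tok.string for tok in tokenize.tokenize(readline)][:4]
    ['utf-8', 'x', '=', '1']
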
-- 
Repository URL: https://hg.python.org/cpython

