# HG changeset patch # Parent dcd6d41f2c9aaaf9ff280ea99a8474647a7daac1 Use max_length in LZMAFile, BZ2File decompression * Split out _DecompressReader and wrap it with BufferedReader to provide the read mode APIs of LZMAFile * The specification of the peek() method is vague * read() now accepts size=None, because BufferedReader does * BufferedReader.seek() raises a different exception for invalid "whence" * Work around different signature for BufferedReader.read1() * Remove BZ2File(buffering=...) parameter TODO: Find a home for _DecompressReader and _BaseStream; try using them with GzipFile; maybe support open(buffering=...) and buffering=0 diff -r dcd6d41f2c9a Doc/library/bz2.rst --- a/Doc/library/bz2.rst Fri Mar 06 23:33:51 2015 +0200 +++ b/Doc/library/bz2.rst Sat Mar 07 07:14:01 2015 +0000 @@ -58,7 +58,7 @@ The ``'x'`` (exclusive creation) mode was added. -.. class:: BZ2File(filename, mode='r', buffering=None, compresslevel=9) +.. class:: BZ2File(filename, mode='r', buffer_size=io.DEFAULT_BUFFER_SIZE, compresslevel=9) Open a bzip2-compressed file in binary mode. @@ -74,7 +74,9 @@ If *filename* is a file object (rather than an actual file name), a mode of ``'w'`` does not truncate the file, and is instead equivalent to ``'a'``. - The *buffering* argument is ignored. Its use is deprecated. + The *buffer_size* argument is only used in read mode. It gives the amount + of decompressed data that may be buffered between reads, and also + influences the chunk size when reading raw compressed data. If *mode* is ``'w'`` or ``'a'``, *compresslevel* can be a number between ``1`` and ``9`` specifying the level of compression: ``1`` produces the @@ -87,6 +89,10 @@ :class:`io.BufferedIOBase`, except for :meth:`detach` and :meth:`truncate`. Iteration and the :keyword:`with` statement are supported. + .. versionchanged:: 3.5 + The *buffer_size* argument was added. Previously, there was an unused + parameter called *buffering* at this position. 
+ :class:`BZ2File` also provides the following method: .. method:: peek([n]) diff -r dcd6d41f2c9a Doc/library/lzma.rst --- a/Doc/library/lzma.rst Fri Mar 06 23:33:51 2015 +0200 +++ b/Doc/library/lzma.rst Sat Mar 07 07:14:01 2015 +0000 @@ -61,7 +61,7 @@ Added support for the ``"x"``, ``"xb"`` and ``"xt"`` modes. -.. class:: LZMAFile(filename=None, mode="r", \*, format=None, check=-1, preset=None, filters=None) +.. class:: LZMAFile(filename=None, mode="r", buffer_size=io.DEFAULT_BUFFER_SIZE, \*, format=None, check=-1, preset=None, filters=None) Open an LZMA-compressed file in binary mode. @@ -79,6 +79,10 @@ If *filename* is a file object (rather than an actual file name), a mode of ``"w"`` does not truncate the file, and is instead equivalent to ``"a"``. + The *buffer_size* argument is only used in read mode. It gives the amount + of decompressed data that may be buffered between reads, and also + influences the chunk size when reading raw compressed data. + When opening a file for reading, the input file may be the concatenation of multiple separate compressed streams. These are transparently decoded as a single logical stream. @@ -94,6 +98,9 @@ :class:`io.BufferedIOBase`, except for :meth:`detach` and :meth:`truncate`. Iteration and the :keyword:`with` statement are supported. + .. versionadded:: 3.5 + The *buffer_size* argument. + The following method is also provided: .. 
method:: peek(size=-1) diff -r dcd6d41f2c9a Lib/bz2.py --- a/Lib/bz2.py Fri Mar 06 23:33:51 2015 +0200 +++ b/Lib/bz2.py Sat Mar 07 07:14:01 2015 +0000 @@ -10,7 +10,7 @@ __author__ = "Nadeem Vawda " import io -import warnings +from lzma import _BaseStream, _DecompressReader # TODO: relocate try: from threading import RLock @@ -22,15 +22,13 @@ _MODE_CLOSED = 0 _MODE_READ = 1 -_MODE_READ_EOF = 2 +# Value 2 no longer used _MODE_WRITE = 3 -_BUFFER_SIZE = 8192 - _builtin_open = open -class BZ2File(io.BufferedIOBase): +class BZ2File(_BaseStream, io.BufferedIOBase): """A file object providing transparent bzip2 (de)compression. @@ -41,7 +39,8 @@ returned as bytes, and data to be written should be given as bytes. """ - def __init__(self, filename, mode="r", buffering=None, compresslevel=9): + def __init__(self, filename, mode="r", + buffer_size=io.DEFAULT_BUFFER_SIZE, compresslevel=9): """Open a bzip2-compressed file. If filename is a str or bytes object, it gives the name @@ -52,7 +51,8 @@ 'x' for creating exclusively, or 'a' for appending. These can equivalently be given as 'rb', 'wb', 'xb', and 'ab'. - buffering is ignored. Its use is deprecated. + buffer_size is only used in read mode. It specifies buffer sizes for + both compressed and decompressed data. If mode is 'w', 'x' or 'a', compresslevel can be a number between 1 and 9 specifying the level of compression: 1 produces the least @@ -62,17 +62,11 @@ multiple compressed streams. """ # This lock must be recursive, so that BufferedIOBase's - # readline(), readlines() and writelines() don't deadlock. + # writelines() does not deadlock. 
self._lock = RLock() self._fp = None self._closefp = False self._mode = _MODE_CLOSED - self._pos = 0 - self._size = -1 - - if buffering is not None: - warnings.warn("Use of 'buffering' argument is deprecated", - DeprecationWarning) if not (1 <= compresslevel <= 9): raise ValueError("compresslevel must be between 1 and 9") @@ -80,9 +74,6 @@ if mode in ("", "r", "rb"): mode = "rb" mode_code = _MODE_READ - self._decompressor = BZ2Decompressor() - self._buffer = b"" - self._buffer_offset = 0 elif mode in ("w", "wb"): mode = "wb" mode_code = _MODE_WRITE @@ -99,6 +90,7 @@ raise ValueError("Invalid mode: %r" % (mode,)) if isinstance(filename, (str, bytes)): + # buffer_size not used here for simplicity self._fp = _builtin_open(filename, mode) self._closefp = True self._mode = mode_code @@ -108,6 +100,14 @@ else: raise TypeError("filename must be a str or bytes object, or a file") + if self._mode == _MODE_READ: + raw = _DecompressReader(self._fp, buffer_size, + BZ2Decompressor, OSError) + self._buffer = io.BufferedReader(raw, buffer_size) + self._buffer_size = buffer_size + else: + self._pos = 0 + def close(self): """Flush and close the file. 
@@ -118,8 +118,8 @@ if self._mode == _MODE_CLOSED: return try: - if self._mode in (_MODE_READ, _MODE_READ_EOF): - self._decompressor = None + if self._mode == _MODE_READ: + self._buffer.close() elif self._mode == _MODE_WRITE: self._fp.write(self._compressor.flush()) self._compressor = None @@ -131,8 +131,7 @@ self._fp = None self._closefp = False self._mode = _MODE_CLOSED - self._buffer = b"" - self._buffer_offset = 0 + self._buffer = None @property def closed(self): @@ -146,125 +145,18 @@ def seekable(self): """Return whether the file supports seeking.""" - return self.readable() and self._fp.seekable() + return self.readable() and self._buffer.seekable() def readable(self): """Return whether the file was opened for reading.""" self._check_not_closed() - return self._mode in (_MODE_READ, _MODE_READ_EOF) + return self._mode == _MODE_READ def writable(self): """Return whether the file was opened for writing.""" self._check_not_closed() return self._mode == _MODE_WRITE - # Mode-checking helper functions. - - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if self._mode not in (_MODE_READ, _MODE_READ_EOF): - self._check_not_closed() - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if self._mode != _MODE_WRITE: - self._check_not_closed() - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if self._mode not in (_MODE_READ, _MODE_READ_EOF): - self._check_not_closed() - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - if not self._fp.seekable(): - raise io.UnsupportedOperation("The underlying file object " - "does not support seeking") - - # Fill the readahead buffer if it is empty. Returns False on EOF. 
- def _fill_buffer(self): - if self._mode == _MODE_READ_EOF: - return False - # Depending on the input data, our call to the decompressor may not - # return any data. In this case, try again after reading another block. - while self._buffer_offset == len(self._buffer): - rawblock = (self._decompressor.unused_data or - self._fp.read(_BUFFER_SIZE)) - - if not rawblock: - if self._decompressor.eof: - # End-of-stream marker and end of file. We're good. - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - # Problem - we were expecting more compressed data. - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - - if self._decompressor.eof: - # Continue to next stream. - self._decompressor = BZ2Decompressor() - try: - self._buffer = self._decompressor.decompress(rawblock) - except OSError: - # Trailing data isn't a valid bzip2 stream. We're done here. - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - self._buffer = self._decompressor.decompress(rawblock) - self._buffer_offset = 0 - return True - - # Read data until EOF. - # If return_data is false, consume the data without returning it. - def _read_all(self, return_data=True): - # The loop assumes that _buffer_offset is 0. Ensure that this is true. - self._buffer = self._buffer[self._buffer_offset:] - self._buffer_offset = 0 - - blocks = [] - while self._fill_buffer(): - if return_data: - blocks.append(self._buffer) - self._pos += len(self._buffer) - self._buffer = b"" - if return_data: - return b"".join(blocks) - - # Read a block of up to n bytes. - # If return_data is false, consume the data without returning it. - def _read_block(self, n, return_data=True): - # If we have enough data buffered, return immediately. 
- end = self._buffer_offset + n - if end <= len(self._buffer): - data = self._buffer[self._buffer_offset : end] - self._buffer_offset = end - self._pos += len(data) - return data if return_data else None - - # The loop assumes that _buffer_offset is 0. Ensure that this is true. - self._buffer = self._buffer[self._buffer_offset:] - self._buffer_offset = 0 - - blocks = [] - while n > 0 and self._fill_buffer(): - if n < len(self._buffer): - data = self._buffer[:n] - self._buffer_offset = n - else: - data = self._buffer - self._buffer = b"" - if return_data: - blocks.append(data) - self._pos += len(data) - n -= len(data) - if return_data: - return b"".join(blocks) - def peek(self, n=0): """Return buffered data without advancing the file position. @@ -273,9 +165,10 @@ """ with self._lock: self._check_can_read() - if not self._fill_buffer(): - return b"" - return self._buffer[self._buffer_offset:] + # Relies on the undocumented fact that BufferedReader.peek() + # always returns at least one byte (except at EOF), independent + # of the value of n + return self._buffer.peek(n) def read(self, size=-1): """Read up to size uncompressed bytes from the file. @@ -285,47 +178,29 @@ """ with self._lock: self._check_can_read() - if size == 0: - return b"" - elif size < 0: - return self._read_all() - else: - return self._read_block(size) + return self._buffer.read(size) def read1(self, size=-1): """Read up to size uncompressed bytes, while trying to avoid - making multiple reads from the underlying stream. + making multiple reads from the underlying stream. Reads a buffer's + worth of data if size is negative. Returns b'' if the file is at EOF. """ - # Usually, read1() calls _fp.read() at most once. However, sometimes - # this does not give enough data for the decompressor to make progress. - # In this case we make multiple reads, to avoid returning b"". with self._lock: self._check_can_read() - if (size == 0 or - # Only call _fill_buffer() if the buffer is actually empty. 
- # This gives a significant speedup if *size* is small. - (self._buffer_offset == len(self._buffer) and not self._fill_buffer())): - return b"" - if size> 0: - data = self._buffer[self._buffer_offset : - self._buffer_offset + size] - self._buffer_offset += len(data) - else: - data = self._buffer[self._buffer_offset:] - self._buffer = b"" - self._buffer_offset = 0 - self._pos += len(data) - return data + if size < 0: + size = self._buffer_size + return self._buffer.read1(size) def readinto(self, b): - """Read up to len(b) bytes into b. + """Read bytes into b. Returns the number of bytes read (0 for EOF). """ with self._lock: - return io.BufferedIOBase.readinto(self, b) + self._check_can_read() + return self._buffer.readinto(b) def readline(self, size=-1): """Read a line of uncompressed bytes from the file. @@ -340,15 +215,7 @@ size = size.__index__() with self._lock: self._check_can_read() - # Shortcut for the common case - the whole line is in the buffer. - if size < 0: - end = self._buffer.find(b"\n", self._buffer_offset) + 1 - if end> 0: - line = self._buffer[self._buffer_offset : end] - self._buffer_offset = end - self._pos += len(line) - return line - return io.BufferedIOBase.readline(self, size) + return self._buffer.readline(size) def readlines(self, size=-1): """Read a list of lines of uncompressed bytes from the file. @@ -362,7 +229,8 @@ raise TypeError("Integer argument expected") size = size.__index__() with self._lock: - return io.BufferedIOBase.readlines(self, size) + self._check_can_read() + return self._buffer.readlines(size) def write(self, data): """Write a byte string to the file. @@ -389,15 +257,6 @@ with self._lock: return io.BufferedIOBase.writelines(self, seq) - # Rewind the file to the beginning of the data stream. 
- def _rewind(self): - self._fp.seek(0, 0) - self._mode = _MODE_READ - self._pos = 0 - self._decompressor = BZ2Decompressor() - self._buffer = b"" - self._buffer_offset = 0 - def seek(self, offset, whence=0): """Change the file position. @@ -414,35 +273,17 @@ this operation may be extremely slow. """ with self._lock: - self._check_can_seek() - - # Recalculate offset as an absolute file position. - if whence == 0: - pass - elif whence == 1: - offset = self._pos + offset - elif whence == 2: - # Seeking relative to EOF - we need to know the file's size. - if self._size < 0: - self._read_all(return_data=False) - offset = self._size + offset - else: - raise ValueError("Invalid value for whence: %s" % (whence,)) - - # Make it so that offset is the number of bytes to skip forward. - if offset < self._pos: - self._rewind() - else: - offset -= self._pos - - # Read and discard data until we reach the desired position. - self._read_block(offset, return_data=False) - - return self._pos + if self._mode != _MODE_READ: + self._check_not_closed() + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + return self._buffer.seek(offset, whence) def tell(self): """Return the current file position.""" with self._lock: + if self._mode == _MODE_READ: + return self._buffer.tell() self._check_not_closed() return self._pos diff -r dcd6d41f2c9a Lib/lzma.py --- a/Lib/lzma.py Fri Mar 06 23:33:51 2015 +0200 +++ b/Lib/lzma.py Sat Mar 07 07:14:01 2015 +0000 @@ -29,13 +29,32 @@ _MODE_CLOSED = 0 _MODE_READ = 1 -_MODE_READ_EOF = 2 +# Value 2 no longer used _MODE_WRITE = 3 -_BUFFER_SIZE = 8192 +class _BaseStream(io.IOBase): + """Mode-checking helper functions.""" -class LZMAFile(io.BufferedIOBase): + def _check_not_closed(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _check_can_read(self): + if not self.readable(): + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if not 
self.writable(): + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if not self.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + +class LZMAFile(_BaseStream, io.BufferedIOBase): """A file object providing transparent LZMA (de)compression. @@ -46,7 +65,8 @@ is returned as bytes, and data to be written must be given as bytes. """ - def __init__(self, filename=None, mode="r", *, + def __init__(self, filename=None, mode="r", + buffer_size=io.DEFAULT_BUFFER_SIZE, *, format=None, check=-1, preset=None, filters=None): """Open an LZMA-compressed file in binary mode. @@ -58,6 +78,9 @@ "x" for creating exclusively, or "a" for appending. These can equivalently be given as "rb", "wb", "xb" and "ab" respectively. + buffer_size is only used in read mode. It specifies buffer sizes for + both compressed and decompressed data. + format specifies the container format to use for the file. If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the default is FORMAT_XZ. @@ -92,8 +115,6 @@ self._fp = None self._closefp = False self._mode = _MODE_CLOSED - self._pos = 0 - self._size = -1 if mode in ("r", "rb"): if check != -1: @@ -105,25 +126,20 @@ if format is None: format = FORMAT_AUTO mode_code = _MODE_READ - # Save the args to pass to the LZMADecompressor initializer. - # If the file contains multiple compressed streams, each - # stream will need a separate decompressor object. 
- self._init_args = {"format":format, "filters":filters} - self._decompressor = LZMADecompressor(**self._init_args) - self._buffer = b"" - self._buffer_offset = 0 elif mode in ("w", "wb", "a", "ab", "x", "xb"): if format is None: format = FORMAT_XZ mode_code = _MODE_WRITE self._compressor = LZMACompressor(format=format, check=check, preset=preset, filters=filters) + self._pos = 0 else: raise ValueError("Invalid mode: {!r}".format(mode)) if isinstance(filename, (str, bytes)): if "b" not in mode: mode += "b" + # buffer_size not used here for simplicity self._fp = builtins.open(filename, mode) self._closefp = True self._mode = mode_code @@ -133,6 +149,12 @@ else: raise TypeError("filename must be a str or bytes object, or a file") + if self._mode == _MODE_READ: + raw = _DecompressReader(self._fp, buffer_size, + LZMADecompressor, LZMAError, format=format, filters=filters) + self._buffer = io.BufferedReader(raw, buffer_size) + self._buffer_size = buffer_size + def close(self): """Flush and close the file. @@ -142,9 +164,9 @@ if self._mode == _MODE_CLOSED: return try: - if self._mode in (_MODE_READ, _MODE_READ_EOF): - self._decompressor = None - self._buffer = b"" + if self._mode == _MODE_READ: + self._buffer.close() + self._buffer = None elif self._mode == _MODE_WRITE: self._fp.write(self._compressor.flush()) self._compressor = None @@ -169,123 +191,18 @@ def seekable(self): """Return whether the file supports seeking.""" - return self.readable() and self._fp.seekable() + return self.readable() and self._buffer.seekable() def readable(self): """Return whether the file was opened for reading.""" self._check_not_closed() - return self._mode in (_MODE_READ, _MODE_READ_EOF) + return self._mode == _MODE_READ def writable(self): """Return whether the file was opened for writing.""" self._check_not_closed() return self._mode == _MODE_WRITE - # Mode-checking helper functions. 
- - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if self._mode not in (_MODE_READ, _MODE_READ_EOF): - self._check_not_closed() - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if self._mode != _MODE_WRITE: - self._check_not_closed() - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if self._mode not in (_MODE_READ, _MODE_READ_EOF): - self._check_not_closed() - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - if not self._fp.seekable(): - raise io.UnsupportedOperation("The underlying file object " - "does not support seeking") - - # Fill the readahead buffer if it is empty. Returns False on EOF. - def _fill_buffer(self): - if self._mode == _MODE_READ_EOF: - return False - # Depending on the input data, our call to the decompressor may not - # return any data. In this case, try again after reading another block. - while self._buffer_offset == len(self._buffer): - rawblock = (self._decompressor.unused_data or - self._fp.read(_BUFFER_SIZE)) - - if not rawblock: - if self._decompressor.eof: - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - - if self._decompressor.eof: - # Continue to next stream. - self._decompressor = LZMADecompressor(**self._init_args) - try: - self._buffer = self._decompressor.decompress(rawblock) - except LZMAError: - # Trailing data isn't a valid compressed stream; ignore it. - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - self._buffer = self._decompressor.decompress(rawblock) - self._buffer_offset = 0 - return True - - # Read data until EOF. - # If return_data is false, consume the data without returning it. 
- def _read_all(self, return_data=True): - # The loop assumes that _buffer_offset is 0. Ensure that this is true. - self._buffer = self._buffer[self._buffer_offset:] - self._buffer_offset = 0 - - blocks = [] - while self._fill_buffer(): - if return_data: - blocks.append(self._buffer) - self._pos += len(self._buffer) - self._buffer = b"" - if return_data: - return b"".join(blocks) - - # Read a block of up to n bytes. - # If return_data is false, consume the data without returning it. - def _read_block(self, n, return_data=True): - # If we have enough data buffered, return immediately. - end = self._buffer_offset + n - if end <= len(self._buffer): - data = self._buffer[self._buffer_offset : end] - self._buffer_offset = end - self._pos += len(data) - return data if return_data else None - - # The loop assumes that _buffer_offset is 0. Ensure that this is true. - self._buffer = self._buffer[self._buffer_offset:] - self._buffer_offset = 0 - - blocks = [] - while n> 0 and self._fill_buffer(): - if n < len(self._buffer): - data = self._buffer[:n] - self._buffer_offset = n - else: - data = self._buffer - self._buffer = b"" - if return_data: - blocks.append(data) - self._pos += len(data) - n -= len(data) - if return_data: - return b"".join(blocks) - def peek(self, size=-1): """Return buffered data without advancing the file position. @@ -293,9 +210,9 @@ The exact number of bytes returned is unspecified. """ self._check_can_read() - if not self._fill_buffer(): - return b"" - return self._buffer[self._buffer_offset:] + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + return self._buffer.peek(size) def read(self, size=-1): """Read up to size uncompressed bytes from the file. @@ -304,38 +221,19 @@ Returns b"" if the file is already at EOF. 
""" self._check_can_read() - if size == 0: - return b"" - elif size < 0: - return self._read_all() - else: - return self._read_block(size) + return self._buffer.read(size) def read1(self, size=-1): """Read up to size uncompressed bytes, while trying to avoid - making multiple reads from the underlying stream. + making multiple reads from the underlying stream. Reads a buffer's + worth of data if size is negative. Returns b"" if the file is at EOF. """ - # Usually, read1() calls _fp.read() at most once. However, sometimes - # this does not give enough data for the decompressor to make progress. - # In this case we make multiple reads, to avoid returning b"". self._check_can_read() - if (size == 0 or - # Only call _fill_buffer() if the buffer is actually empty. - # This gives a significant speedup if *size* is small. - (self._buffer_offset == len(self._buffer) and not self._fill_buffer())): - return b"" - if size> 0: - data = self._buffer[self._buffer_offset : - self._buffer_offset + size] - self._buffer_offset += len(data) - else: - data = self._buffer[self._buffer_offset:] - self._buffer = b"" - self._buffer_offset = 0 - self._pos += len(data) - return data + if size < 0: + size = self._buffer_size + return self._buffer.read1(size) def readline(self, size=-1): """Read a line of uncompressed bytes from the file. @@ -345,15 +243,7 @@ case the line may be incomplete). Returns b'' if already at EOF. """ self._check_can_read() - # Shortcut for the common case - the whole line is in the buffer. - if size < 0: - end = self._buffer.find(b"\n", self._buffer_offset) + 1 - if end> 0: - line = self._buffer[self._buffer_offset : end] - self._buffer_offset = end - self._pos += len(line) - return line - return io.BufferedIOBase.readline(self, size) + return self._buffer.readline(size) def write(self, data): """Write a bytes object to the file. @@ -368,15 +258,6 @@ self._pos += len(data) return len(data) - # Rewind the file to the beginning of the data stream. 
- def _rewind(self): - self._fp.seek(0, 0) - self._mode = _MODE_READ - self._pos = 0 - self._decompressor = LZMADecompressor(**self._init_args) - self._buffer = b"" - self._buffer_offset = 0 - def seek(self, offset, whence=0): """Change the file position. @@ -389,20 +270,115 @@ Returns the new file position. - Note that seeking is emulated, sp depending on the parameters, + Note that seeking is emulated, so depending on the parameters, this operation may be extremely slow. """ + if self._mode != _MODE_READ: + self._check_not_closed() + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + if self._mode == _MODE_READ: + return self._buffer.tell() + self._check_not_closed() + return self._pos + + +class _DecompressReader(_BaseStream, io.RawIOBase): + def readable(self): + return True + + def __init__(self, fp, chunk_size, + decomp_factory, decomp_error, **decomp_args): + self._fp = fp + self._chunk_size = chunk_size + self._eof = False + self._pos = 0 # Current offset in decompressed stream + self._size = -1 # Set to size of decompressed stream for SEEK_END + # Save the decompressor factory and arguments. + # If the file contains multiple compressed streams, each + # stream will need a separate decompressor object. 
+ self._decomp_factory = decomp_factory + self._decomp_args = decomp_args + self._decompressor = self._decomp_factory(**self._decomp_args) + self._decomp_error = decomp_error + + def close(self): + self._decompressor = None + return super().close() + + def seekable(self): + return self._fp.seekable() + + def readinto(self, b): + data = self.read(len(b)) + with memoryview(b) as view: + view.cast("B")[:len(data)] = data + return len(data) + + def read(self, size=-1): + if size < 0: + return self.readall() + + if not size or self._eof: + return b"" + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. + while True: + if self._decompressor.eof: + rawblock = (self._decompressor.unused_data or + self._fp.read(self._chunk_size)) + if not rawblock: + self._eof = True + self._size = self._pos + return b"" + # Continue to next stream. + self._decompressor = self._decomp_factory( + **self._decomp_args) + try: + data = self._decompressor.decompress(rawblock, size) + except self._decomp_error: + # Trailing data isn't a valid compressed stream; ignore it. + self._eof = True + self._size = self._pos + return b"" + else: + if self._decompressor.needs_input: + rawblock = self._fp.read(self._chunk_size) + if not rawblock: + raise EOFError("Compressed file ended before the " + "end-of-stream marker was reached") + else: + rawblock = bytes() + data = self._decompressor.decompress(rawblock, size) + if data: + break + self._pos += len(data) + return data + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0) + self._eof = False + self._pos = 0 + self._decompressor = self._decomp_factory(**self._decomp_args) + + def seek(self, offset, whence=io.SEEK_SET): self._check_can_seek() # Recalculate offset as an absolute file position. 
- if whence == 0: + if whence == io.SEEK_SET: pass - elif whence == 1: + elif whence == io.SEEK_CUR: offset = self._pos + offset - elif whence == 2: + elif whence == io.SEEK_END: # Seeking relative to EOF - we need to know the file's size. if self._size < 0: - self._read_all(return_data=False) + while self.read(self._chunk_size): + pass offset = self._size + offset else: raise ValueError("Invalid value for whence: {}".format(whence)) @@ -414,7 +390,11 @@ offset -= self._pos # Read and discard data until we reach the desired position. - self._read_block(offset, return_data=False) + while offset> 0: + data = self.read(min(self._chunk_size, offset)) + if not data: + break + offset -= len(data) return self._pos diff -r dcd6d41f2c9a Lib/test/test_bz2.py --- a/Lib/test/test_bz2.py Fri Mar 06 23:33:51 2015 +0200 +++ b/Lib/test/test_bz2.py Sat Mar 07 07:14:01 2015 +0000 @@ -110,7 +110,7 @@ def testRead(self): self.createTempFile() with BZ2File(self.filename) as bz2f: - self.assertRaises(TypeError, bz2f.read, None) + self.assertRaises(TypeError, bz2f.read, float()) self.assertEqual(bz2f.read(), self.TEXT) def testReadBadFile(self): @@ -121,21 +121,16 @@ def testReadMultiStream(self): self.createTempFile(streams=5) with BZ2File(self.filename) as bz2f: - self.assertRaises(TypeError, bz2f.read, None) + self.assertRaises(TypeError, bz2f.read, float()) self.assertEqual(bz2f.read(), self.TEXT * 5) def testReadMonkeyMultiStream(self): # Test BZ2File.read() on a multi-stream archive where a stream # boundary coincides with the end of the raw read buffer. 
- buffer_size = bz2._BUFFER_SIZE - bz2._BUFFER_SIZE = len(self.DATA) - try: - self.createTempFile(streams=5) - with BZ2File(self.filename) as bz2f: - self.assertRaises(TypeError, bz2f.read, None) - self.assertEqual(bz2f.read(), self.TEXT * 5) - finally: - bz2._BUFFER_SIZE = buffer_size + self.createTempFile(streams=5) + with BZ2File(self.filename, buffer_size=len(self.DATA)) as bz2f: + self.assertRaises(TypeError, bz2f.read, float()) + self.assertEqual(bz2f.read(), self.TEXT * 5) def testReadTrailingJunk(self): self.createTempFile(suffix=self.BAD_DATA) @@ -150,7 +145,7 @@ def testRead0(self): self.createTempFile() with BZ2File(self.filename) as bz2f: - self.assertRaises(TypeError, bz2f.read, None) + self.assertRaises(TypeError, bz2f.read, float()) self.assertEqual(bz2f.read(0), b"") def testReadChunk10(self): @@ -567,7 +562,7 @@ def testReadBytesIO(self): with BytesIO(self.DATA) as bio: with BZ2File(bio) as bz2f: - self.assertRaises(TypeError, bz2f.read, None) + self.assertRaises(TypeError, bz2f.read, float()) self.assertEqual(bz2f.read(), self.TEXT) self.assertFalse(bio.closed) @@ -614,6 +609,17 @@ with BZ2File(BytesIO(truncated[:i])) as f: self.assertRaises(EOFError, f.read, 1) + def test_decompress_limited(self): + """Decompressed data should be limited when doing a limited read""" + bomb = bz2.compress(bytes(int(2e6)), compresslevel=8) + BUFFER_SIZE = 3000 + self.assertLess(len(bomb), BUFFER_SIZE) + + decomp = BZ2File(BytesIO(bomb), buffer_size=BUFFER_SIZE) + self.assertEqual(bytes(1), decomp.read(1)) + self.assertLessEqual(decomp._buffer.raw.tell(), 1 + BUFFER_SIZE, + "Excessive amount of data was decompressed") + class BZ2CompressorTest(BaseTest): def testCompress(self): diff -r dcd6d41f2c9a Lib/test/test_lzma.py --- a/Lib/test/test_lzma.py Fri Mar 06 23:33:51 2015 +0200 +++ b/Lib/test/test_lzma.py Sat Mar 07 07:14:01 2015 +0000 @@ -772,13 +772,10 @@ def test_read_multistream_buffer_size_aligned(self): # Test the case where a stream boundary coincides with 
the end # of the raw read buffer. - saved_buffer_size = lzma._BUFFER_SIZE - lzma._BUFFER_SIZE = len(COMPRESSED_XZ) - try: - with LZMAFile(BytesIO(COMPRESSED_XZ * 5)) as f: - self.assertEqual(f.read(), INPUT * 5) - finally: - lzma._BUFFER_SIZE = saved_buffer_size + input = BytesIO(COMPRESSED_XZ * 5) + buffer_size = len(COMPRESSED_XZ) + with LZMAFile(input, buffer_size=buffer_size) as f: + self.assertEqual(f.read(), INPUT * 5) def test_read_trailing_junk(self): with LZMAFile(BytesIO(COMPRESSED_XZ + COMPRESSED_BOGUS)) as f: @@ -829,7 +826,7 @@ with LZMAFile(BytesIO(), "w") as f: self.assertRaises(ValueError, f.read) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: - self.assertRaises(TypeError, f.read, None) + self.assertRaises(TypeError, f.read, float()) def test_read_bad_data(self): with LZMAFile(BytesIO(COMPRESSED_BOGUS)) as f: @@ -925,6 +922,18 @@ with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertListEqual(f.readlines(), lines) + def test_decompress_limited(self): + """Read a small portion of a highly compressed stream""" + + bomb = lzma.compress(bytes(int(2e6)), preset=6) + BUFFER_SIZE = 3000 + self.assertLess(len(bomb), BUFFER_SIZE) + + decomp = LZMAFile(BytesIO(bomb), buffer_size=BUFFER_SIZE) + self.assertEqual(bytes(1), decomp.read(1)) + self.assertLessEqual(decomp._buffer.raw.tell(), 1 + BUFFER_SIZE, + "Excessive amount of data was decompressed") + def test_write(self): with BytesIO() as dst: with LZMAFile(dst, "w") as f: @@ -1090,7 +1099,8 @@ self.assertRaises(ValueError, f.seek, 0) with LZMAFile(BytesIO(COMPRESSED_XZ)) as f: self.assertRaises(ValueError, f.seek, 0, 3) - self.assertRaises(ValueError, f.seek, 9, ()) + # io.BufferedReader raises TypeError instead of ValueError + self.assertRaises((TypeError, ValueError), f.seek, 9, ()) self.assertRaises(TypeError, f.seek, None) self.assertRaises(TypeError, f.seek, b"derp")

AltStyle によって変換されたページ (->オリジナル) /