Commit b27e72f

Merge commit (2 parents: 5dd0f30 + 183d1bc)

12 files changed: +96, -246 lines

README.rst

Lines changed: 0 additions & 4 deletions

@@ -56,10 +56,6 @@ DEVELOPMENT
    :target: https://ci.appveyor.com/project/ankostis/gitpython/branch/master)
 .. image:: https://coveralls.io/repos/gitpython-developers/gitdb/badge.png
    :target: https://coveralls.io/r/gitpython-developers/gitdb
-.. image:: http://www.issuestats.com/github/gitpython-developers/gitdb/badge/pr
-   :target: http://www.issuestats.com/github/gitpython-developers/gitdb
-.. image:: http://www.issuestats.com/github/gitpython-developers/gitdb/badge/issue
-   :target: http://www.issuestats.com/github/gitpython-developers/gitdb

 The library is considered mature, and not under active development. It's primary (known) use is in git-python.

gitdb/db/loose.py

Lines changed: 2 additions & 2 deletions

@@ -50,11 +50,11 @@
     stream_copy
 )

-from gitdb.utils.compat import MAXSIZE
 from gitdb.utils.encoding import force_bytes

 import tempfile
 import os
+import sys


 __all__ = ('LooseObjectDB', )
@@ -196,7 +196,7 @@ def store(self, istream):
         if istream.binsha is not None:
             # copy as much as possible, the actual uncompressed item size might
             # be smaller than the compressed version
-            stream_copy(istream.read, writer.write, MAXSIZE, self.stream_chunk_size)
+            stream_copy(istream.read, writer.write, sys.maxsize, self.stream_chunk_size)
         else:
             # write object with header, we have to make a new one
             write_object(istream.type, istream.size, istream.read, writer.write,
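
The swap is behavior-preserving: the removed compat constant MAXSIZE was, by all appearances, simply an alias for sys.maxsize, which stream_copy treats as an effectively unbounded byte budget. A minimal sketch with toy in-memory streams (the 64 KiB chunk size is an arbitrary illustrative choice; stream_copy is defined in gitdb/fun.py):

import sys
from io import BytesIO
from gitdb.fun import stream_copy

src, dst = BytesIO(b'x' * 100_000), BytesIO()
# With sys.maxsize as the limit, copying stops only when the source runs dry;
# the chunk size bounds how much data sits in memory per read() call.
stream_copy(src.read, dst.write, sys.maxsize, 64 * 1024)
assert dst.getvalue() == b'x' * 100_000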

gitdb/db/pack.py

Lines changed: 1 addition & 2 deletions

@@ -18,7 +18,6 @@
 )

 from gitdb.pack import PackEntity
-from gitdb.utils.compat import xrange

 from functools import reduce

@@ -107,7 +106,7 @@ def sha_iter(self):
         for entity in self.entities():
             index = entity.index()
             sha_by_index = index.sha
-            for index in xrange(index.size()):
+            for index in range(index.size()):
                 yield sha_by_index(index)
             # END for each index
         # END for each entity
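
The substitution is safe because Python 3's range is lazy, just as xrange was: sha_iter still never materializes an index list, even for packs with millions of objects. (Note how sha_by_index captures index.sha before the loop rebinds index as its counter.) A quick illustration of that laziness:

import sys

r = range(10 ** 9)           # constant-size object; indices are computed on demand
print(sys.getsizeof(r))      # a few dozen bytes, regardless of the range length
print(r[123_456_789])        # random access without building a billion ints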

gitdb/fun.py

Lines changed: 70 additions & 147 deletions

@@ -16,7 +16,6 @@

 from gitdb.const import NULL_BYTE, BYTE_SPACE
 from gitdb.utils.encoding import force_text
-from gitdb.utils.compat import izip, buffer, xrange, PY3
 from gitdb.typ import (
     str_blob_type,
     str_commit_type,
@@ -101,7 +100,7 @@ def delta_chunk_apply(dc, bbuf, write):
     :param write: write method to call with data to write"""
     if dc.data is None:
         # COPY DATA FROM SOURCE
-        write(buffer(bbuf, dc.so, dc.ts))
+        write(bbuf[dc.so:dc.so+dc.ts])
     else:
         # APPEND DATA
         # whats faster: if + 4 function calls or just a write with a slice ?
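
One subtlety: Python 2's buffer() gave a zero-copy window into bbuf, while the bytes slice that replaces it copies the selected region. Should that copy ever matter for large chunks, a memoryview slice is the zero-copy Python 3 analogue; a minimal sketch:

bbuf = b'0123456789'
so, ts = 2, 4                          # chunk offset and size, toy values
copied = bbuf[so:so + ts]              # new bytes object (what the patch now does)
view = memoryview(bbuf)[so:so + ts]    # zero-copy window, the Py3 buffer() analogue
assert copied == bytes(view) == b'2345'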
@@ -264,7 +263,7 @@ def compress(self):
         # if first_data_index is not None:
         nd = StringIO()  # new data
         so = self[first_data_index].to  # start offset in target buffer
-        for x in xrange(first_data_index, i - 1):
+        for x in range(first_data_index, i - 1):
             xdc = self[x]
             nd.write(xdc.data[:xdc.ts])
         # END collect data
@@ -314,7 +313,7 @@ def check_integrity(self, target_size=-1):
         right.next()
         # this is very pythonic - we might have just use index based access here,
         # but this could actually be faster
-        for lft, rgt in izip(left, right):
+        for lft, rgt in zip(left, right):
             assert lft.rbound() == rgt.to
             assert lft.to + lft.ts == rgt.to
         # END for each pair
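
izip maps one-to-one onto Python 3's zip, which is likewise lazy and stops at the shorter input; that is exactly what this pairwise walk needs, since right has been advanced one element ahead of left. The pattern in isolation, with hypothetical chunk offsets:

offsets = [0, 4, 9, 15]               # hypothetical chunk start offsets
left, right = iter(offsets), iter(offsets)
next(right)                           # right now leads left by one element
for lft, rgt in zip(left, right):     # pairs: (0, 4), (4, 9), (9, 15)
    assert lft < rgt                  # each chunk starts before its successor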
@@ -424,20 +423,12 @@ def pack_object_header_info(data):
     type_id = (c >> 4) & 7  # numeric type
     size = c & 15  # starting size
     s = 4  # starting bit-shift size
-    if PY3:
-        while c & 0x80:
-            c = byte_ord(data[i])
-            i += 1
-            size += (c & 0x7f) << s
-            s += 7
-        # END character loop
-    else:
-        while c & 0x80:
-            c = ord(data[i])
-            i += 1
-            size += (c & 0x7f) << s
-            s += 7
-        # END character loop
+    while c & 0x80:
+        c = byte_ord(data[i])
+        i += 1
+        size += (c & 0x7f) << s
+        s += 7
+    # END character loop
     # end performance at expense of maintenance ...
     return (type_id, size, i)

@@ -450,28 +441,16 @@ def create_pack_object_header(obj_type, obj_size):
     :param obj_type: pack type_id of the object
     :param obj_size: uncompressed size in bytes of the following object stream"""
     c = 0  # 1 byte
-    if PY3:
-        hdr = bytearray()  # output string
-
-        c = (obj_type << 4) | (obj_size & 0xf)
-        obj_size >>= 4
-        while obj_size:
-            hdr.append(c | 0x80)
-            c = obj_size & 0x7f
-            obj_size >>= 7
-        # END until size is consumed
-        hdr.append(c)
-    else:
-        hdr = bytes()  # output string
-
-        c = (obj_type << 4) | (obj_size & 0xf)
-        obj_size >>= 4
-        while obj_size:
-            hdr += chr(c | 0x80)
-            c = obj_size & 0x7f
-            obj_size >>= 7
-        # END until size is consumed
-        hdr += chr(c)
+    hdr = bytearray()  # output string
+
+    c = (obj_type << 4) | (obj_size & 0xf)
+    obj_size >>= 4
+    while obj_size:
+        hdr.append(c | 0x80)
+        c = obj_size & 0x7f
+        obj_size >>= 7
+    # END until size is consumed
+    hdr.append(c)
     # end handle interpreter
     return hdr
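The encoder above is the inverse of pack_object_header_info from the previous hunk: the first byte carries the type id in bits 4-6 plus the low four size bits, and further size bits follow seven at a time while the continuation bit (0x80) is set. A round-trip sketch, assuming the decoder's unshown opening lines read the first byte before the loop shown above (type id 1 is a commit in git's pack format):

from gitdb.fun import create_pack_object_header, pack_object_header_info

hdr = create_pack_object_header(1, 100)  # type 1 (commit), 100 uncompressed bytes
# first byte 0x94 = 0x80 | (1 << 4) | (100 & 0xf); second byte 100 >> 4 == 6
assert bytes(hdr) == b'\x94\x06'
assert pack_object_header_info(bytes(hdr)) == (1, 100, 2)  # (type_id, size, bytes read)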

@@ -484,26 +463,15 @@ def msb_size(data, offset=0):
     i = 0
     l = len(data)
     hit_msb = False
-    if PY3:
-        while i < l:
-            c = data[i + offset]
-            size |= (c & 0x7f) << i * 7
-            i += 1
-            if not c & 0x80:
-                hit_msb = True
-                break
-            # END check msb bit
-        # END while in range
-    else:
-        while i < l:
-            c = ord(data[i + offset])
-            size |= (c & 0x7f) << i * 7
-            i += 1
-            if not c & 0x80:
-                hit_msb = True
-                break
-            # END check msb bit
-        # END while in range
+    while i < l:
+        c = data[i + offset]
+        size |= (c & 0x7f) << i * 7
+        i += 1
+        if not c & 0x80:
+            hit_msb = True
+            break
+        # END check msb bit
+    # END while in range
     # end performance ...
     if not hit_msb:
         raise AssertionError("Could not find terminating MSB byte in data stream")
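
msb_size decodes the same little-endian base-128 scheme, seven payload bits per byte with the MSB flagging continuation, as used for the base and target sizes in a delta header. A worked example using the classic three-byte value:

from gitdb.fun import msb_size

# 0xe5 and 0x8e continue (MSB set); 0x26 terminates (MSB clear):
# 0x65 | (0x0e << 7) | (0x26 << 14) == 0x98765 == 624485
end, size = msb_size(b'\xe5\x8e\x26')  # return order assumed from gitdb's own callers
assert (end, size) == (3, 624485)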
@@ -663,93 +631,48 @@ def apply_delta_data(src_buf, src_buf_size, delta_buf, delta_buf_size, write):
     **Note:** transcribed to python from the similar routine in patch-delta.c"""
     i = 0
     db = delta_buf
-    if PY3:
-        while i < delta_buf_size:
-            c = db[i]
-            i += 1
-            if c & 0x80:
-                cp_off, cp_size = 0, 0
-                if (c & 0x01):
-                    cp_off = db[i]
-                    i += 1
-                if (c & 0x02):
-                    cp_off |= (db[i] << 8)
-                    i += 1
-                if (c & 0x04):
-                    cp_off |= (db[i] << 16)
-                    i += 1
-                if (c & 0x08):
-                    cp_off |= (db[i] << 24)
-                    i += 1
-                if (c & 0x10):
-                    cp_size = db[i]
-                    i += 1
-                if (c & 0x20):
-                    cp_size |= (db[i] << 8)
-                    i += 1
-                if (c & 0x40):
-                    cp_size |= (db[i] << 16)
-                    i += 1
-
-                if not cp_size:
-                    cp_size = 0x10000
-
-                rbound = cp_off + cp_size
-                if (rbound < cp_size or
-                        rbound > src_buf_size):
-                    break
-                write(buffer(src_buf, cp_off, cp_size))
-            elif c:
-                write(db[i:i + c])
-                i += c
-            else:
-                raise ValueError("unexpected delta opcode 0")
-            # END handle command byte
-        # END while processing delta data
-    else:
-        while i < delta_buf_size:
-            c = ord(db[i])
-            i += 1
-            if c & 0x80:
-                cp_off, cp_size = 0, 0
-                if (c & 0x01):
-                    cp_off = ord(db[i])
-                    i += 1
-                if (c & 0x02):
-                    cp_off |= (ord(db[i]) << 8)
-                    i += 1
-                if (c & 0x04):
-                    cp_off |= (ord(db[i]) << 16)
-                    i += 1
-                if (c & 0x08):
-                    cp_off |= (ord(db[i]) << 24)
-                    i += 1
-                if (c & 0x10):
-                    cp_size = ord(db[i])
-                    i += 1
-                if (c & 0x20):
-                    cp_size |= (ord(db[i]) << 8)
-                    i += 1
-                if (c & 0x40):
-                    cp_size |= (ord(db[i]) << 16)
-                    i += 1
-
-                if not cp_size:
-                    cp_size = 0x10000
-
-                rbound = cp_off + cp_size
-                if (rbound < cp_size or
-                        rbound > src_buf_size):
-                    break
-                write(buffer(src_buf, cp_off, cp_size))
-            elif c:
-                write(db[i:i + c])
-                i += c
-            else:
-                raise ValueError("unexpected delta opcode 0")
-            # END handle command byte
-        # END while processing delta data
-    # end save byte_ord call and prevent performance regression in py2
+    while i < delta_buf_size:
+        c = db[i]
+        i += 1
+        if c & 0x80:
+            cp_off, cp_size = 0, 0
+            if (c & 0x01):
+                cp_off = db[i]
+                i += 1
+            if (c & 0x02):
+                cp_off |= (db[i] << 8)
+                i += 1
+            if (c & 0x04):
+                cp_off |= (db[i] << 16)
+                i += 1
+            if (c & 0x08):
+                cp_off |= (db[i] << 24)
+                i += 1
+            if (c & 0x10):
+                cp_size = db[i]
+                i += 1
+            if (c & 0x20):
+                cp_size |= (db[i] << 8)
+                i += 1
+            if (c & 0x40):
+                cp_size |= (db[i] << 16)
+                i += 1
+
+            if not cp_size:
+                cp_size = 0x10000
+
+            rbound = cp_off + cp_size
+            if (rbound < cp_size or
+                    rbound > src_buf_size):
+                break
+            write(src_buf[cp_off:cp_off + cp_size])
+        elif c:
+            write(db[i:i + c])
+            i += c
+        else:
+            raise ValueError("unexpected delta opcode 0")
+        # END handle command byte
+    # END while processing delta data

     # yes, lets use the exact same error message that git uses :)
     assert i == delta_buf_size, "delta replay has gone wild"
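
The unified loop is git's delta opcode interpreter: a command byte with the high bit set copies from the source buffer (flag bits 0x01-0x08 select offset bytes, 0x10-0x40 size bytes, and a zero size means 0x10000), a nonzero byte without the high bit inserts that many literal bytes from the delta, and opcode 0 is reserved. A hand-assembled delta as a sketch:

from io import BytesIO
from gitdb.fun import apply_delta_data

src = b'hello world'
# 0x91 = 0x80 | 0x01 | 0x10: copy, with one offset byte (0) and one size byte (5);
# 0x01 afterwards: insert the single literal byte that follows.
delta = bytes([0x91, 0x00, 0x05, 0x01]) + b'!'

out = BytesIO()
apply_delta_data(src, len(src), delta, len(delta), out.write)
assert out.getvalue() == b'hello!'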
