add pytest-codspeed for benchmarking #286

Open
Vizonex wants to merge 1 commit into python-hyper:master from Vizonex:add-pytest-codspeed

1 change: 1 addition & 0 deletions pyproject.toml
@@ -57,6 +57,7 @@ testing = [
    "pytest-cov>=6.0.0,<7",
    "pytest-xdist>=3.6.1,<4",
    "hypothesis>=6.119.4,<7",
    "pytest-codspeed>=4.0.0,<5",
]

linting = [
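Usage note (an assumption about workflow, not part of this diff): with the testing extra installed, for example via pip install -e ".[testing]", the benchmarks added below are plain pytest tests carrying pytest-codspeed's benchmark marker. They run as ordinary assertions by default, and passing the plugin's --codspeed flag (pytest tests/test_huffman_benchmark.py --codspeed) switches on instrumented benchmark measurement, which CodSpeed's CI integration would typically collect.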
88 changes: 88 additions & 0 deletions tests/test_huffman_benchmark.py
@@ -0,0 +1,88 @@
from __future__ import annotations

from hpack import HPACKDecodingError
from hpack.huffman import HuffmanEncoder
from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH
from hpack.huffman_table import decode_huffman

from concurrent.futures import ThreadPoolExecutor

import pytest

class TestHuffmanEncoderBenchmark:

    @pytest.mark.benchmark
    def test_request_huffman_encode(self):
        encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
        assert (
            encoder.encode(b"www.example.com") ==
            b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
        )
        assert encoder.encode(b"no-cache") == b'\xa8\xeb\x10d\x9c\xbf'
        assert encoder.encode(b"custom-key") == b'%\xa8I\xe9[\xa9}\x7f'
        assert (
            encoder.encode(b"custom-value") == b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'
        )


    @pytest.mark.benchmark
    def test_request_huffman_encoder_under_heavy_threading(self):
        INPUTS = [
            b"www.example.com",
            b"no-cache",
            b"custom-key",
            b"custom-value"
        ] * 50  # 200 entries to simulate heavy traffic

        ANSWERS = {
            b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff',
            b'\xa8\xeb\x10d\x9c\xbf',
            b'%\xa8I\xe9[\xa9}\x7f',
            b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'
        }
        encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)

        with ThreadPoolExecutor(2) as te:
            for answer in te.map(encoder.encode, INPUTS):
                assert answer in ANSWERS




class TestHuffmanDecoderBenchmark:

    # pytest-codspeed cannot be installed on free-threaded 3.13 (3.13t) due to cffi 2.0,
    # so using markers was the best solution (see the skip sketch after this file).
    @pytest.mark.benchmark
    def test_request_huffman_decoder(self):
        assert (
            decode_huffman(b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff') ==
            b"www.example.com"
        )
        assert decode_huffman(b'\xa8\xeb\x10d\x9c\xbf') == b"no-cache"
        assert decode_huffman(b'%\xa8I\xe9[\xa9}\x7f') == b"custom-key"
        assert (
            decode_huffman(b'%\xa8I\xe9[\xb8\xe8\xb4\xbf') == b"custom-value"
        )

    @pytest.mark.benchmark
    def test_request_huffman_decoder_under_heavy_threading(self):
        # Ensure the Huffman decoder is thread-safe and holds up under heavy traffic.
        # SEE: https://github.com/python-hyper/hpack/issues/284
        INPUTS = [
            b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff',
            b'\xa8\xeb\x10d\x9c\xbf',
            b'%\xa8I\xe9[\xa9}\x7f',
            b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'
        ] * 50  # 200 entries should be enough to simulate a heavy load
        ANSWERS = {
            b"www.example.com",
            b"no-cache",
            b"custom-key",
            b"custom-value"
        }

        with ThreadPoolExecutor(2) as te:
            for answer in te.map(decode_huffman, INPUTS):
                assert answer in ANSWERS

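The marker-based approach mentioned in the decoder class comment above leaves open what happens where pytest-codspeed cannot be installed (such as free-threaded 3.13). Below is a minimal conftest.py sketch of one way that marker could be used, assuming the project wanted to auto-skip marker-carrying tests when the plugin is missing; the hook is illustrative only and is not part of this PR.

# conftest.py (illustrative sketch, not part of this PR): skip benchmark-marked
# tests whenever pytest-codspeed is not importable, e.g. on free-threaded 3.13.
import importlib.util

import pytest

HAS_CODSPEED = importlib.util.find_spec("pytest_codspeed") is not None


def pytest_collection_modifyitems(config, items):
    if HAS_CODSPEED:
        return  # plugin available: let pytest-codspeed handle the marked tests
    skip_bench = pytest.mark.skip(reason="pytest-codspeed is not installed")
    for item in items:
        if "benchmark" in item.keywords:  # the marker used by the tests above
            item.add_marker(skip_bench)

Alternatively, the same marker lets such builds simply deselect these tests with pytest -m "not benchmark".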