TST: Replace 'ensure_clean' with 'temp_file' in some tests #62474

Open
kianelbo wants to merge 1 commit into pandas-dev:main from kianelbo:tempfile-replace
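The change is the same in every touched test and fixture: the tm.ensure_clean() context manager is replaced by the temp_file pytest fixture, the path is taken as path = str(temp_file) because the existing call sites expect a string while the fixture returns a pathlib.Path, and the body loses one level of indentation. For orientation, here is a minimal sketch of what a temp_file-style fixture could look like, assuming it is built on pytest's built-in tmp_path; the actual definition lives in the pandas conftest and may differ:

import uuid

import pytest


@pytest.fixture
def temp_file(tmp_path):
    # Sketch only: hand each test a unique, pre-created file under pytest's
    # tmp_path directory. pytest removes tmp_path automatically, so no
    # explicit cleanup is needed (unlike tm.ensure_clean, which deletes the
    # file itself when the context manager exits).
    file_path = tmp_path / str(uuid.uuid4())
    file_path.touch()
    return file_path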
Changes from all commits
34 changes: 17 additions & 17 deletions pandas/tests/io/parser/common/test_chunksize.py
@@ -295,29 +295,29 @@ def test_empty_with_nrows_chunksize(all_parsers, iterator):
     tm.assert_frame_equal(result, expected)


-def test_read_csv_memory_growth_chunksize(all_parsers):
+def test_read_csv_memory_growth_chunksize(temp_file, all_parsers):
     # see gh-24805
     #
     # Let's just make sure that we don't crash
     # as we iteratively process all chunks.
     parser = all_parsers

-    with tm.ensure_clean() as path:
-        with open(path, "w", encoding="utf-8") as f:
-            for i in range(1000):
-                f.write(str(i) + "\n")
-
-        if parser.engine == "pyarrow":
-            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
-            with pytest.raises(ValueError, match=msg):
-                with parser.read_csv(path, chunksize=20) as result:
-                    for _ in result:
-                        pass
-            return
-
-        with parser.read_csv(path, chunksize=20) as result:
-            for _ in result:
-                pass
+    path = str(temp_file)
+    with open(path, "w", encoding="utf-8") as f:
+        for i in range(1000):
+            f.write(str(i) + "\n")
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            with parser.read_csv(path, chunksize=20) as result:
+                for _ in result:
+                    pass
+        return
+
+    with parser.read_csv(path, chunksize=20) as result:
+        for _ in result:
+            pass


 def test_chunksize_with_usecols_second_block_shorter(all_parsers):
22 changes: 11 additions & 11 deletions pandas/tests/io/parser/common/test_iterator.py
@@ -142,19 +142,19 @@ def test_iterator_skipfooter_errors(all_parsers, kwargs):
             pass


-def test_iteration_open_handle(all_parsers):
+def test_iteration_open_handle(temp_file, all_parsers):
     parser = all_parsers
     kwargs = {"header": None}

-    with tm.ensure_clean() as path:
-        with open(path, "w", encoding="utf-8") as f:
-            f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
+    path = str(temp_file)
+    with open(path, "w", encoding="utf-8") as f:
+        f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")

-        with open(path, encoding="utf-8") as f:
-            for line in f:
-                if "CCC" in line:
-                    break
+    with open(path, encoding="utf-8") as f:
+        for line in f:
+            if "CCC" in line:
+                break

-            result = parser.read_csv(f, **kwargs)
-            expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
-            tm.assert_frame_equal(result, expected)
+        result = parser.read_csv(f, **kwargs)
+        expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
+        tm.assert_frame_equal(result, expected)
20 changes: 10 additions & 10 deletions pandas/tests/io/parser/test_index_col.py
@@ -200,7 +200,7 @@ def test_multi_index_naming_not_all_at_beginning(all_parsers):


 @xfail_pyarrow  # ValueError: Found non-unique column index
-def test_no_multi_index_level_names_empty(all_parsers):
+def test_no_multi_index_level_names_empty(temp_file, all_parsers):
     # GH 10984
     parser = all_parsers
     midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
@@ -209,9 +209,9 @@ def test_no_multi_index_level_names_empty(all_parsers):
         index=midx,
         columns=["x", "y", "z"],
     )
-    with tm.ensure_clean() as path:
-        expected.to_csv(path)
-        result = parser.read_csv(path, index_col=[0, 1, 2])
+    path = str(temp_file)
+    expected.to_csv(path)
+    result = parser.read_csv(path, index_col=[0, 1, 2])
     tm.assert_frame_equal(result, expected)


@@ -240,7 +240,7 @@ def test_header_with_index_col(all_parsers):


 @pytest.mark.slow
-def test_index_col_large_csv(all_parsers, monkeypatch):
+def test_index_col_large_csv(temp_file, all_parsers, monkeypatch):
     # https://github.com/pandas-dev/pandas/issues/37094
     parser = all_parsers

@@ -252,11 +252,11 @@ def test_index_col_large_csv(all_parsers, monkeypatch):
         }
     )

-    with tm.ensure_clean() as path:
-        df.to_csv(path, index=False)
-        with monkeypatch.context() as m:
-            m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
-            result = parser.read_csv(path, index_col=[0])
+    path = str(temp_file)
+    df.to_csv(path, index=False)
+    with monkeypatch.context() as m:
+        m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
+        result = parser.read_csv(path, index_col=[0])

     tm.assert_frame_equal(result, df.set_index("a"))

14 changes: 8 additions & 6 deletions pandas/tests/io/parser/test_python_parser_only.py
@@ -158,7 +158,9 @@ def test_skipfooter(python_parser_only, kwargs):
 @pytest.mark.parametrize(
     "compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
 )
-def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
+def test_decompression_regex_sep(
+    temp_file, python_parser_only, csv1, compression, klass
+):
     # see gh-6607
     parser = python_parser_only

@@ -171,12 +173,12 @@ def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
     module = pytest.importorskip(compression)
     klass = getattr(module, klass)

-    with tm.ensure_clean() as path:
-        with klass(path, mode="wb") as tmp:
-            tmp.write(data)
+    path = str(temp_file)
+    with klass(path, mode="wb") as tmp:
+        tmp.write(data)

-        result = parser.read_csv(path, sep="::", compression=compression)
-        tm.assert_frame_equal(result, expected)
+    result = parser.read_csv(path, sep="::", compression=compression)
+    tm.assert_frame_equal(result, expected)


 def test_read_csv_buglet_4x_multi_index(python_parser_only):
44 changes: 22 additions & 22 deletions pandas/tests/io/pytables/test_round_trip.py
@@ -27,33 +27,33 @@
 pytestmark = [pytest.mark.single_cpu]


-def test_conv_read_write():
-    with tm.ensure_clean() as path:
+def test_conv_read_write(temp_file):
+    path = str(temp_file)

-        def roundtrip(key, obj, **kwargs):
-            obj.to_hdf(path, key=key, **kwargs)
-            return read_hdf(path, key)
+    def roundtrip(key, obj, **kwargs):
+        obj.to_hdf(path, key=key, **kwargs)
+        return read_hdf(path, key)

-        o = Series(
-            np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
-        )
-        tm.assert_series_equal(o, roundtrip("series", o))
+    o = Series(
+        np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
+    )
+    tm.assert_series_equal(o, roundtrip("series", o))

-        o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
-        tm.assert_series_equal(o, roundtrip("string_series", o))
+    o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
+    tm.assert_series_equal(o, roundtrip("string_series", o))

-        o = DataFrame(
-            1.1 * np.arange(120).reshape((30, 4)),
-            columns=Index(list("ABCD")),
-            index=Index([f"i-{i}" for i in range(30)]),
-        )
-        tm.assert_frame_equal(o, roundtrip("frame", o))
+    o = DataFrame(
+        1.1 * np.arange(120).reshape((30, 4)),
+        columns=Index(list("ABCD")),
+        index=Index([f"i-{i}" for i in range(30)]),
+    )
+    tm.assert_frame_equal(o, roundtrip("frame", o))

-        # table
-        df = DataFrame({"A": range(5), "B": range(5)})
-        df.to_hdf(path, key="table", append=True)
-        result = read_hdf(path, "table", where=["index>2"])
-        tm.assert_frame_equal(df[df.index > 2], result)
+    # table
+    df = DataFrame({"A": range(5), "B": range(5)})
+    df.to_hdf(path, key="table", append=True)
+    result = read_hdf(path, "table", where=["index>2"])
+    tm.assert_frame_equal(df[df.index > 2], result)


 def test_long_strings(setup_path):
56 changes: 28 additions & 28 deletions pandas/tests/io/test_sql.py
@@ -748,10 +748,10 @@ def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types):


 @pytest.fixture
-def sqlite_str():
+def sqlite_str(temp_file):
     pytest.importorskip("sqlalchemy")
-    with tm.ensure_clean() as name:
-        yield f"sqlite:///{name}"
+    name = str(temp_file)
+    yield f"sqlite:///{name}"


 @pytest.fixture
@@ -817,20 +817,20 @@ def sqlite_conn_types(sqlite_engine_types):


 @pytest.fixture
-def sqlite_adbc_conn():
+def sqlite_adbc_conn(temp_file):
     pytest.importorskip("pyarrow")
     pytest.importorskip("adbc_driver_sqlite")
     from adbc_driver_sqlite import dbapi

-    with tm.ensure_clean() as name:
-        uri = f"file:{name}"
-        with dbapi.connect(uri) as conn:
-            yield conn
-            for view in get_all_views(conn):
-                drop_view(view, conn)
-            for tbl in get_all_tables(conn):
-                drop_table(tbl, conn)
-            conn.commit()
+    name = str(temp_file)
+    uri = f"file:{name}"
+    with dbapi.connect(uri) as conn:
+        yield conn
+        for view in get_all_views(conn):
+            drop_view(view, conn)
+        for tbl in get_all_tables(conn):
+            drop_table(tbl, conn)
+        conn.commit()


 @pytest.fixture
@@ -2504,20 +2504,20 @@ def test_sqlalchemy_integer_overload_mapping(conn, request, integer):
         sql.SQLTable("test_type", db, frame=df)


-def test_database_uri_string(request, test_frame1):
+def test_database_uri_string(temp_file, request, test_frame1):
     pytest.importorskip("sqlalchemy")
     # Test read_sql and .to_sql method with a database URI (GH10654)
     # db_uri = 'sqlite:///:memory:' # raises
     # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
     # "iris": syntax error [SQL: 'iris']
-    with tm.ensure_clean() as name:
-        db_uri = "sqlite:///" + name
-        table = "iris"
-        test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
-        test_frame2 = sql.read_sql(table, db_uri)
-        test_frame3 = sql.read_sql_table(table, db_uri)
-        query = "SELECT * FROM iris"
-        test_frame4 = sql.read_sql_query(query, db_uri)
+    name = str(temp_file)
+    db_uri = "sqlite:///" + name
+    table = "iris"
+    test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
+    test_frame2 = sql.read_sql(table, db_uri)
+    test_frame3 = sql.read_sql_table(table, db_uri)
+    query = "SELECT * FROM iris"
+    test_frame4 = sql.read_sql_query(query, db_uri)
     tm.assert_frame_equal(test_frame1, test_frame2)
     tm.assert_frame_equal(test_frame1, test_frame3)
     tm.assert_frame_equal(test_frame1, test_frame4)
@@ -2581,16 +2581,16 @@ def test_column_with_percentage(conn, request):
     tm.assert_frame_equal(res, df)


-def test_sql_open_close(test_frame3):
+def test_sql_open_close(temp_file, test_frame3):
     # Test if the IO in the database still work if the connection closed
     # between the writing and reading (as in many real situations).

-    with tm.ensure_clean() as name:
-        with contextlib.closing(sqlite3.connect(name)) as conn:
-            assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4
+    name = str(temp_file)
+    with contextlib.closing(sqlite3.connect(name)) as conn:
+        assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4

-        with contextlib.closing(sqlite3.connect(name)) as conn:
-            result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
+    with contextlib.closing(sqlite3.connect(name)) as conn:
+        result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)

     tm.assert_frame_equal(test_frame3, result)

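The test_sql.py changes follow the same pattern, except that the temporary path is folded into a database URI or sqlite3 connection string instead of being opened directly. Below is a self-contained sketch of that round-trip using only the standard library; write_then_read and the table name t are illustrative stand-ins, not pandas code:

import contextlib
import sqlite3


def write_then_read(path):
    # Mirror the open/close pattern from test_sql_open_close above: write
    # through one connection, close it, then read through a fresh one.
    with contextlib.closing(sqlite3.connect(path)) as conn:
        conn.execute("CREATE TABLE t (a INTEGER)")
        conn.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])
        conn.commit()
    with contextlib.closing(sqlite3.connect(path)) as conn:
        return list(conn.execute("SELECT a FROM t"))


# The same file-backed path also works as a SQLAlchemy-style URI, which is
# how the sqlite_str fixture exposes it: "sqlite:///" + path.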