Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 1fad1b4

Browse files
committed
Replace 'ensure_clean' with 'temp_file' in some tests
1 parent e4ca405 commit 1fad1b4

File tree

6 files changed

+94
-94
lines changed

6 files changed

+94
-94
lines changed

‎pandas/tests/io/parser/common/test_chunksize.py‎

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -295,29 +295,29 @@ def test_empty_with_nrows_chunksize(all_parsers, iterator):
295295
tm.assert_frame_equal(result, expected)
296296

297297

298-
def test_read_csv_memory_growth_chunksize(all_parsers):
298+
def test_read_csv_memory_growth_chunksize(temp_file, all_parsers):
299299
# see gh-24805
300300
#
301301
# Let's just make sure that we don't crash
302302
# as we iteratively process all chunks.
303303
parser = all_parsers
304304

305-
with tm.ensure_clean() as path:
306-
with open(path, "w", encoding="utf-8") as f:
307-
for i in range(1000):
308-
f.write(str(i) + "\n")
309-
310-
if parser.engine == "pyarrow":
311-
msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
312-
with pytest.raises(ValueError, match=msg):
313-
with parser.read_csv(path, chunksize=20) as result:
314-
for _ in result:
315-
pass
316-
return
317-
318-
with parser.read_csv(path, chunksize=20) as result:
319-
for _ in result:
320-
pass
305+
path = str(temp_file)
306+
with open(path, "w", encoding="utf-8") as f:
307+
for i in range(1000):
308+
f.write(str(i) + "\n")
309+
310+
if parser.engine == "pyarrow":
311+
msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
312+
with pytest.raises(ValueError, match=msg):
313+
with parser.read_csv(path, chunksize=20) as result:
314+
for _ in result:
315+
pass
316+
return
317+
318+
with parser.read_csv(path, chunksize=20) as result:
319+
for _ in result:
320+
pass
321321

322322

323323
def test_chunksize_with_usecols_second_block_shorter(all_parsers):

‎pandas/tests/io/parser/common/test_iterator.py‎

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -142,19 +142,19 @@ def test_iterator_skipfooter_errors(all_parsers, kwargs):
142142
pass
143143

144144

145-
def test_iteration_open_handle(all_parsers):
145+
def test_iteration_open_handle(temp_file, all_parsers):
146146
parser = all_parsers
147147
kwargs = {"header": None}
148148

149-
with tm.ensure_clean() as path:
150-
with open(path, "w", encoding="utf-8") as f:
151-
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
149+
path = str(temp_file)
150+
with open(path, "w", encoding="utf-8") as f:
151+
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
152152

153-
with open(path, encoding="utf-8") as f:
154-
for line in f:
155-
if "CCC" in line:
156-
break
153+
with open(path, encoding="utf-8") as f:
154+
for line in f:
155+
if "CCC" in line:
156+
break
157157

158-
result = parser.read_csv(f, **kwargs)
159-
expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
160-
tm.assert_frame_equal(result, expected)
158+
result = parser.read_csv(f, **kwargs)
159+
expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
160+
tm.assert_frame_equal(result, expected)

‎pandas/tests/io/parser/test_index_col.py‎

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,7 @@ def test_multi_index_naming_not_all_at_beginning(all_parsers):
200200

201201

202202
@xfail_pyarrow # ValueError: Found non-unique column index
203-
def test_no_multi_index_level_names_empty(all_parsers):
203+
def test_no_multi_index_level_names_empty(temp_file, all_parsers):
204204
# GH 10984
205205
parser = all_parsers
206206
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
@@ -209,9 +209,9 @@ def test_no_multi_index_level_names_empty(all_parsers):
209209
index=midx,
210210
columns=["x", "y", "z"],
211211
)
212-
with tm.ensure_clean() as path:
213-
expected.to_csv(path)
214-
result = parser.read_csv(path, index_col=[0, 1, 2])
212+
path = str(temp_file)
213+
expected.to_csv(path)
214+
result = parser.read_csv(path, index_col=[0, 1, 2])
215215
tm.assert_frame_equal(result, expected)
216216

217217

@@ -240,7 +240,7 @@ def test_header_with_index_col(all_parsers):
240240

241241

242242
@pytest.mark.slow
243-
def test_index_col_large_csv(all_parsers, monkeypatch):
243+
def test_index_col_large_csv(temp_file, all_parsers, monkeypatch):
244244
# https://github.com/pandas-dev/pandas/issues/37094
245245
parser = all_parsers
246246

@@ -252,11 +252,11 @@ def test_index_col_large_csv(all_parsers, monkeypatch):
252252
}
253253
)
254254

255-
with tm.ensure_clean() as path:
256-
df.to_csv(path, index=False)
257-
with monkeypatch.context() as m:
258-
m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
259-
result = parser.read_csv(path, index_col=[0])
255+
path = str(temp_file)
256+
df.to_csv(path, index=False)
257+
with monkeypatch.context() as m:
258+
m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
259+
result = parser.read_csv(path, index_col=[0])
260260

261261
tm.assert_frame_equal(result, df.set_index("a"))
262262

‎pandas/tests/io/parser/test_python_parser_only.py‎

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ def test_skipfooter(python_parser_only, kwargs):
158158
@pytest.mark.parametrize(
159159
"compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]
160160
)
161-
def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
161+
def test_decompression_regex_sep(temp_file, python_parser_only, csv1, compression, klass):
162162
# see gh-6607
163163
parser = python_parser_only
164164

@@ -171,12 +171,12 @@ def test_decompression_regex_sep(python_parser_only, csv1, compression, klass):
171171
module = pytest.importorskip(compression)
172172
klass = getattr(module, klass)
173173

174-
with tm.ensure_clean() as path:
175-
with klass(path, mode="wb") as tmp:
176-
tmp.write(data)
174+
path = str(temp_file)
175+
with klass(path, mode="wb") as tmp:
176+
tmp.write(data)
177177

178-
result = parser.read_csv(path, sep="::", compression=compression)
179-
tm.assert_frame_equal(result, expected)
178+
result = parser.read_csv(path, sep="::", compression=compression)
179+
tm.assert_frame_equal(result, expected)
180180

181181

182182
def test_read_csv_buglet_4x_multi_index(python_parser_only):

‎pandas/tests/io/pytables/test_round_trip.py‎

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -27,33 +27,33 @@
2727
pytestmark = [pytest.mark.single_cpu]
2828

2929

30-
def test_conv_read_write():
31-
with tm.ensure_clean() as path:
30+
def test_conv_read_write(temp_file):
31+
path = str(temp_file)
3232

33-
def roundtrip(key, obj, **kwargs):
34-
obj.to_hdf(path, key=key, **kwargs)
35-
return read_hdf(path, key)
33+
def roundtrip(key, obj, **kwargs):
34+
obj.to_hdf(path, key=key, **kwargs)
35+
return read_hdf(path, key)
3636

37-
o = Series(
38-
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
39-
)
40-
tm.assert_series_equal(o, roundtrip("series", o))
37+
o = Series(
38+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
39+
)
40+
tm.assert_series_equal(o, roundtrip("series", o))
4141

42-
o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
43-
tm.assert_series_equal(o, roundtrip("string_series", o))
42+
o = Series(range(10), dtype="float64", index=[f"i_{i}" for i in range(10)])
43+
tm.assert_series_equal(o, roundtrip("string_series", o))
4444

45-
o = DataFrame(
46-
1.1 * np.arange(120).reshape((30, 4)),
47-
columns=Index(list("ABCD")),
48-
index=Index([f"i-{i}" for i in range(30)]),
49-
)
50-
tm.assert_frame_equal(o, roundtrip("frame", o))
45+
o = DataFrame(
46+
1.1 * np.arange(120).reshape((30, 4)),
47+
columns=Index(list("ABCD")),
48+
index=Index([f"i-{i}" for i in range(30)]),
49+
)
50+
tm.assert_frame_equal(o, roundtrip("frame", o))
5151

52-
# table
53-
df = DataFrame({"A": range(5), "B": range(5)})
54-
df.to_hdf(path, key="table", append=True)
55-
result = read_hdf(path, "table", where=["index>2"])
56-
tm.assert_frame_equal(df[df.index > 2], result)
52+
# table
53+
df = DataFrame({"A": range(5), "B": range(5)})
54+
df.to_hdf(path, key="table", append=True)
55+
result = read_hdf(path, "table", where=["index>2"])
56+
tm.assert_frame_equal(df[df.index > 2], result)
5757

5858

5959
def test_long_strings(setup_path):

‎pandas/tests/io/test_sql.py‎

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -748,10 +748,10 @@ def postgresql_psycopg2_conn_types(postgresql_psycopg2_engine_types):
748748

749749

750750
@pytest.fixture
751-
def sqlite_str():
751+
def sqlite_str(temp_file):
752752
pytest.importorskip("sqlalchemy")
753-
with tm.ensure_clean() as name:
754-
yield f"sqlite:///{name}"
753+
name = str(temp_file)
754+
yield f"sqlite:///{name}"
755755

756756

757757
@pytest.fixture
@@ -817,20 +817,20 @@ def sqlite_conn_types(sqlite_engine_types):
817817

818818

819819
@pytest.fixture
820-
def sqlite_adbc_conn():
820+
def sqlite_adbc_conn(temp_file):
821821
pytest.importorskip("pyarrow")
822822
pytest.importorskip("adbc_driver_sqlite")
823823
from adbc_driver_sqlite import dbapi
824824

825-
with tm.ensure_clean() as name:
826-
uri = f"file:{name}"
827-
with dbapi.connect(uri) as conn:
828-
yield conn
829-
for view in get_all_views(conn):
830-
drop_view(view, conn)
831-
for tbl in get_all_tables(conn):
832-
drop_table(tbl, conn)
833-
conn.commit()
825+
name = str(temp_file)
826+
uri = f"file:{name}"
827+
with dbapi.connect(uri) as conn:
828+
yield conn
829+
for view in get_all_views(conn):
830+
drop_view(view, conn)
831+
for tbl in get_all_tables(conn):
832+
drop_table(tbl, conn)
833+
conn.commit()
834834

835835

836836
@pytest.fixture
@@ -2504,20 +2504,20 @@ def test_sqlalchemy_integer_overload_mapping(conn, request, integer):
25042504
sql.SQLTable("test_type", db, frame=df)
25052505

25062506

2507-
def test_database_uri_string(request, test_frame1):
2507+
def test_database_uri_string(temp_file, request, test_frame1):
25082508
pytest.importorskip("sqlalchemy")
25092509
# Test read_sql and .to_sql method with a database URI (GH10654)
25102510
# db_uri = 'sqlite:///:memory:' # raises
25112511
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
25122512
# "iris": syntax error [SQL: 'iris']
2513-
with tm.ensure_clean() as name:
2514-
db_uri = "sqlite:///" + name
2515-
table = "iris"
2516-
test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
2517-
test_frame2 = sql.read_sql(table, db_uri)
2518-
test_frame3 = sql.read_sql_table(table, db_uri)
2519-
query = "SELECT * FROM iris"
2520-
test_frame4 = sql.read_sql_query(query, db_uri)
2513+
name = str(temp_file)
2514+
db_uri = "sqlite:///" + name
2515+
table = "iris"
2516+
test_frame1.to_sql(name=table, con=db_uri, if_exists="replace", index=False)
2517+
test_frame2 = sql.read_sql(table, db_uri)
2518+
test_frame3 = sql.read_sql_table(table, db_uri)
2519+
query = "SELECT * FROM iris"
2520+
test_frame4 = sql.read_sql_query(query, db_uri)
25212521
tm.assert_frame_equal(test_frame1, test_frame2)
25222522
tm.assert_frame_equal(test_frame1, test_frame3)
25232523
tm.assert_frame_equal(test_frame1, test_frame4)
@@ -2581,16 +2581,16 @@ def test_column_with_percentage(conn, request):
25812581
tm.assert_frame_equal(res, df)
25822582

25832583

2584-
def test_sql_open_close(test_frame3):
2584+
def test_sql_open_close(temp_file, test_frame3):
25852585
# Test if the IO in the database still work if the connection closed
25862586
# between the writing and reading (as in many real situations).
25872587

2588-
with tm.ensure_clean() as name:
2589-
with contextlib.closing(sqlite3.connect(name)) as conn:
2590-
assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4
2588+
name = str(temp_file)
2589+
with contextlib.closing(sqlite3.connect(name)) as conn:
2590+
assert sql.to_sql(test_frame3, "test_frame3_legacy", conn, index=False) == 4
25912591

2592-
with contextlib.closing(sqlite3.connect(name)) as conn:
2593-
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
2592+
with contextlib.closing(sqlite3.connect(name)) as conn:
2593+
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
25942594

25952595
tm.assert_frame_equal(test_frame3, result)
25962596

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /