62 changes: 30 additions & 32 deletions pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -97,25 +97,25 @@ def test_nonexistent_path(all_parsers):
 
 @pytest.mark.skipif(WASM, reason="limited file system access on WASM")
 @td.skip_if_windows  # os.chmod does not work in windows
-def test_no_permission(all_parsers):
+def test_no_permission(all_parsers, temp_file):
     # GH 23784
     parser = all_parsers
 
     msg = r"\[Errno 13\]"
-    with tm.ensure_clean() as path:
-        os.chmod(path, 0)  # make file unreadable
+    path = temp_file
+    os.chmod(path, 0)  # make file unreadable
 
-        # verify that this process cannot open the file (not running as sudo)
-        try:
-            with open(path, encoding="utf-8"):
-                pass
-            pytest.skip("Running as sudo.")
-        except PermissionError:
-            pass
+    # verify that this process cannot open the file (not running as sudo)
+    try:
+        with open(path, encoding="utf-8"):
+            pass
+        pytest.skip("Running as sudo.")
+    except PermissionError:
+        pass
 
-        with pytest.raises(PermissionError, match=msg) as e:
-            parser.read_csv(path)
-        assert path == e.value.filename
+    with pytest.raises(PermissionError, match=msg) as e:
+        parser.read_csv(path)
+    assert path == e.value.filename
 
 
 @pytest.mark.parametrize(
@@ -269,19 +269,18 @@ def test_internal_eof_byte(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-def test_internal_eof_byte_to_file(all_parsers):
+def test_internal_eof_byte_to_file(all_parsers, tmp_path):
     # see gh-16559
     parser = all_parsers
     data = b'c1,c2\r\n"test \x1a test", test\r\n'
    expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
-    path = f"__{uuid.uuid4()}__.csv"
+    path = tmp_path / f"__{uuid.uuid4()}__.csv"
 
-    with tm.ensure_clean(path) as path:
-        with open(path, "wb") as f:
-            f.write(data)
+    with open(path, "wb") as f:
+        f.write(data)
 
-        result = parser.read_csv(path)
-        tm.assert_frame_equal(result, expected)
+    result = parser.read_csv(path)
+    tm.assert_frame_equal(result, expected)
 
 
 def test_file_handle_string_io(all_parsers):
@@ -372,7 +371,7 @@ def test_read_csv_file_handle(all_parsers, io_class, encoding):
     assert not handle.closed
 
 
-def test_memory_map_compression(all_parsers, compression):
+def test_memory_map_compression(all_parsers, compression, temp_file):
     """
     Support memory map for compressed files.
 
@@ -381,16 +380,16 @@ def test_memory_map_compression(all_parsers, compression):
     parser = all_parsers
     expected = DataFrame({"a": [1], "b": [2]})
 
-    with tm.ensure_clean() as path:
-        expected.to_csv(path, index=False, compression=compression)
+    path = temp_file
+    expected.to_csv(path, index=False, compression=compression)
 
-        if parser.engine == "pyarrow":
-            msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
-            with pytest.raises(ValueError, match=msg):
-                parser.read_csv(path, memory_map=True, compression=compression)
-            return
+    if parser.engine == "pyarrow":
+        msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(path, memory_map=True, compression=compression)
+        return
 
-        result = parser.read_csv(path, memory_map=True, compression=compression)
+    result = parser.read_csv(path, memory_map=True, compression=compression)
 
     tm.assert_frame_equal(
         result,
@@ -442,12 +441,11 @@ def test_context_manageri_user_provided(all_parsers, datapath):
 
 
 @skip_pyarrow  # ParserError: Empty CSV file
-def test_file_descriptor_leak(all_parsers):
+def test_file_descriptor_leak(all_parsers, temp_file):
    # GH 31488
     parser = all_parsers
-    with tm.ensure_clean() as path:
-        with pytest.raises(EmptyDataError, match="No columns to parse from file"):
-            parser.read_csv(path)
+    with pytest.raises(EmptyDataError, match="No columns to parse from file"):
+        parser.read_csv(temp_file)
 
 
 def test_memory_map(all_parsers, csv_dir_path):
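All of the parser tests above drop the tm.ensure_clean context manager in favor of pytest-managed temporary paths, either the built-in tmp_path fixture or a temp_file fixture from pandas' test infrastructure. As a rough sketch of what that fixture presumably looks like (the actual definition lives in pandas' conftest.py and may differ):

import uuid

import pytest


@pytest.fixture
def temp_file(tmp_path):
    # tmp_path is pytest's built-in per-test temporary directory;
    # everything inside it is removed automatically once the test
    # finishes, so the test body needs no explicit cleanup.
    file_path = tmp_path / str(uuid.uuid4())
    file_path.touch()
    return file_path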
79 changes: 38 additions & 41 deletions pandas/tests/io/pytables/test_store.py
@@ -41,22 +41,20 @@
 tables = pytest.importorskip("tables")
 
 
-def test_context(setup_path):
-    with tm.ensure_clean(setup_path) as path:
-        try:
-            with HDFStore(path) as tbl:
-                raise ValueError("blah")
-        except ValueError:
-            pass
-    with tm.ensure_clean(setup_path) as path:
-        with HDFStore(path) as tbl:
-            tbl["a"] = DataFrame(
-                1.1 * np.arange(120).reshape((30, 4)),
-                columns=Index(list("ABCD"), dtype=object),
-                index=Index([f"i-{i}" for i in range(30)], dtype=object),
-            )
-            assert len(tbl) == 1
-            assert type(tbl["a"]) == DataFrame
+def test_context(temp_file):
+    try:
+        with HDFStore(temp_file) as tbl:
+            raise ValueError("blah")
+    except ValueError:
+        pass
+    with HDFStore(temp_file) as tbl:
+        tbl["a"] = DataFrame(
+            1.1 * np.arange(120).reshape((30, 4)),
+            columns=Index(list("ABCD"), dtype=object),
+            index=Index([f"i-{i}" for i in range(30)], dtype=object),
+        )
+        assert len(tbl) == 1
+        assert type(tbl["a"]) == DataFrame
 
 
 def test_no_track_times(tmp_path, setup_path):
@@ -971,37 +969,36 @@ def test_pickle_path_localpath():
 
 
 @pytest.mark.parametrize("propindexes", [True, False])
-def test_copy(propindexes):
+def test_copy(propindexes, temp_file):
     df = DataFrame(
         1.1 * np.arange(120).reshape((30, 4)),
         columns=Index(list("ABCD")),
         index=Index([f"i-{i}" for i in range(30)]),
     )
 
-    with tm.ensure_clean() as path:
-        with HDFStore(path) as st:
-            st.append("df", df, data_columns=["A"])
-        with tempfile.NamedTemporaryFile() as new_f:
-            with HDFStore(path) as store:
-                with contextlib.closing(
-                    store.copy(new_f.name, keys=None, propindexes=propindexes)
-                ) as tstore:
-                    # check keys
-                    keys = store.keys()
-                    assert set(keys) == set(tstore.keys())
-                    # check indices & nrows
-                    for k in tstore.keys():
-                        if tstore.get_storer(k).is_table:
-                            new_t = tstore.get_storer(k)
-                            orig_t = store.get_storer(k)
-
-                            assert orig_t.nrows == new_t.nrows
-
-                            # check propindixes
-                            if propindexes:
-                                for a in orig_t.axes:
-                                    if a.is_indexed:
-                                        assert new_t[a.name].is_indexed
+    with HDFStore(temp_file) as st:
+        st.append("df", df, data_columns=["A"])
+    with tempfile.NamedTemporaryFile() as new_f:
+        with HDFStore(temp_file) as store:
+            with contextlib.closing(
+                store.copy(new_f.name, keys=None, propindexes=propindexes)
+            ) as tstore:
+                # check keys
+                keys = store.keys()
+                assert set(keys) == set(tstore.keys())
+                # check indices & nrows
+                for k in tstore.keys():
+                    if tstore.get_storer(k).is_table:
+                        new_t = tstore.get_storer(k)
+                        orig_t = store.get_storer(k)
+
+                        assert orig_t.nrows == new_t.nrows
+
+                        # check propindixes
+                        if propindexes:
+                            for a in orig_t.axes:
+                                if a.is_indexed:
+                                    assert new_t[a.name].is_indexed
 
 
 def test_duplicate_column_name(tmp_path, setup_path):
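The PyTables hunks follow the same pattern as the parser ones: cleanup moves from the tm.ensure_clean context manager into the fixture, which removes one level of nesting from every test body. A hypothetical test written against the new pattern (illustrative only, not part of this diff) would read:

import pandas._testing as tm
from pandas import DataFrame, read_csv


def test_roundtrip(temp_file):
    # Write and re-read a small frame; the fixture's tmp_path parent
    # directory is deleted by pytest after the test, so there is no
    # try/finally or with-block for cleanup.
    df = DataFrame({"a": [1], "b": [2]})
    df.to_csv(temp_file, index=False)
    result = read_csv(temp_file)
    tm.assert_frame_equal(result, df)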