public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] repo/gentoo:master commit in: sci-libs/datasets/, sci-libs/datasets/files/
@ 2023-05-07 20:16 Alfredo Tupone
From: Alfredo Tupone @ 2023-05-07 20:16 UTC
  To: gentoo-commits

commit:     6d7268c6c8e47b43efd5550b05d3ad587704bdff
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Sun May  7 20:15:13 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Sun May  7 20:15:42 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=6d7268c6

sci-libs/datasets: new package, add 2.11.0

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/Manifest                         |   1 +
 sci-libs/datasets/datasets-2.11.0.ebuild           |  53 +++++++
 .../datasets/files/datasets-2.11.0-tests.patch     | 168 +++++++++++++++++++++
 sci-libs/datasets/metadata.xml                     |  12 ++
 4 files changed, 234 insertions(+)

diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
new file mode 100644
index 000000000000..37284ce3b3e7
--- /dev/null
+++ b/sci-libs/datasets/Manifest
@@ -0,0 +1 @@
+DIST datasets-2.11.0.gh.tar.gz 2141289 BLAKE2B 0fb471dd6ee5de3831eb6586c4a15e67381262470b72d5ab02ee87dfc7977cb4d40e04da6507049d1e47cb8948cad11988bb7627293b48231e1cd413d2cfb885 SHA512 9ec2274d7978e3dde1b2f8ce78dd65bdf66742bbfee7b8672af46216aeaae3ef5c4604a8a5ea0bdee808f1c362cca9a122c16d2e9a161678148e581e4cd5c863
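
The Manifest entry above pins two checksums for the distfile. For readers who want to verify a fetched tarball by hand, a minimal Python sketch (assuming the tarball sits in the current directory; Gentoo's BLAKE2B is hashlib's blake2b with its default 64-byte digest):

    import hashlib

    fname = "datasets-2.11.0.gh.tar.gz"
    blake2b = hashlib.blake2b()  # digest_size defaults to 64 bytes, as in the Manifest
    sha512 = hashlib.sha512()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            blake2b.update(chunk)
            sha512.update(chunk)
    print("BLAKE2B", blake2b.hexdigest())
    print("SHA512 ", sha512.hexdigest())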

diff --git a/sci-libs/datasets/datasets-2.11.0.ebuild b/sci-libs/datasets/datasets-2.11.0.ebuild
new file mode 100644
index 000000000000..0ff1a4f0b386
--- /dev/null
+++ b/sci-libs/datasets/datasets-2.11.0.ebuild
@@ -0,0 +1,53 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+DISTUTILS_USE_PEP517=setuptools
+PYTHON_COMPAT=( python3_{9..11} )
+DISTUTILS_SINGLE_IMPL=1
+inherit distutils-r1
+
+DESCRIPTION="access and share datasets for Audio, Computer Vision, and NLP tasks"
+HOMEPAGE="
+	https://pypi.org/project/datasets/
+"
+SRC_URI="https://github.com/huggingface/${PN}/archive/refs/tags/${PV}.tar.gz
+	-> ${P}.gh.tar.gz"
+IUSE="test"
+
+LICENSE="Apache-2.0"
+SLOT="0"
+KEYWORDS="~amd64"
+
+RDEPEND="
+	${PYTHON_DEPS}
+	sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
+	$(python_gen_cond_dep '
+		dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
+		dev-python/fsspec[${PYTHON_USEDEP}]
+		dev-python/multiprocess[${PYTHON_USEDEP}]
+		dev-python/aiohttp[${PYTHON_USEDEP}]
+		dev-python/xxhash[${PYTHON_USEDEP}]
+		dev-python/zstandard[${PYTHON_USEDEP}]
+		dev-python/absl-py[${PYTHON_USEDEP}]
+		sci-libs/scikit-learn[${PYTHON_USEDEP}]
+	')
+"
+DEPEND="${RDEPEND}"
+BDEPEND="test? (
+	$(python_gen_cond_dep '
+		dev-python/pytest-datadir[${PYTHON_USEDEP}]
+	')
+)"
+
+PATCHES=( "${FILESDIR}"/${P}-tests.patch )
+
+distutils_enable_tests pytest
+
+src_prepare() {
+	# Requires jiwer
+	rm metrics/cer/test_cer.py || die
+
+	distutils-r1_src_prepare
+}
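
The src_prepare() above deletes metrics/cer/test_cer.py outright because it imports jiwer, which is not among this ebuild's dependencies. As a sketch of the alternative, upstream-style approach (hypothetical here, not what the ebuild does), pytest can instead skip the module at collection time when the import is missing:

    import pytest

    # Skip this whole test module when jiwer is not installed,
    # rather than removing the file during src_prepare().
    jiwer = pytest.importorskip("jiwer")

    def test_cer_identity():
        # Character error rate of identical strings is zero
        # (assumes jiwer's cer() helper, present in recent releases).
        assert jiwer.cer("hello", "hello") == 0.0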

diff --git a/sci-libs/datasets/files/datasets-2.11.0-tests.patch b/sci-libs/datasets/files/datasets-2.11.0-tests.patch
new file mode 100644
index 000000000000..01e5d9c70e7b
--- /dev/null
+++ b/sci-libs/datasets/files/datasets-2.11.0-tests.patch
@@ -0,0 +1,168 @@
+--- a/tests/test_metric_common.py	2023-05-04 18:48:48.550861318 +0200
++++ b/tests/test_metric_common.py	2023-05-04 18:50:25.787364577 +0200
+@@ -93,6 +93,7 @@
+     INTENSIVE_CALLS_PATCHER = {}
+     metric_name = None
+ 
++    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
+     def test_load_metric(self, metric_name):
+         doctest.ELLIPSIS_MARKER = "[...]"
+         metric_module = importlib.import_module(
+--- a/tests/test_hf_gcp.py	2023-05-04 19:33:31.150825303 +0200
++++ b/tests/test_hf_gcp.py	2023-05-04 19:40:08.401759538 +0200
+@@ -69,6 +69,7 @@
+             self.assertTrue(os.path.exists(datset_info_path))
+ 
+ 
++@pytest.mark.skip(reason="require apache_beam")
+ @pytest.mark.integration
+ def test_wikipedia_frr(tmp_path_factory):
+     tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
+--- a/tests/test_distributed.py	2023-05-04 19:43:09.861275030 +0200
++++ b/tests/test_distributed.py	2023-05-04 19:44:17.608326722 +0200
+@@ -55,6 +55,7 @@
+     assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize("streaming", [False, True])
+ @require_torch
+ @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
+@@ -76,6 +77,7 @@
+     execute_subprocess_async(cmd, env=os.environ.copy())
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize(
+     "nproc_per_node, num_workers",
+     [
+--- a/tests/utils.py	2023-05-06 08:43:16.251987543 +0200
++++ b/tests/utils.py	2023-05-06 08:44:24.467952870 +0200
+@@ -54,8 +54,8 @@
+ # Audio
+ require_sndfile = pytest.mark.skipif(
+     # On Windows and OS X, soundfile installs sndfile
+-    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
+-    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
++    True,
++    reason="test requires librosa",
+ )
+ 
+ # Beam
+--- a/tests/features/test_audio.py	2023-05-06 09:03:58.680108142 +0200
++++ a/tests/features/test_audio.py	2023-05-06 09:05:50.463407967 +0200
+@@ -57,6 +57,7 @@
+     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -82,6 +82,7 @@
+     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -148,6 +149,7 @@
+     assert decoded_example["sampling_rate"] == 48000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
+ def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
+     audio_path = str(shared_datadir / "test_audio_16000.pcm")
+@@ -416,6 +417,7 @@
+     assert column[0]["sampling_rate"] == 16000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_data",
+     [
+@@ -440,6 +442,7 @@
+     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -453,6 +456,7 @@
+     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_nested_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -616,6 +616,7 @@
+     assert isinstance(ds, Dataset)
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -633,6 +634,7 @@
+     assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -664,6 +666,7 @@
+         assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+--- a/tests/test_metric_common.py	2023-05-06 13:20:24.496197629 +0200
++++ b/tests/test_metric_common.py	2023-05-06 13:21:09.916732417 +0200
+@@ -210,6 +210,7 @@
+             yield
+ 
+ 
++@pytest.mark.skip(reason="require seqeval")
+ def test_seqeval_raises_when_incorrect_scheme():
+     metric = load_metric(os.path.join("metrics", "seqeval"))
+     wrong_scheme = "ERROR"
+--- a/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:00:39.560876163 +0200
++++ b/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:01:26.005212423 +0200
+@@ -4,7 +4,6 @@
+ import librosa
+ import numpy as np
+ import pytest
+-import soundfile as sf
+ 
+ from datasets import Audio, ClassLabel, Features, Value
+ from datasets.data_files import DataFilesDict, get_data_patterns_locally
+@@ -191,9 +190,11 @@
+     assert len(data_files_with_two_splits_and_metadata["test"]) == 2
+     return data_files_with_two_splits_and_metadata
+ 
+-
++@pytest.mark.skip(reason="require soundfile")
+ @pytest.fixture
+ def data_files_with_zip_archives(tmp_path, audio_file):
++    import soundfile as sf
++
+     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
+     data_dir.mkdir(parents=True, exist_ok=True)
+     archive_dir = data_dir / "archive"
+--- a/tests/test_arrow_dataset.py	2023-05-06 15:36:11.080459079 +0200
++++ b/tests/test_arrow_dataset.py	2023-05-06 15:38:07.452828528 +0200
+@@ -3928,6 +3928,7 @@
+                 )
+                 self.assertDictEqual(features_after_cast, dset.features)
+ 
++    @pytest.mark.skip(reason="require soundfile")
+     def test_task_automatic_speech_recognition(self):
+         # Include a dummy extra column `dummy` to test we drop it correctly
+         features_before_cast = Features(
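
The tests/utils.py hunk above hard-codes the require_sndfile condition to True, so every test carrying that marker is skipped unconditionally (soundfile and librosa are not packaged in Gentoo). Reconstructed from the removed lines, the upstream marker is roughly this sketch:

    from importlib.util import find_spec

    import importlib_metadata
    import pytest
    from packaging import version

    # Upstream: skip audio tests unless soundfile >= 0.12.0 is importable.
    # The Gentoo patch replaces the condition below with a literal True.
    require_sndfile = pytest.mark.skipif(
        find_spec("soundfile") is None
        or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
        reason="test requires soundfile>=0.12.0",
    )

    @require_sndfile
    def test_wav_roundtrip():
        import soundfile  # noqa: F401  # only reached when the marker lets it run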

diff --git a/sci-libs/datasets/metadata.xml b/sci-libs/datasets/metadata.xml
new file mode 100644
index 000000000000..94c112402049
--- /dev/null
+++ b/sci-libs/datasets/metadata.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+	<maintainer type="person">
+		<email>tupone@gentoo.org</email>
+		<name>Tupone Alfredo</name>
+	</maintainer>
+	<upstream>
+		<remote-id type="github">huggingface/datasets</remote-id>
+		<remote-id type="pypi">datasets</remote-id>
+	</upstream>
+</pkgmetadata>



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/datasets/, sci-libs/datasets/files/
@ 2023-08-24 15:29 Alfredo Tupone
From: Alfredo Tupone @ 2023-08-24 15:29 UTC
  To: gentoo-commits

commit:     6e90b8bfd4261e397712272c9565c5ab32aeeb98
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 24 15:28:19 2023 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Aug 24 15:28:47 2023 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=6e90b8bf

sci-libs/datasets: add 2.12.0

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/Manifest                         |   1 +
 sci-libs/datasets/datasets-2.12.0.ebuild           |  53 +++++
 .../datasets/files/datasets-2.12.0-tests.patch     | 242 +++++++++++++++++++++
 3 files changed, 296 insertions(+)

diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
index 37284ce3b3e7..3fa2a12d0e5c 100644
--- a/sci-libs/datasets/Manifest
+++ b/sci-libs/datasets/Manifest
@@ -1 +1,2 @@
 DIST datasets-2.11.0.gh.tar.gz 2141289 BLAKE2B 0fb471dd6ee5de3831eb6586c4a15e67381262470b72d5ab02ee87dfc7977cb4d40e04da6507049d1e47cb8948cad11988bb7627293b48231e1cd413d2cfb885 SHA512 9ec2274d7978e3dde1b2f8ce78dd65bdf66742bbfee7b8672af46216aeaae3ef5c4604a8a5ea0bdee808f1c362cca9a122c16d2e9a161678148e581e4cd5c863
+DIST datasets-2.12.0.gh.tar.gz 2149274 BLAKE2B 8f188901dfe293ac2b673f37e0d135e01a8f131adf9030ef1815ce2faa7ba0b36faf64a002cae1ced2d3ed5b7f50f43ba5cda90ab9254fd5f66bbfaed6085f3f SHA512 7389a1c6ee8ff4cda39a2c3f52218aa6f4b1cd6b45f48f83bfa2191359a8999d54153120d968b3cf7e5e932f88822783578e3d859dcb20f38fb0d915d88220c9

diff --git a/sci-libs/datasets/datasets-2.12.0.ebuild b/sci-libs/datasets/datasets-2.12.0.ebuild
new file mode 100644
index 000000000000..66b609fd2b57
--- /dev/null
+++ b/sci-libs/datasets/datasets-2.12.0.ebuild
@@ -0,0 +1,53 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+DISTUTILS_USE_PEP517=setuptools
+PYTHON_COMPAT=( python3_{9..11} )
+DISTUTILS_SINGLE_IMPL=1
+inherit distutils-r1
+
+DESCRIPTION="Access and share datasets for Audio, Computer Vision, and NLP tasks"
+HOMEPAGE="
+	https://pypi.org/project/datasets/
+"
+SRC_URI="https://github.com/huggingface/${PN}/archive/refs/tags/${PV}.tar.gz
+	-> ${P}.gh.tar.gz"
+IUSE="test"
+
+LICENSE="Apache-2.0"
+SLOT="0"
+KEYWORDS="~amd64"
+
+RDEPEND="
+	${PYTHON_DEPS}
+	sci-libs/pytorch[${PYTHON_SINGLE_USEDEP}]
+	$(python_gen_cond_dep '
+		dev-python/absl-py[${PYTHON_USEDEP}]
+		dev-python/aiohttp[${PYTHON_USEDEP}]
+		dev-python/fsspec[${PYTHON_USEDEP}]
+		dev-python/multiprocess[${PYTHON_USEDEP}]
+		dev-python/pandas[${PYTHON_USEDEP}]
+		dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
+		dev-python/tqdm[${PYTHON_USEDEP}]
+		dev-python/xxhash[${PYTHON_USEDEP}]
+		dev-python/zstandard[${PYTHON_USEDEP}]
+		sci-libs/huggingface_hub[${PYTHON_USEDEP}]
+		sci-libs/scikit-learn[${PYTHON_USEDEP}]
+	')
+"
+DEPEND="${RDEPEND}"
+BDEPEND="test? (
+	$(python_gen_cond_dep '
+		dev-python/pytest-datadir[${PYTHON_USEDEP}]
+		dev-python/decorator[${PYTHON_USEDEP}]
+		=dev-python/sqlalchemy-1*[${PYTHON_USEDEP}]
+		sci-libs/jiwer[${PYTHON_USEDEP}]
+		sci-libs/seqeval[${PYTHON_USEDEP}]
+	')
+)"
+
+PATCHES=( "${FILESDIR}"/${P}-tests.patch )
+
+distutils_enable_tests pytest

diff --git a/sci-libs/datasets/files/datasets-2.12.0-tests.patch b/sci-libs/datasets/files/datasets-2.12.0-tests.patch
new file mode 100644
index 000000000000..6be3156bb70d
--- /dev/null
+++ b/sci-libs/datasets/files/datasets-2.12.0-tests.patch
@@ -0,0 +1,242 @@
+--- a/tests/test_metric_common.py	2023-05-04 18:48:48.550861318 +0200
++++ b/tests/test_metric_common.py	2023-05-04 18:50:25.787364577 +0200
+@@ -93,6 +93,7 @@
+     INTENSIVE_CALLS_PATCHER = {}
+     metric_name = None
+ 
++    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
+     @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
+     @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
+     def test_load_metric(self, metric_name):
+--- a/tests/test_hf_gcp.py	2023-05-04 19:33:31.150825303 +0200
++++ b/tests/test_hf_gcp.py	2023-05-04 19:40:08.401759538 +0200
+@@ -75,6 +75,7 @@
+             self.assertTrue(os.path.exists(datset_info_path))
+ 
+ 
++@pytest.mark.skip(reason="require apache_beam")
+ @pytest.mark.integration
+ def test_as_dataset_from_hf_gcs(tmp_path_factory):
+     tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
+--- a/tests/test_distributed.py	2023-05-04 19:43:09.861275030 +0200
++++ b/tests/test_distributed.py	2023-05-04 19:44:17.608326722 +0200
+@@ -74,6 +74,7 @@
+         split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize("streaming", [False, True])
+ @require_torch
+ @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
+@@ -95,6 +96,7 @@
+     execute_subprocess_async(cmd, env=os.environ.copy())
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize(
+     "nproc_per_node, num_workers",
+     [
+--- a/tests/utils.py	2023-05-06 08:43:16.251987543 +0200
++++ b/tests/utils.py	2023-05-06 08:44:24.467952870 +0200
+@@ -55,8 +55,8 @@
+ # Audio
+ require_sndfile = pytest.mark.skipif(
+     # On Windows and OS X, soundfile installs sndfile
+-    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
+-    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
++    True,
++    reason="test requires librosa",
+ )
+ 
+ # Beam
+--- a/tests/features/test_audio.py	2023-05-06 09:03:58.680108142 +0200
++++ a/tests/features/test_audio.py	2023-05-06 09:05:50.463407967 +0200
+@@ -57,6 +57,7 @@
+     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -81,6 +82,7 @@
+     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -148,6 +149,7 @@
+     assert decoded_example["sampling_rate"] == 48000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
+ def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
+     audio_path = str(shared_datadir / "test_audio_16000.pcm")
+@@ -414,6 +417,7 @@
+     assert column[0]["sampling_rate"] == 16000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_data",
+     [
+@@ -438,6 +442,7 @@
+     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -451,6 +456,7 @@
+     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_nested_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -610,6 +616,7 @@
+     assert isinstance(ds, Dataset)
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -627,6 +634,7 @@
+     assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -658,6 +666,7 @@
+         assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+--- a/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:00:39.560876163 +0200
++++ b/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:01:26.005212423 +0200
+@@ -1,10 +1,8 @@
+ import shutil
+ import textwrap
+ 
+-import librosa
+ import numpy as np
+ import pytest
+-import soundfile as sf
+ 
+ from datasets import Audio, ClassLabel, Features, Value
+ from datasets.data_files import DataFilesDict, get_data_patterns_locally
+@@ -192,8 +190,11 @@
+     return data_files_with_two_splits_and_metadata
+ 
+ 
++@pytest.mark.skip(reason="require soundfile")
+ @pytest.fixture
+ def data_files_with_zip_archives(tmp_path, audio_file):
++    import soundfile as sf
++    import librosa
+     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
+     data_dir.mkdir(parents=True, exist_ok=True)
+     archive_dir = data_dir / "archive"
+--- a/tests/test_arrow_dataset.py	2023-05-06 15:36:11.080459079 +0200
++++ b/tests/test_arrow_dataset.py	2023-05-06 15:38:07.452828528 +0200
+@@ -3983,6 +3983,7 @@
+                 )
+                 self.assertDictEqual(features_after_cast, dset.features)
+ 
++    @pytest.mark.skip(reason="require soundfile")
+     def test_task_automatic_speech_recognition(self):
+         # Include a dummy extra column `dummy` to test we drop it correctly
+         features_before_cast = Features(
+--- a/tests/test_streaming_download_manager.py	2023-05-15 23:06:59.146379973 +0200
++++ b/tests/test_streaming_download_manager.py	2023-05-15 23:11:32.441363757 +0200
+@@ -217,6 +217,7 @@
+     assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, exists",
+     [
+@@ -299,6 +300,7 @@
+         assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_paths",
+     [
+@@ -328,6 +330,7 @@
+         xlistdir(root_url, use_auth_token=hf_token)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, isdir",
+     [
+@@ -355,6 +358,7 @@
+         xisdir(root_url, use_auth_token=hf_token)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, isfile",
+     [
+@@ -378,6 +382,7 @@
+     assert xisfile(root_url + "qwertyuiop", use_auth_token=hf_token) is False
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, size",
+     [
+@@ -402,6 +407,7 @@
+         xgetsize(root_url + "qwertyuiop", use_auth_token=hf_token)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_paths",
+     [
+@@ -444,6 +450,7 @@
+     assert len(xglob("zip://qwertyuiop/*::" + root_url, use_auth_token=hf_token)) == 0
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_outputs",
+     [
+@@ -533,6 +540,7 @@
+     def test_xpath_as_posix(self, input_path, expected_path):
+         assert xPath(input_path).as_posix() == expected_path
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, exists",
+         [
+@@ -548,6 +556,7 @@
+             (tmp_path / "file.txt").touch()
+         assert xexists(input_path) is exists
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, pattern, expected_paths",
+         [
+@@ -586,6 +595,7 @@
+         output_paths = sorted(xPath(input_path).glob(pattern))
+         assert output_paths == expected_paths
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, pattern, expected_paths",
+         [



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/datasets/, sci-libs/datasets/files/
@ 2024-02-21 11:33 Alfredo Tupone
From: Alfredo Tupone @ 2024-02-21 11:33 UTC
  To: gentoo-commits

commit:     c05b1cf909d724d72c0ce14fd7c870ab3749677f
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 21 11:32:18 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Wed Feb 21 11:33:05 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=c05b1cf9

sci-libs/datasets: add 2.16.0, drop 2.15.0

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/Manifest                         |  2 +-
 ...tasets-2.15.0.ebuild => datasets-2.16.0.ebuild} |  4 +-
 .../datasets/files/datasets-2.15.0-tests.patch     | 46 -----------
 .../datasets/files/datasets-2.16.0-tests.patch     | 89 ++++++++++++++++++++++
 4 files changed, 92 insertions(+), 49 deletions(-)

diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
index 42abdebf934c..0880ec7cb629 100644
--- a/sci-libs/datasets/Manifest
+++ b/sci-libs/datasets/Manifest
@@ -1 +1 @@
-DIST datasets-2.15.0.gh.tar.gz 2147191 BLAKE2B eadf0133f0baa9f0469a51f28e00d3656b2b799ed1ff221ad6df39640c9777ccd46b706e46898ffa0597bc43288ee5991410d5c6d0a2cb3b814658c92d779a68 SHA512 589ca7992d58007c556558ef0889354fe34821f55e79025ea475d08c105428fe84c77c9183ec0028d8e60b25ba0ea8565bd8c6003a85bb6472d1cb4a247142e2
+DIST datasets-2.16.0.gh.tar.gz 2163874 BLAKE2B baec91a0e39fac3e07f11e352a286c0940cbc672e7233267e70d1abb64dd31bae18c55213a20fafaeaf2f60268104f294c77c9b73ddc1b289175904288a7c440 SHA512 f2a17ffab192163cfc196cc2bad0adb2ca657b5cf911f74f299b6e29eb4fcfacc377505b1857974a6b55252eedf8775a8706f9e991450c55e5d613020dc03735

diff --git a/sci-libs/datasets/datasets-2.15.0.ebuild b/sci-libs/datasets/datasets-2.16.0.ebuild
similarity index 95%
rename from sci-libs/datasets/datasets-2.15.0.ebuild
rename to sci-libs/datasets/datasets-2.16.0.ebuild
index 52af2f93ac88..0325b5ae63d6 100644
--- a/sci-libs/datasets/datasets-2.15.0.ebuild
+++ b/sci-libs/datasets/datasets-2.16.0.ebuild
@@ -4,7 +4,7 @@
 EAPI=8
 
 DISTUTILS_USE_PEP517=setuptools
-PYTHON_COMPAT=( python3_{9..11} )
+PYTHON_COMPAT=( python3_{10..12} )
 DISTUTILS_SINGLE_IMPL=1
 inherit distutils-r1
 
@@ -36,7 +36,7 @@ RDEPEND="
 		dev-python/tqdm[${PYTHON_USEDEP}]
 		dev-python/xxhash[${PYTHON_USEDEP}]
 		dev-python/zstandard[${PYTHON_USEDEP}]
-		>=sci-libs/huggingface_hub-0.14.0[${PYTHON_USEDEP}]
+		sci-libs/huggingface_hub[${PYTHON_USEDEP}]
 		sci-libs/scikit-learn[${PYTHON_USEDEP}]
 	')
 "

diff --git a/sci-libs/datasets/files/datasets-2.15.0-tests.patch b/sci-libs/datasets/files/datasets-2.15.0-tests.patch
deleted file mode 100644
index 64d8dcfdc8d8..000000000000
--- a/sci-libs/datasets/files/datasets-2.15.0-tests.patch
+++ /dev/null
@@ -1,46 +0,0 @@
---- a/tests/test_arrow_dataset.py	2024-02-20 21:53:24.248470991 +0100
-+++ b/tests/test_arrow_dataset.py	2024-02-20 21:53:29.441804737 +0100
-@@ -3978,7 +3978,6 @@
-     [
-         "relative/path",
-         "/absolute/path",
--        "s3://bucket/relative/path",
-         "hdfs://relative/path",
-         "hdfs:///absolute/path",
-     ],
---- a/tests/test_hf_gcp.py	2024-02-20 21:55:18.821852434 +0100
-+++ b/tests/test_hf_gcp.py	2024-02-20 21:55:46.525186394 +0100
-@@ -22,7 +22,6 @@
-     {"dataset": "wikipedia", "config_name": "20220301.it"},
-     {"dataset": "wikipedia", "config_name": "20220301.simple"},
-     {"dataset": "snli", "config_name": "plain_text"},
--    {"dataset": "eli5", "config_name": "LFQA_reddit"},
-     {"dataset": "wiki40b", "config_name": "en"},
-     {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
-     {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
---- a/tests/test_inspect.py	2024-02-20 22:01:35.148488467 +0100
-+++ b/tests/test_inspect.py	2024-02-20 22:02:14.458561571 +0100
-@@ -15,7 +15,7 @@
- pytestmark = pytest.mark.integration
- 
- 
--@pytest.mark.parametrize("path", ["paws", "csv"])
-+@pytest.mark.parametrize("path", ["csv"])
- def test_inspect_dataset(path, tmp_path):
-     inspect_dataset(path, tmp_path)
-     script_name = path + ".py"
---- a/tests/test_load.py	2024-02-20 22:12:13.699209107 +0100
-+++ b/tests/test_load.py	2024-02-20 22:13:10.862626708 +0100
-@@ -1235,12 +1235,6 @@
- 
- 
- @pytest.mark.integration
--def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
--    ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token)
--    assert next(iter(ds)) is not None
--
--
--@pytest.mark.integration
- def test_load_dataset_config_kwargs_passed_as_arguments():
-     ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4)
-     ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)

diff --git a/sci-libs/datasets/files/datasets-2.16.0-tests.patch b/sci-libs/datasets/files/datasets-2.16.0-tests.patch
new file mode 100644
index 000000000000..6b2845bce168
--- /dev/null
+++ b/sci-libs/datasets/files/datasets-2.16.0-tests.patch
@@ -0,0 +1,89 @@
+--- a/tests/test_arrow_dataset.py	2024-02-20 21:53:24.248470991 +0100
++++ b/tests/test_arrow_dataset.py	2024-02-20 21:53:29.441804737 +0100
+@@ -3982,7 +3982,6 @@
+     [
+         "relative/path",
+         "/absolute/path",
+-        "s3://bucket/relative/path",
+         "hdfs://relative/path",
+         "hdfs:///absolute/path",
+     ],
+--- a/tests/test_load.py	2024-02-20 22:12:13.699209107 +0100
++++ b/tests/test_load.py	2024-02-20 22:13:10.862626708 +0100
+@@ -386,21 +386,6 @@
+             hf_modules_cache=self.hf_modules_cache,
+         )
+ 
+-    def test_HubDatasetModuleFactoryWithScript_dont_trust_remote_code(self):
+-        # "squad" has a dataset script
+-        factory = HubDatasetModuleFactoryWithScript(
+-            "squad", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path
+-        )
+-        with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None):  # this will be the default soon
+-            self.assertRaises(ValueError, factory.get_module)
+-        factory = HubDatasetModuleFactoryWithScript(
+-            "squad",
+-            download_config=self.download_config,
+-            dynamic_modules_path=self.dynamic_modules_path,
+-            trust_remote_code=False,
+-        )
+-        self.assertRaises(ValueError, factory.get_module)
+-
+     def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self):
+         # "wmt_t2t" has additional imports (internal)
+         factory = HubDatasetModuleFactoryWithScript(
+@@ -1235,12 +1235,6 @@
+ 
+ 
+ @pytest.mark.integration
+-def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
+-    ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token)
+-    assert next(iter(ds)) is not None
+-
+-
+-@pytest.mark.integration
+ def test_load_dataset_config_kwargs_passed_as_arguments():
+     ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4)
+     ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)
+--- a/tests/test_hf_gcp.py	2024-02-21 09:59:26.918397895 +0100
++++ b/tests/test_hf_gcp.py	2024-02-21 09:59:46.335100597 +0100
+@@ -21,7 +21,6 @@
+     {"dataset": "wikipedia", "config_name": "20220301.frr"},
+     {"dataset": "wikipedia", "config_name": "20220301.it"},
+     {"dataset": "wikipedia", "config_name": "20220301.simple"},
+-    {"dataset": "eli5", "config_name": "LFQA_reddit"},
+     {"dataset": "wiki40b", "config_name": "en"},
+     {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
+     {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
+--- a/tests/test_inspect.py	2024-02-21 10:03:32.315520016 +0100
++++ b/tests/test_inspect.py	2024-02-21 10:03:50.345553490 +0100
+@@ -18,7 +18,7 @@
+ pytestmark = pytest.mark.integration
+ 
+ 
+-@pytest.mark.parametrize("path", ["paws", csv.__file__])
++@pytest.mark.parametrize("path", [csv.__file__])
+ def test_inspect_dataset(path, tmp_path):
+     inspect_dataset(path, tmp_path)
+     script_name = Path(path).stem + ".py"
+--- a/tests/packaged_modules/test_cache.py	2024-02-21 12:04:18.036866572 +0100
++++ b/tests/packaged_modules/test_cache.py	2024-02-21 12:04:54.333558520 +0100
+@@ -44,18 +44,3 @@
+         Cache(dataset_name=text_dir.name, hash="missing").download_and_prepare()
+     with pytest.raises(ValueError):
+         Cache(dataset_name=text_dir.name, config_name="missing", version="auto", hash="auto").download_and_prepare()
+-
+-
+-@pytest.mark.integration
+-def test_cache_multi_configs():
+-    repo_id = SAMPLE_DATASET_TWO_CONFIG_IN_METADATA
+-    dataset_name = repo_id.split("/")[-1]
+-    config_name = "v1"
+-    ds = load_dataset(repo_id, config_name)
+-    cache = Cache(dataset_name=dataset_name, repo_id=repo_id, config_name=config_name, version="auto", hash="auto")
+-    reloaded = cache.as_dataset()
+-    assert list(ds) == list(reloaded)
+-    assert len(ds["train"]) == len(reloaded["train"])
+-    with pytest.raises(ValueError) as excinfo:
+-        Cache(dataset_name=dataset_name, repo_id=repo_id, config_name="missing", version="auto", hash="auto")
+-    assert config_name in str(excinfo.value)



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/datasets/, sci-libs/datasets/files/
@ 2024-02-22  7:27 Alfredo Tupone
From: Alfredo Tupone @ 2024-02-22  7:27 UTC
  To: gentoo-commits

commit:     05e867ed4f4ffc6bcbf57160225dcce103062057
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 22 07:26:13 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Feb 22 07:26:40 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=05e867ed

sci-libs/datasets: add 2.17.1, drop 2.16.0

Bug: https://bugs.gentoo.org/921090
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/Manifest                         |  2 +-
 ...tasets-2.16.0.ebuild => datasets-2.17.1.ebuild} |  8 ++++--
 .../datasets/files/datasets-2.14.4-tests.patch     | 10 --------
 ...6.0-tests.patch => datasets-2.17.1-tests.patch} | 29 ++++++++--------------
 4 files changed, 17 insertions(+), 32 deletions(-)

diff --git a/sci-libs/datasets/Manifest b/sci-libs/datasets/Manifest
index 0880ec7cb629..1b5292db6bf4 100644
--- a/sci-libs/datasets/Manifest
+++ b/sci-libs/datasets/Manifest
@@ -1 +1 @@
-DIST datasets-2.16.0.gh.tar.gz 2163874 BLAKE2B baec91a0e39fac3e07f11e352a286c0940cbc672e7233267e70d1abb64dd31bae18c55213a20fafaeaf2f60268104f294c77c9b73ddc1b289175904288a7c440 SHA512 f2a17ffab192163cfc196cc2bad0adb2ca657b5cf911f74f299b6e29eb4fcfacc377505b1857974a6b55252eedf8775a8706f9e991450c55e5d613020dc03735
+DIST datasets-2.17.1.gh.tar.gz 2168860 BLAKE2B ad7e9be7e60125d53b19b6277b6be6ae6050321e4210293a37737a345a4806d4901e9507fbf7a51c5e00a91912656d68a94e76cf70e070433beccc6e1ad54643 SHA512 43617c3d98cc3ad17fb577d6e917d164c8b6ec24740604ca281adaa2f0e5a6538633721792c9fa6621b7f1980161d8acf62dcdcdacca56e1739a8f28e3c71cdf

diff --git a/sci-libs/datasets/datasets-2.16.0.ebuild b/sci-libs/datasets/datasets-2.17.1.ebuild
similarity index 91%
rename from sci-libs/datasets/datasets-2.16.0.ebuild
rename to sci-libs/datasets/datasets-2.17.1.ebuild
index a34fcaa2f89c..9b6295db1a0e 100644
--- a/sci-libs/datasets/datasets-2.16.0.ebuild
+++ b/sci-libs/datasets/datasets-2.17.1.ebuild
@@ -27,12 +27,16 @@ RDEPEND="
 	$(python_gen_cond_dep '
 		dev-python/absl-py[${PYTHON_USEDEP}]
 		dev-python/aiohttp[${PYTHON_USEDEP}]
+		dev-python/dill[${PYTHON_USEDEP}]
+		dev-python/filelock[${PYTHON_USEDEP}]
 		<=dev-python/fsspec-2023.10.0[${PYTHON_USEDEP}]
 		dev-python/multiprocess[${PYTHON_USEDEP}]
+		dev-python/numpy[${PYTHON_USEDEP}]
 		dev-python/packaging[${PYTHON_USEDEP}]
 		dev-python/pandas[${PYTHON_USEDEP}]
 		dev-python/pyarrow[${PYTHON_USEDEP},parquet,snappy]
 		dev-python/pyyaml[${PYTHON_USEDEP}]
+		dev-python/requests[${PYTHON_USEDEP}]
 		dev-python/tqdm[${PYTHON_USEDEP}]
 		dev-python/xxhash[${PYTHON_USEDEP}]
 		dev-python/zstandard[${PYTHON_USEDEP}]
@@ -46,7 +50,7 @@ BDEPEND="test? (
 		dev-python/absl-py[${PYTHON_USEDEP}]
 		dev-python/pytest-datadir[${PYTHON_USEDEP}]
 		dev-python/decorator[${PYTHON_USEDEP}]
-		=dev-python/sqlalchemy-1*[${PYTHON_USEDEP}]
+		dev-python/sqlalchemy[${PYTHON_USEDEP}]
 		sci-libs/jiwer[${PYTHON_USEDEP}]
 		sci-libs/seqeval[${PYTHON_USEDEP}]
 	')
@@ -79,5 +83,5 @@ src_prepare() {
 		tests/test_streaming_download_manager.py \
 		tests/commands/test_test.py \
 		tests/packaged_modules/test_cache.py \
-		die
+		|| die
 }

diff --git a/sci-libs/datasets/files/datasets-2.14.4-tests.patch b/sci-libs/datasets/files/datasets-2.14.4-tests.patch
index 5dd322309b20..b9791c04e8e0 100644
--- a/sci-libs/datasets/files/datasets-2.14.4-tests.patch
+++ b/sci-libs/datasets/files/datasets-2.14.4-tests.patch
@@ -8,16 +8,6 @@
      @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
      @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
      def test_load_metric(self, metric_name):
---- a/tests/test_hf_gcp.py	2023-05-04 19:33:31.150825303 +0200
-+++ b/tests/test_hf_gcp.py	2023-05-04 19:40:08.401759538 +0200
-@@ -75,6 +75,7 @@
-             self.assertTrue(os.path.exists(datset_info_path))
- 
- 
-+@pytest.mark.skip(reason="require apache_beam")
- @pytest.mark.integration
- def test_as_dataset_from_hf_gcs(tmp_path_factory):
-     tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
 --- a/tests/test_distributed.py	2023-05-04 19:43:09.861275030 +0200
 +++ b/tests/test_distributed.py	2023-05-04 19:44:17.608326722 +0200
 @@ -74,6 +74,7 @@

diff --git a/sci-libs/datasets/files/datasets-2.16.0-tests.patch b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
similarity index 90%
rename from sci-libs/datasets/files/datasets-2.16.0-tests.patch
rename to sci-libs/datasets/files/datasets-2.17.1-tests.patch
index 8cb89e824b3b..14ae50602d10 100644
--- a/sci-libs/datasets/files/datasets-2.16.0-tests.patch
+++ b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
@@ -1,6 +1,6 @@
 --- a/tests/test_arrow_dataset.py	2024-02-20 21:53:24.248470991 +0100
 +++ b/tests/test_arrow_dataset.py	2024-02-20 21:53:29.441804737 +0100
-@@ -3982,7 +3982,6 @@
+@@ -4016,7 +4016,6 @@
      [
          "relative/path",
          "/absolute/path",
@@ -10,15 +10,15 @@
      ],
 --- a/tests/test_load.py	2024-02-20 22:12:13.699209107 +0100
 +++ b/tests/test_load.py	2024-02-20 22:13:10.862626708 +0100
-@@ -386,6 +386,7 @@
+@@ -388,6 +388,7 @@
              hf_modules_cache=self.hf_modules_cache,
          )
  
 +    @pytest.mark.skip(reason="")
      def test_HubDatasetModuleFactoryWithScript_dont_trust_remote_code(self):
-         # "squad" has a dataset script
+         # "lhoestq/test" has a dataset script
          factory = HubDatasetModuleFactoryWithScript(
-@@ -402,6 +402,7 @@
+@@ -403,6 +404,7 @@
          )
          self.assertRaises(ValueError, factory.get_module)
  
@@ -26,7 +26,7 @@
      def test_HubDatasetModuleFactoryWithScript_with_github_dataset(self):
          # "wmt_t2t" has additional imports (internal)
          factory = HubDatasetModuleFactoryWithScript(
-@@ -411,6 +412,7 @@
+@@ -412,6 +414,7 @@
          assert importlib.import_module(module_factory_result.module_path) is not None
          assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
  
@@ -34,7 +34,7 @@
      def test_GithubMetricModuleFactory_with_internal_import(self):
          # "squad_v2" requires additional imports (internal)
          factory = GithubMetricModuleFactory(
-@@ -419,6 +421,7 @@
+@@ -420,6 +423,7 @@
          module_factory_result = factory.get_module()
          assert importlib.import_module(module_factory_result.module_path) is not None
  
@@ -42,7 +42,7 @@
      @pytest.mark.filterwarnings("ignore:GithubMetricModuleFactory is deprecated:FutureWarning")
      def test_GithubMetricModuleFactory_with_external_import(self):
          # "bleu" requires additional imports (external from github)
-@@ -1032,6 +1035,7 @@
+@@ -1033,6 +1037,7 @@
          datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config")
  
  
@@ -50,7 +50,7 @@
  @pytest.mark.parametrize("serializer", [pickle, dill])
  def test_load_dataset_builder_with_metadata_configs_pickable(serializer):
      builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA)
-@@ -1153,6 +1157,7 @@
+@@ -1154,6 +1159,7 @@
      assert len(builder.config.data_files["test"]) > 0
  
  
@@ -58,7 +58,7 @@
  def test_load_dataset_builder_fail():
      with pytest.raises(DatasetNotFoundError):
          datasets.load_dataset_builder("blabla")
-@@ -1168,6 +1173,7 @@
+@@ -1169,6 +1175,7 @@
      assert isinstance(next(iter(dataset["train"])), dict)
  
  
@@ -68,7 +68,7 @@
      assert isinstance(dataset, DatasetDict)
 --- a/tests/test_hf_gcp.py	2024-02-21 09:59:26.918397895 +0100
 +++ b/tests/test_hf_gcp.py	2024-02-21 09:59:46.335100597 +0100
-@@ -47,6 +47,7 @@
+@@ -45,6 +45,7 @@
          ]
  
  
@@ -78,15 +78,6 @@
      dataset = None
 --- a/tests/test_inspect.py	2024-02-21 10:03:32.315520016 +0100
 +++ b/tests/test_inspect.py	2024-02-21 10:03:50.345553490 +0100
-@@ -18,7 +18,7 @@
- pytestmark = pytest.mark.integration
- 
- 
--@pytest.mark.parametrize("path", ["paws", csv.__file__])
-+@pytest.mark.parametrize("path", [csv.__file__])
- def test_inspect_dataset(path, tmp_path):
-     inspect_dataset(path, tmp_path)
-     script_name = Path(path).stem + ".py"
 @@ -49,6 +49,7 @@
      assert list(info.splits.keys()) == expected_splits
  



* [gentoo-commits] repo/gentoo:master commit in: sci-libs/datasets/, sci-libs/datasets/files/
@ 2024-02-22 18:37 Alfredo Tupone
From: Alfredo Tupone @ 2024-02-22 18:37 UTC
  To: gentoo-commits

commit:     ca4289c9b372e31848e4f95ed8f7564406ab797d
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 22 18:36:24 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Thu Feb 22 18:37:11 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=ca4289c9

sci-libs/datasets: drop tests that need network

Closes: https://bugs.gentoo.org/925220
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/datasets/datasets-2.17.1.ebuild           |   1 -
 .../datasets/files/datasets-2.14.4-tests.patch     | 232 --------------------
 .../datasets/files/datasets-2.17.1-tests.patch     | 240 +++++++++++++++++++++
 3 files changed, 240 insertions(+), 233 deletions(-)

diff --git a/sci-libs/datasets/datasets-2.17.1.ebuild b/sci-libs/datasets/datasets-2.17.1.ebuild
index 9b6295db1a0e..65e38b9dbef7 100644
--- a/sci-libs/datasets/datasets-2.17.1.ebuild
+++ b/sci-libs/datasets/datasets-2.17.1.ebuild
@@ -57,7 +57,6 @@ BDEPEND="test? (
 )"
 
 PATCHES=(
-	"${FILESDIR}"/${PN}-2.14.4-tests.patch
 	"${FILESDIR}"/${P}-tests.patch
 )
 

diff --git a/sci-libs/datasets/files/datasets-2.14.4-tests.patch b/sci-libs/datasets/files/datasets-2.14.4-tests.patch
deleted file mode 100644
index b9791c04e8e0..000000000000
--- a/sci-libs/datasets/files/datasets-2.14.4-tests.patch
+++ /dev/null
@@ -1,232 +0,0 @@
---- a/tests/test_metric_common.py	2023-05-04 18:48:48.550861318 +0200
-+++ b/tests/test_metric_common.py	2023-05-04 18:50:25.787364577 +0200
-@@ -93,6 +93,7 @@
-     INTENSIVE_CALLS_PATCHER = {}
-     metric_name = None
- 
-+    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
-     @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
-     @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
-     def test_load_metric(self, metric_name):
---- a/tests/test_distributed.py	2023-05-04 19:43:09.861275030 +0200
-+++ b/tests/test_distributed.py	2023-05-04 19:44:17.608326722 +0200
-@@ -74,6 +74,7 @@
-         split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize("streaming", [False, True])
- @require_torch
- @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
-@@ -95,6 +96,7 @@
-     execute_subprocess_async(cmd, env=os.environ.copy())
- 
- 
-+@pytest.mark.skip(reason="require distributed torch")
- @pytest.mark.parametrize(
-     "nproc_per_node, num_workers",
-     [
---- a/tests/utils.py	2023-05-06 08:43:16.251987543 +0200
-+++ b/tests/utils.py	2023-05-06 08:44:24.467952870 +0200
-@@ -50,8 +50,8 @@
- # Audio
- require_sndfile = pytest.mark.skipif(
-     # On Windows and OS X, soundfile installs sndfile
--    find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
--    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
-+    True,
-+    reason="test requires librosa",
- )
- 
- # Beam
---- a/tests/features/test_audio.py	2023-05-06 09:03:58.680108142 +0200
-+++ a/tests/features/test_audio.py	2023-05-06 09:05:50.463407967 +0200
-@@ -57,6 +57,7 @@
-     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -81,6 +82,7 @@
-     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_example",
-     [
-@@ -148,6 +149,7 @@
-     assert decoded_example["sampling_rate"] == 48000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
- def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
-     audio_path = str(shared_datadir / "test_audio_16000.pcm")
-@@ -414,6 +417,7 @@
-     assert column[0]["sampling_rate"] == 16000
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- @pytest.mark.parametrize(
-     "build_data",
-     [
-@@ -438,6 +442,7 @@
-     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -451,6 +456,7 @@
-     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
- 
- 
-+@pytest.mark.skip(reason="require librosa")
- def test_dataset_concatenate_nested_audio_features(shared_datadir):
-     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-@@ -610,6 +616,7 @@
-     assert isinstance(ds, Dataset)
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -627,6 +634,7 @@
-     assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
-@@ -658,6 +666,7 @@
-         assert column[0] == {"path": audio_path, "bytes": None}
- 
- 
-+@require_sndfile
- def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
-     audio_path = str(shared_datadir / "test_audio_44100.wav")
-     data = {"audio": [audio_path]}
---- a/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:00:39.560876163 +0200
-+++ b/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:01:26.005212423 +0200
-@@ -1,10 +1,8 @@
- import shutil
- import textwrap
- 
--import librosa
- import numpy as np
- import pytest
--import soundfile as sf
- 
- from datasets import Audio, ClassLabel, Features, Value
- from datasets.data_files import DataFilesDict, get_data_patterns
-@@ -192,8 +190,11 @@
-     return data_files_with_two_splits_and_metadata
- 
- 
-+@pytest.mark.skip(reason="require soundfile")
- @pytest.fixture
- def data_files_with_zip_archives(tmp_path, audio_file):
-+    import soundfile as sf
-+    import librosa
-     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
-     data_dir.mkdir(parents=True, exist_ok=True)
-     archive_dir = data_dir / "archive"
---- a/tests/test_arrow_dataset.py	2023-05-06 15:36:11.080459079 +0200
-+++ b/tests/test_arrow_dataset.py	2023-05-06 15:38:07.452828528 +0200
-@@ -4136,6 +4136,7 @@
-                 )
-                 self.assertDictEqual(features_after_cast, dset.features)
- 
-+    @pytest.mark.skip(reason="require soundfile")
-     def test_task_automatic_speech_recognition(self):
-         # Include a dummy extra column `dummy` to test we drop it correctly
-         features_before_cast = Features(
---- a/tests/test_streaming_download_manager.py	2023-08-26 07:33:41.937389401 +0200
-+++ b/tests/test_streaming_download_manager.py	2023-08-26 07:37:22.521218698 +0200
-@@ -218,6 +218,7 @@
-     assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, exists",
-     [
-@@ -301,6 +302,7 @@
-         assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -331,6 +333,7 @@
-         xlistdir(root_url, download_config=download_config)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isdir",
-     [
-@@ -358,6 +361,7 @@
-     assert xisdir(root_url, download_config=download_config) is False
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, isfile",
-     [
-@@ -382,6 +386,7 @@
-     assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, size",
-     [
-@@ -407,6 +412,7 @@
-         xgetsize(root_url + "qwertyuiop", download_config=download_config)
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_paths",
-     [
-@@ -450,6 +456,7 @@
-     assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0
- 
- 
-+@pytest.mark.skip(reason="not working in sandbox")
- @pytest.mark.parametrize(
-     "input_path, expected_outputs",
-     [
-@@ -540,6 +547,7 @@
-     def test_xpath_as_posix(self, input_path, expected_path):
-         assert xPath(input_path).as_posix() == expected_path
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, exists",
-         [
-@@ -555,6 +563,7 @@
-             (tmp_path / "file.txt").touch()
-         assert xexists(input_path) is exists
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [
-@@ -593,6 +602,7 @@
-         output_paths = sorted(xPath(input_path).glob(pattern))
-         assert output_paths == expected_paths
- 
-+    @pytest.mark.skip(reason="not working in sandbox")
-     @pytest.mark.parametrize(
-         "input_path, pattern, expected_paths",
-         [

diff --git a/sci-libs/datasets/files/datasets-2.17.1-tests.patch b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
index 14ae50602d10..2281598dfb38 100644
--- a/sci-libs/datasets/files/datasets-2.17.1-tests.patch
+++ b/sci-libs/datasets/files/datasets-2.17.1-tests.patch
@@ -8,6 +8,14 @@
          "hdfs://relative/path",
          "hdfs:///absolute/path",
      ],
+@@ -4136,6 +4136,7 @@
+                 )
+                 self.assertDictEqual(features_after_cast, dset.features)
+ 
++    @pytest.mark.skip(reason="require soundfile")
+     def test_task_automatic_speech_recognition(self):
+         # Include a dummy extra column `dummy` to test we drop it correctly
+         features_before_cast = Features(
 --- a/tests/test_load.py	2024-02-20 22:12:13.699209107 +0100
 +++ b/tests/test_load.py	2024-02-20 22:13:10.862626708 +0100
 @@ -388,6 +388,7 @@
@@ -122,3 +130,235 @@
  @pytest.mark.parametrize("remote", [False, True])
  @pytest.mark.parametrize("drop_labels", [None, True, False])
  def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir):
+--- a/tests/test_metric_common.py	2023-05-04 18:48:48.550861318 +0200
++++ b/tests/test_metric_common.py	2023-05-04 18:50:25.787364577 +0200
+@@ -93,6 +93,7 @@
+     INTENSIVE_CALLS_PATCHER = {}
+     metric_name = None
+ 
++    @pytest.mark.skip(reason="disabling, depends on bert_score, bleurt, math_equivalence, coval, nltk, faiss, mauve, rouge_score, sacrebleu, sacremoses ...")
+     @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
+     @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
+     def test_load_metric(self, metric_name):
+--- a/tests/test_distributed.py	2023-05-04 19:43:09.861275030 +0200
++++ b/tests/test_distributed.py	2023-05-04 19:44:17.608326722 +0200
+@@ -74,6 +74,7 @@
+         split_dataset_by_node(full_ds.shuffle(), rank=0, world_size=world_size)
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize("streaming", [False, True])
+ @require_torch
+ @pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
+@@ -95,6 +96,7 @@
+     execute_subprocess_async(cmd, env=os.environ.copy())
+ 
+ 
++@pytest.mark.skip(reason="require distributed torch")
+ @pytest.mark.parametrize(
+     "nproc_per_node, num_workers",
+     [
+--- a/tests/utils.py	2023-05-06 08:43:16.251987543 +0200
++++ b/tests/utils.py	2023-05-06 08:44:24.467952870 +0200
+@@ -50,8 +50,8 @@
+ # Audio
+ require_sndfile = pytest.mark.skipif(
+     # On Windows and OS X, soundfile installs sndfile
+-    find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"),
+-    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
++    True,
++    reason="test requires librosa",
+ )
+ 
+ # Beam
+--- a/tests/features/test_audio.py	2023-05-06 09:03:58.680108142 +0200
++++ a/tests/features/test_audio.py	2023-05-06 09:05:50.463407967 +0200
+@@ -57,6 +57,7 @@
+     assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -81,6 +82,7 @@
+     assert decoded_example.keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_example",
+     [
+@@ -148,6 +149,7 @@
+     assert decoded_example["sampling_rate"] == 48000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
+ def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
+     audio_path = str(shared_datadir / "test_audio_16000.pcm")
+@@ -414,6 +417,7 @@
+     assert column[0]["sampling_rate"] == 16000
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ @pytest.mark.parametrize(
+     "build_data",
+     [
+@@ -438,6 +442,7 @@
+     assert item["audio"].keys() == {"path", "array", "sampling_rate"}
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -451,6 +456,7 @@
+     assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
+ 
+ 
++@pytest.mark.skip(reason="require librosa")
+ def test_dataset_concatenate_nested_audio_features(shared_datadir):
+     # we use a different data structure between 1 and 2 to make sure they are compatible with each other
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+@@ -610,6 +616,7 @@
+     assert isinstance(ds, Dataset)
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -627,6 +634,7 @@
+     assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+@@ -658,6 +666,7 @@
+         assert column[0] == {"path": audio_path, "bytes": None}
+ 
+ 
++@require_sndfile
+ def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
+     audio_path = str(shared_datadir / "test_audio_44100.wav")
+     data = {"audio": [audio_path]}
+--- a/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:00:39.560876163 +0200
++++ b/tests/packaged_modules/test_audiofolder.py	2023-05-06 14:01:26.005212423 +0200
+@@ -1,10 +1,8 @@
+ import shutil
+ import textwrap
+ 
+-import librosa
+ import numpy as np
+ import pytest
+-import soundfile as sf
+ 
+ from datasets import Audio, ClassLabel, Features, Value
+ from datasets.data_files import DataFilesDict, get_data_patterns
+@@ -192,8 +190,11 @@
+     return data_files_with_two_splits_and_metadata
+ 
+ 
++@pytest.mark.skip(reason="require soundfile")
+ @pytest.fixture
+ def data_files_with_zip_archives(tmp_path, audio_file):
++    import soundfile as sf
++    import librosa
+     data_dir = tmp_path / "audiofolder_data_dir_with_zip_archives"
+     data_dir.mkdir(parents=True, exist_ok=True)
+     archive_dir = data_dir / "archive"
+--- a/tests/test_streaming_download_manager.py	2023-08-26 07:33:41.937389401 +0200
++++ b/tests/test_streaming_download_manager.py	2023-08-26 07:37:22.521218698 +0200
+@@ -218,6 +218,7 @@
+     assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix())
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, exists",
+     [
+@@ -301,6 +302,7 @@
+         assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_paths",
+     [
+@@ -331,6 +333,7 @@
+         xlistdir(root_url, download_config=download_config)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, isdir",
+     [
+@@ -358,6 +361,7 @@
+     assert xisdir(root_url, download_config=download_config) is False
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, isfile",
+     [
+@@ -382,6 +386,7 @@
+     assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, size",
+     [
+@@ -407,6 +412,7 @@
+         xgetsize(root_url + "qwertyuiop", download_config=download_config)
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_paths",
+     [
+@@ -450,6 +456,7 @@
+     assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0
+ 
+ 
++@pytest.mark.skip(reason="not working in sandbox")
+ @pytest.mark.parametrize(
+     "input_path, expected_outputs",
+     [
+@@ -540,6 +547,7 @@
+     def test_xpath_as_posix(self, input_path, expected_path):
+         assert xPath(input_path).as_posix() == expected_path
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, exists",
+         [
+@@ -555,6 +563,7 @@
+             (tmp_path / "file.txt").touch()
+         assert xexists(input_path) is exists
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, pattern, expected_paths",
+         [
+@@ -593,6 +602,7 @@
+         output_paths = sorted(xPath(input_path).glob(pattern))
+         assert output_paths == expected_paths
+ 
++    @pytest.mark.skip(reason="not working in sandbox")
+     @pytest.mark.parametrize(
+         "input_path, pattern, expected_paths",
+         [
+--- a/tests/io/test_parquet.py	2024-02-22 19:19:53.890749240 +0100
++++ b/tests/io/test_parquet.py	2024-02-22 19:20:30.954099914 +0100
+@@ -69,6 +69,7 @@
+     _check_parquet_dataset(dataset, expected_features)
+ 
+ 
++@pytest.mark.skip()
+ def test_parquet_read_geoparquet(geoparquet_path, tmp_path):
+     cache_dir = tmp_path / "cache"
+     dataset = ParquetDatasetReader(path_or_paths=geoparquet_path, cache_dir=cache_dir).read()


