Refactor conversion script
convert_datasets_to_json.py  (+29 -65)
@@ -1,23 +1,10 @@
-import subprocess
-import urllib
-
 import typer
-from datasets import (DatasetDict, get_dataset_config_names,
-                      load_dataset)
+from datasets import (Dataset, DatasetDict, get_dataset_config_names,
+                      load_dataset)
 from huggingface_hub import list_datasets
 
 app = typer.Typer()
 
-# These datasets do not agree with the GitHub sources due to small
-# inconsistencies in `gem_id` conventions.
-# Otherwise, they are identical and we skip them from the validation step.
-validation_blocklist = [
-    "dart_test",
-    "schema_guided_dialog_test",
-    "web_nlg",  # Prefix for multiple datasets
-    "wiki_auto",  # Prefix for multiple datasets
-]
-
 
 def convert(dataset_id: str):
     dataset_name = dataset_id.split("/")[-1]
@@ -34,60 +21,36 @@ def convert(dataset_id: str):
             datasets_to_convert[split] = dataset
 
     for split, dataset in datasets_to_convert.items():
-        columns_to_keep = ["gem_id", "gem_parent_id", "target"]
-        ...
-        dataset = dataset.map(lambda x: {"target": [x["target"]]})
-        # Delete unused columns
-        # The test split doesn't have a parent ID
-        if split == "test":
-            columns_to_keep.remove("gem_parent_id")
-        # The `datasets` JSON serializer is buggy - use `pandas` for now
-        df = dataset.to_pandas()
-        # Exclude dummy config names for comparison with GitHub source dataset
-        if config in ["default", "xsum", "totto"]:
-            reference_name = f"{dataset_name}_{split}"
+        columns_to_keep = ["gem_id", "gem_parent_id", "target", "references"]
+        remainder_cols = validate_columns(dataset)
+        if len(remainder_cols) > 0:
+            typer.echo(
+                f"⚠️ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
+            )
+            skipped_validation.append(f"{dataset_name}/{config}/{split}")
         else:
-            ...
+            # Add `input` column if it exists
+            if "input" in dataset.column_names:
+                columns_to_keep.append("input")
+            # The test split doesn't have a parent ID
+            if split == "test":
+                columns_to_keep.remove("gem_parent_id")
+            # The `datasets` JSON serializer is buggy - use `pandas` for now
+            df = dataset.to_pandas()
+            # Exclude dummy config names for comparison with GitHub source dataset
+            if config in ["default", "xsum", "totto"]:
+                reference_name = f"{dataset_name}_{split}"
+            else:
+                reference_name = f"{dataset_name}_{config}_{split}"
+            df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
 
     typer.echo(f"Skipped validation for {skipped_validation}")
 
 
-def ...
-    ...
-    try:
-        urllib.request.urlretrieve(
-            url,
-            f"github_references/{reference_name}.json",
-        )
-    except Exception:
-        typer.echo(f"⛔ Could not download {reference_name} dataset from GitHub. Skipping validation ...")
-        return False
-    # Run diff - requires `jq`
-    process = subprocess.run(
-        f"diff <(jq --sort-keys . {reference_name}.json) <(jq --sort-keys . ./github_references/{reference_name}.json)",
-        shell=True,
-        stdout=subprocess.PIPE,
-        executable="/bin/bash",
-    )
-    if process.stdout:
-        # typer.echo(process.stdout)
-        raise ValueError(f"❌ Validation failed for {reference_name}! New and original references do not agree 😭")
-    else:
-        typer.echo(f"✅ Validation successful for {reference_name}!")
-        return True
+def validate_columns(dataset: Dataset):
+    ref_columns = ["gem_id", "gem_parent_id", "target", "references"]
+    columns = dataset.column_names
+    return set(ref_columns) - set(columns)
 
 
 @app.command()
@@ -122,7 +85,8 @@ def main():
         "dstc10_track2_task2",
         "opusparcus",
         "xlsum",
-        "...
+        "wiki_auto_asset_turk",  # Can't be loaded
+        "references",  # This repo, so exclude!
     ]
     blocklist = ["GEM/" + dataset for dataset in blocklist]
     gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]
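The new `validate_columns` helper is a plain set difference over the expected GEM reference columns. A minimal sketch of how it behaves, using a toy split invented for illustration:

```python
from datasets import Dataset

def validate_columns(dataset: Dataset):
    # Columns every GEM reference split is expected to carry.
    ref_columns = ["gem_id", "gem_parent_id", "target", "references"]
    return set(ref_columns) - set(dataset.column_names)

# Toy split that lacks a `references` column (invented for illustration).
toy = Dataset.from_dict(
    {"gem_id": ["toy-0"], "gem_parent_id": ["toy-0"], "target": ["a target"]}
)
print(validate_columns(toy))  # {'references'} -> this split would be skipped
```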
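Serialization still routes through `pandas` because of the `datasets` JSON serializer bug noted in the retained comment. A sketch of the write path with one hypothetical test-split row (so `gem_parent_id` is already dropped):

```python
import pandas as pd

# One hypothetical row mirroring a converted test split.
df = pd.DataFrame(
    [{"gem_id": "toy_test-0", "target": ["a target"], "references": ["a target"]}]
)
columns_to_keep = ["gem_id", "target", "references"]
# orient="records" writes a JSON array with one object per row.
df[columns_to_keep].to_json("toy_test.json", orient="records")
```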
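The removed `dataset.map(lambda x: {"target": [x["target"]]})` call wrapped each scalar `target` in a singleton list, a normalization step that presumably becomes unnecessary once the dataset's own `references` column is kept. For illustration, the pattern works like this:

```python
from datasets import Dataset

toy = Dataset.from_dict({"gem_id": ["toy-0"], "target": ["a target"]})
# Replaces the scalar `target` with a one-element list, example by example.
wrapped = toy.map(lambda x: {"target": [x["target"]]})
print(wrapped[0]["target"])  # ['a target']
```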
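The deleted helper validated each export against the GEM GitHub references by downloading the reference file and diffing key-sorted JSON. Because bash process substitution (`<(...)`) is used, the call needs `shell=True` and `executable="/bin/bash"`, and `jq` must be installed. A self-contained sketch of the same technique, with placeholder file names:

```python
import subprocess

def json_files_agree(new_file: str, reference_file: str) -> bool:
    # `jq --sort-keys .` canonicalises key order so `diff` only reports
    # genuine content differences between the two JSON files.
    process = subprocess.run(
        f"diff <(jq --sort-keys . {new_file}) <(jq --sort-keys . {reference_file})",
        shell=True,
        stdout=subprocess.PIPE,
        executable="/bin/bash",  # process substitution `<(...)` needs bash
    )
    # `diff` prints nothing and exits 0 when the inputs match.
    return not process.stdout

print(json_files_agree("toy_test.json", "github_references/toy_test.json"))
```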
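Finally, the blocklist in `main()` filters on fully qualified dataset ids. A sketch of that step, assuming a recent `huggingface_hub` in which `list_datasets` accepts an `author` filter and yields dataset infos with an `id` attribute; the blocked names here are just examples from the list above:

```python
from huggingface_hub import list_datasets

# Each dataset info carries a fully qualified id such as "GEM/xsum".
gem_datasets = list(list_datasets(author="GEM"))
blocklist = ["GEM/" + name for name in ["references", "xlsum"]]
gem_datasets = [d for d in gem_datasets if d.id not in blocklist]
print(len(gem_datasets))
```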