Refactor conversion script
- README.md +2 -3
- generate_evaluation_datasets.py +97 -0
README.md CHANGED

````diff
@@ -15,12 +15,11 @@ python -m pip install -r requirements.txt
 You can then run the script as follows:
 
 ```python
-python
+python generate_evaluation_datasets.py
 ```
 
 This script will:
 
 * Download and convert the datasets under the GEM organisation to JSON format
-*
-* Perform a diff validation between the converted and original reference datasets via the `jq` library
+* Validate that each dataset has the expected columns of `gem_id`, `target`, and `references`
 
````
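Since the script writes each file with `orient="records"`, the output is a JSON array of row objects. A minimal sketch of how to inspect a converted file (the filename below is hypothetical, following the script's `{dataset_name}_{split}.json` naming):

```python
import pandas as pd

# Hypothetical output file; actual names follow {dataset_name}_{split}.json
df = pd.read_json("common_gen_test.json", orient="records")
print(df.columns.tolist())  # expected: ['gem_id', 'target', 'references', ...]
```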
generate_evaluation_datasets.py ADDED
@@ -0,0 +1,97 @@
```python
import pandas as pd
import typer
from datasets import (Dataset, DatasetDict, get_dataset_config_names,
                      load_dataset)
from huggingface_hub import list_datasets

app = typer.Typer()


def convert(dataset_id: str):
    """Convert the evaluation splits of a GEM dataset to JSON, returning any errors."""
    errors = []
    dataset_name = dataset_id.split("/")[-1]
    try:
        configs = get_dataset_config_names(dataset_id)
    except Exception:
        typer.echo(f"❌ Failed to get configs for {dataset_id}")
        errors.append({"dataset_name": dataset_id, "error_type": "config"})
        return errors

    for config in configs:
        typer.echo(f"🛠️🛠️🛠️ Converting {dataset_id} with config {config} 🛠️🛠️🛠️")
        try:
            raw_datasets = load_dataset(dataset_id, name=config)
        except Exception:
            typer.echo(f"❌ Failed to load {dataset_id} with config {config}")
            errors.append({"dataset_name": dataset_id, "config": config, "error_type": "load"})
            continue

        # Keep only the evaluation splits (test, challenge sets, etc.)
        datasets_to_convert = DatasetDict()
        for split, dataset in raw_datasets.items():
            if split not in ["train", "validation"]:
                datasets_to_convert[split] = dataset

        for split, dataset in datasets_to_convert.items():
            columns_to_keep = ["gem_id", "target", "references"]
            remainder_cols = validate_columns(dataset)
            if len(remainder_cols) > 0:
                typer.echo(
                    f"❌ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
                )
                errors.append(
                    {
                        "dataset_name": dataset_id,
                        "config": config,
                        "split": split,
                        "error_type": "missing_columns",
                        "missing_columns": list(remainder_cols),
                    }
                )
            else:
                # Add `input` column if it exists
                if "input" in dataset.column_names:
                    columns_to_keep.append("input")
                # The test split doesn't have a parent ID
                # TODO(lewtun): check this logic!
                if split != "test" and "gem_parent_id" in dataset.column_names:
                    columns_to_keep.append("gem_parent_id")
                # The `datasets` JSON serializer is buggy - use `pandas` for now
                df = dataset.to_pandas()
                # Exclude dummy config names for comparison with GitHub source dataset
                if config in ["default", "xsum", "totto"]:
                    reference_name = f"{dataset_name}_{split}"
                else:
                    reference_name = f"{dataset_name}_{config}_{split}"
                df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")

        typer.echo(f"✅ Successfully converted {dataset_id} with config {config}")

    return errors


def validate_columns(dataset: Dataset):
    """Return the reference columns that are missing from `dataset`."""
    ref_columns = ["gem_id", "target", "references"]
    columns = dataset.column_names
    return set(ref_columns) - set(columns)


@app.command()
def extract_evaluation_datasets():
    """Convert the evaluation splits of all GEM datasets on the Hub to JSON."""
    errors = []
    all_datasets = list_datasets()
    # Filter for GEM datasets
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Filter out datasets on the blocklist
    blocklist = [
        "indonlg",  # Can't load
        "RiSAWOZ",  # Can't load
        "CrossWOZ",  # Can't load
        "references",  # This repo, so exclude!
    ]
    blocklist = ["GEM/" + dataset for dataset in blocklist]
    gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]

    for dataset in gem_datasets:
        errors.extend(convert(dataset.id))

    if errors:
        typer.echo("😢 Found conversion errors!")
        errors_df = pd.DataFrame(errors)
        errors_df.to_csv("conversion_errors.csv", index=False)

    typer.echo("🥳 All datasets converted!")


if __name__ == "__main__":
    app()
```
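If any conversions fail, the command writes the collected errors to `conversion_errors.csv`. A quick way to triage that file (a sketch, assuming `pandas` is available):

```python
import pandas as pd

errors_df = pd.read_csv("conversion_errors.csv")
# Group failures by stage (config discovery, loading, or missing columns)
print(errors_df["error_type"].value_counts())
```

Note that because `extract_evaluation_datasets` is the only `@app.command()`, Typer exposes it as the default command, which is why the README's `python generate_evaluation_datasets.py` works without naming a subcommand.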