lewtun (HF staff) committed
Commit 7a519eb · 1 Parent(s): ecc6a08

Refactor conversion script

Files changed (1):
  1. convert_datasets_to_json.py  +29 -65

convert_datasets_to_json.py CHANGED
@@ -1,23 +1,10 @@
-import subprocess
-import urllib
-
 import typer
-from datasets import (DatasetDict, get_dataset_config_names,
-                      get_dataset_split_names, load_dataset)
+from datasets import (Dataset, DatasetDict, get_dataset_config_names,
+                      load_dataset)
 from huggingface_hub import list_datasets
 
 app = typer.Typer()
 
-# These datasets do not agree with the GitHub sources due to small
-# inconsistencies in `gem_id` conventions.
-# Otherwise, they are identical and we skip them from the validation step.
-validation_blocklist = [
-    "dart_test",
-    "schema_guided_dialog_test",
-    "web_nlg",  # Prefix for multiple datasets
-    "wiki_auto",  # Prefix for multiple datasets
-]
-
 
 def convert(dataset_id: str):
     dataset_name = dataset_id.split("/")[-1]
@@ -34,60 +21,36 @@ def convert(dataset_id: str):
             datasets_to_convert[split] = dataset
 
         for split, dataset in datasets_to_convert.items():
-            columns_to_keep = ["gem_id", "gem_parent_id", "target"]
-            # dataset = load_dataset(dataset_id, name=config, split=split)
-            # For non-train splits, it seems we use the references column as the target
-            if "train" not in split:
-                dataset = dataset.map(lambda x: {"target": x["references"]})
-            else:
-                # Wrap references in list to match GEM schema
-                dataset = dataset.map(lambda x: {"target": [x["target"]]})
-            # Delete unused columns
-            # The test split doesn't have a parent ID
-            if split == "test":
-                columns_to_keep.remove("gem_parent_id")
-            # The `datasets` JSON serializer is buggy - use `pandas` for now
-            df = dataset.to_pandas()
-            # Exclude dummy config names for comparison with GitHub source dataset
-            if config in ["default", "xsum", "totto"]:
-                reference_name = f"{dataset_name}_{split}"
+            columns_to_keep = ["gem_id", "gem_parent_id", "target", "references"]
+            remainder_cols = validate_columns(dataset)
+            if len(remainder_cols) > 0:
+                typer.echo(
+                    f"⚠️ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
+                )
+                skipped_validation.append(f"{dataset_name}/{config}/{split}")
             else:
-                reference_name = f"{dataset_name}_{config}_{split}"
-            df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
-            # Exclude known datasets from validation
-            do_validation = list(filter(reference_name.startswith, validation_blocklist)) == []
-            if do_validation:
-                validated = validate(reference_name)
-                if not validated:
-                    skipped_validation.append(reference_name)
+                # Add `input` column if it exists
+                if "input" in dataset.column_names:
+                    columns_to_keep.append("input")
+                # The test split doesn't have a parent ID
+                if split == "test":
+                    columns_to_keep.remove("gem_parent_id")
+                # The `datasets` JSON serializer is buggy - use `pandas` for now
+                df = dataset.to_pandas()
+                # Exclude dummy config names for comparison with GitHub source dataset
+                if config in ["default", "xsum", "totto"]:
+                    reference_name = f"{dataset_name}_{split}"
+                else:
+                    reference_name = f"{dataset_name}_{config}_{split}"
+                df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
 
     typer.echo(f"Skipped validation for {skipped_validation}")
 
 
-def validate(reference_name: str):
-    # Download original references from GitHub repo
-    url = f"https://github.com/GEM-benchmark/GEM-metrics/releases/download/data/{reference_name}.json"
-    try:
-        urllib.request.urlretrieve(
-            url,
-            f"github_references/{reference_name}.json",
-        )
-    except Exception:
-        typer.echo(f"⛔ Could not download {reference_name} dataset from GitHub. Skipping validation ...")
-        return False
-    # Run diff - requires `jq`
-    process = subprocess.run(
-        f"diff <(jq --sort-keys . {reference_name}.json) <(jq --sort-keys . ./github_references/{reference_name}.json)",
-        shell=True,
-        stdout=subprocess.PIPE,
-        executable="/bin/bash",
-    )
-    if process.stdout:
-        # typer.echo(process.stdout)
-        raise ValueError(f"❌ Validation failed for {reference_name}! New and original references do not agree 😭")
-    else:
-        typer.echo(f"✅ Validation successful for {reference_name}!")
-        return True
+def validate_columns(dataset: Dataset):
+    ref_columns = ["gem_id", "gem_parent_id", "target", "references"]
+    columns = dataset.column_names
+    return set(ref_columns) - set(columns)
 
 
 @app.command()
@@ -122,7 +85,8 @@ def main():
         "dstc10_track2_task2",
         "opusparcus",
         "xlsum",
-        "references",
+        "wiki_auto_asset_turk",  # Can't be loaded
+        "references",  # This repo, so exclude!
     ]
     blocklist = ["GEM/" + dataset for dataset in blocklist]
     gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]
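
Side note on the blocklist step above: Hub dataset IDs carry the `GEM/` organisation prefix, so the bare names are expanded with "GEM/" before being compared against the `id` attribute of the entries returned by `list_datasets`. A minimal sketch of the pattern, using stand-in objects (the `Info` class is illustrative, not part of the script):

    # Hypothetical stand-ins for the dataset objects returned by list_datasets;
    # only the `id` attribute matters for this filtering step.
    class Info:
        def __init__(self, id):
            self.id = id

    gem_datasets = [Info("GEM/dart"), Info("GEM/xlsum"), Info("GEM/references")]
    blocklist = ["GEM/" + name for name in ["xlsum", "references"]]
    gem_datasets = [d for d in gem_datasets if d.id not in blocklist]
    print([d.id for d in gem_datasets])  # ['GEM/dart']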
 
 
 
 
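On the new schema check: `validate_columns` returns whichever of the required GEM columns a split is missing, and `convert` now skips such splits (recording them in `skipped_validation`) instead of failing mid-run. A toy example of the set arithmetic, using an in-memory dataset that is illustrative only:

    from datasets import Dataset

    # Toy split that lacks `gem_parent_id` and `references`
    toy = Dataset.from_dict({"gem_id": ["dart-test-0"], "target": ["A sentence."]})

    ref_columns = ["gem_id", "gem_parent_id", "target", "references"]
    missing = set(ref_columns) - set(toy.column_names)
    print(missing)  # {'references', 'gem_parent_id'} -> this split would be skipped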
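On the serialization step: as the in-code comment notes, the script exports through pandas rather than the `datasets` JSON serializer, and `orient="records"` writes one JSON object per row inside a top-level array. A sketch with made-up values:

    import pandas as pd

    # Made-up rows; the real frames hold the columns_to_keep selection
    df = pd.DataFrame({"gem_id": ["dart-test-0"], "target": [["ref A", "ref B"]]})
    print(df.to_json(orient="records"))
    # [{"gem_id":"dart-test-0","target":["ref A","ref B"]}]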
 
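For context on what was removed: the old `validate` step downloaded the corresponding reference file from the GEM-metrics GitHub releases and ran a textual `diff` after normalising both files with `jq --sort-keys`, so key order alone could not trigger a mismatch. A rough pure-Python equivalent of that comparison (file names are hypothetical):

    import json

    # Stands in for: diff <(jq --sort-keys . new.json) <(jq --sort-keys . old.json)
    # Python dict equality ignores key order, which is what sorting keys
    # before a textual diff achieves.
    with open("new.json") as new, open("github_references/old.json") as old:
        match = json.load(new) == json.load(old)
    print("✅ references agree" if match else "❌ references differ")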