lewtun committed
Commit d05610b
Parent: 8293c53

Refactor conversion script

Files changed (1)
  1. convert_datasets_to_json.py +35 -28
convert_datasets_to_json.py CHANGED
@@ -10,10 +10,16 @@ def convert(dataset_id: str):
     dataset_name = dataset_id.split("/")[-1]
     configs = get_dataset_config_names(dataset_id)
     skipped_validation = []
+    load_errors = []

     for config in configs:
         typer.echo(dataset_id)
-        raw_datasets = load_dataset(dataset_id, name=config)
+        try:
+            raw_datasets = load_dataset(dataset_id, name=config)
+        except:
+            typer.echo(f"Failed to load {dataset_id}")
+            load_errors.append(f"{dataset_id}/{config}")
+            continue
         datasets_to_convert = DatasetDict()

         for split, dataset in raw_datasets.items():
@@ -45,46 +51,47 @@ def convert(dataset_id: str):
         df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")

     typer.echo(f"Skipped validation for {skipped_validation}")
+    typer.echo(f"Load errors: {load_errors}")


 def validate_columns(dataset: Dataset):
-    ref_columns = ["gem_id", "gem_parent_id", "target", "references"]
+    ref_columns = ["gem_id", "target", "references"]
     columns = dataset.column_names
     return set(ref_columns) - set(columns)


 @app.command()
-def main():
+def extract_evaluation_datasets():
     all_datasets = list_datasets()
     # Filter for GEM datasets
     gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
     # Filter for blocklist - currently exclude all datasets not found on GitHUb release
     blocklist = [
-        "ART",
-        "mlb_data_to_text",
-        "OrangeSum",
-        "split_and_rephrase",
-        "wiki_cat_sum",
-        "viggo",
-        "CrossWOZ",
-        "RiSAWOZ",
-        "indonlg",
-        "squad_v2",
-        "BiSECT",
-        "surface_realisation_st_2020",
-        "SciDuet",
-        "cochrane-simplification",
-        "turku_paraphrase_corpus",
-        "turku_hockey_data2text",
-        "sportsett_basketball",
-        "Taskmaster",
-        "wiki_lingua",
-        "SIMPITIKI",
-        "conversational_weather",
-        "RotoWire_English-German",
-        "dstc10_track2_task2",
-        "opusparcus",
-        "xlsum",
+        # "ART",
+        # "mlb_data_to_text",
+        # "OrangeSum",
+        # "split_and_rephrase",
+        # "wiki_cat_sum",
+        # "viggo",
+        # "CrossWOZ",
+        # "RiSAWOZ",
+        # "indonlg",
+        # "squad_v2",
+        # "BiSECT",
+        # "surface_realisation_st_2020",
+        # "SciDuet",
+        # "cochrane-simplification",
+        # "turku_paraphrase_corpus",
+        # "turku_hockey_data2text",
+        # "sportsett_basketball",
+        # "Taskmaster",
+        # "wiki_lingua",
+        # "SIMPITIKI",
+        # "conversational_weather",
+        # "RotoWire_English-German",
+        # "dstc10_track2_task2",
+        # "opusparcus",
+        # "xlsum",
         "wiki_auto_asset_turk", # Can't be loaded
         "references", # This repo, so exclude!
     ]