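"""Convert the GEM evaluation datasets on the Hugging Face Hub to JSON reference files.

Each evaluation split is written to `{dataset_name}_{config}_{split}.json` in the
current directory (the config name is dropped for dummy configs). Since the app
registers a single Typer command, the script can be run directly, e.g.:

    python convert_datasets_to_json.py
"""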
import typer
from datasets import (Dataset, DatasetDict, get_dataset_config_names,
                      load_dataset)
from huggingface_hub import list_datasets

app = typer.Typer()


def convert(dataset_id: str):
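    """Convert the evaluation splits of a single GEM dataset to JSON reference files.

    `dataset_id` is the Hub repository ID, e.g. "GEM/totto".
    """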
    dataset_name = dataset_id.split("/")[-1]
    configs = get_dataset_config_names(dataset_id)
    skipped_validation = []
    load_errors = []

    for config in configs:
        typer.echo(dataset_id)
        try:
            raw_datasets = load_dataset(dataset_id, name=config)
        except Exception:
            typer.echo(f"Failed to load {dataset_id}/{config}")
            load_errors.append(f"{dataset_id}/{config}")
            continue
        datasets_to_convert = DatasetDict()
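
        # Keep only the evaluation splits; train and validation are excluded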
        for split, dataset in raw_datasets.items():
            if split not in ["train", "validation"]:
                datasets_to_convert[split] = dataset

        for split, dataset in datasets_to_convert.items():
            columns_to_keep = ["gem_id", "gem_parent_id", "target", "references"]
            remainder_cols = validate_columns(dataset)
            if len(remainder_cols) > 0:
                typer.echo(
                    f"⚠️ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
                )
                skipped_validation.append(f"{dataset_name}/{config}/{split}")
            else:
                # Add `input` column if it exists
                if "input" in dataset.column_names:
                    columns_to_keep.append("input")
                # The test split doesn't have a parent ID
                if split == "test":
                    columns_to_keep.remove("gem_parent_id")
                # The `datasets` JSON serializer is buggy - use `pandas` for now
                df = dataset.to_pandas()
                # Exclude dummy config names for comparison with the GitHub source dataset
                if config in ["default", "xsum", "totto"]:
                    reference_name = f"{dataset_name}_{split}"
                else:
                    reference_name = f"{dataset_name}_{config}_{split}"
                df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
typer.echo(f"Skipped validation for {skipped_validation}")
typer.echo(f"Load errors: {load_errors}")
def validate_columns(dataset: Dataset):
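    """Return the required reference columns that are missing from `dataset`."""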
    ref_columns = ["gem_id", "target", "references"]
    columns = dataset.column_names
    return set(ref_columns) - set(columns)


@app.command()
def extract_evaluation_datasets():
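    """Convert all GEM datasets on the Hub, skipping those on the blocklist."""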
    all_datasets = list_datasets()
    # Filter for GEM datasets
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Apply the blocklist - currently excludes all datasets not found in the GitHub release
    blocklist = [
        # "ART",
        # "mlb_data_to_text",
        # "OrangeSum",
        # "split_and_rephrase",
        # "wiki_cat_sum",
        # "viggo",
        # "CrossWOZ",
        # "RiSAWOZ",
        # "indonlg",
        # "squad_v2",
        # "BiSECT",
        # "surface_realisation_st_2020",
        # "SciDuet",
        # "cochrane-simplification",
        # "turku_paraphrase_corpus",
        # "turku_hockey_data2text",
        # "sportsett_basketball",
        # "Taskmaster",
        # "wiki_lingua",
        # "SIMPITIKI",
        # "conversational_weather",
        # "RotoWire_English-German",
        # "dstc10_track2_task2",
        # "opusparcus",
        # "xlsum",
        "wiki_auto_asset_turk",  # Can't be loaded
        "references",  # This repo, so exclude!
    ]
blocklist = ["GEM/" + dataset for dataset in blocklist]
gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]
for dataset in gem_datasets:
typer.echo(f"Converting {dataset.id} ...")
convert(dataset.id)
typer.echo(f"🥳 All datasets converted!")
if __name__ == "__main__":
app()