"""Convert GEM evaluation splits on the Hugging Face Hub to JSON references.

For every `GEM/*` dataset, each split other than `train` and `validation` is
exported as a JSON file containing only the columns needed for evaluation.
Conversion failures are collected in `conversion_errors.csv`.
"""
import typer
import pandas as pd
from datasets import Dataset, DatasetDict, get_dataset_config_names, load_dataset
from huggingface_hub import list_datasets

app = typer.Typer()


def convert(dataset_id: str) -> list:
    """Convert the evaluation splits of `dataset_id` to JSON and return any errors."""
    errors = []
    dataset_name = dataset_id.split("/")[-1]
    try:
        configs = get_dataset_config_names(dataset_id)
    except Exception:
        typer.echo(f"❌ Failed to get configs for {dataset_id}")
        errors.append({"dataset_name": dataset_id, "error_type": "config"})
        return errors

    for config in configs:
        typer.echo(f"🛠️🛠️🛠️ Converting {dataset_id} with config {config} 🛠️🛠️🛠️")
        try:
            raw_datasets = load_dataset(dataset_id, name=config)
        except Exception:
            typer.echo(f"❌ Failed to load {dataset_id} with config {config}")
            errors.append({"dataset_name": dataset_id, "config": config, "error_type": "load"})
            continue
        # Only the evaluation splits are converted, so skip `train` and `validation`
        datasets_to_convert = DatasetDict()
        for split, dataset in raw_datasets.items():
            if split not in ["train", "validation"]:
                datasets_to_convert[split] = dataset

        for split, dataset in datasets_to_convert.items():
            columns_to_keep = ["gem_id", "target", "references"]
            remainder_cols = validate_columns(dataset)
            if remainder_cols:
                typer.echo(
                    f"❌ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(sorted(remainder_cols))}"
                )
                errors.append(
                    {
                        "dataset_name": dataset_id,
                        "config": config,
                        "split": split,
                        "error_type": "missing_columns",
                        "missing_columns": sorted(remainder_cols),
                    }
                )
            else:
                # Add `input` column if it exists
                if "input" in dataset.column_names:
                    columns_to_keep.append("input")
                # The test split doesn't have a parent ID
                # TODO(lewtun): check this logic!
                if split != "test" and "gem_parent_id" in dataset.column_names:
                    columns_to_keep.append("gem_parent_id")
                # The `datasets` JSON serializer is buggy - use `pandas` for now
                df = dataset.to_pandas()
                # Exclude dummy config names for comparison with the GitHub source dataset
                if config in ["default", "xsum", "totto"]:
                    reference_name = f"{dataset_name}_{split}"
                else:
                    reference_name = f"{dataset_name}_{config}_{split}"
                df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")
        typer.echo(f"✅ Successfully converted {dataset_id} with config {config}")

    return errors


def validate_columns(dataset: Dataset) -> set:
    """Return the required reference columns that are missing from `dataset`."""
    ref_columns = ["gem_id", "target", "references"]
    columns = dataset.column_names
    return set(ref_columns) - set(columns)


@app.command()
def extract_evaluation_datasets():
    errors = []
    all_datasets = list_datasets()
    # Filter for GEM datasets
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Exclude datasets on the blocklist
    blocklist = [
        "indonlg",  # Can't load
        "RiSAWOZ",  # Can't load
        "CrossWOZ",  # Can't load
        "references",  # This repo, so exclude!
    ]
    blocklist = ["GEM/" + dataset for dataset in blocklist]
    gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]

    for dataset in gem_datasets:
        errors.extend(convert(dataset.id))

    if errors:
        typer.echo("🙈 Found conversion errors!")
        errors_df = pd.DataFrame(errors)
        errors_df.to_csv("conversion_errors.csv", index=False)
    typer.echo("🥳 All datasets converted!")


if __name__ == "__main__":
    app()
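
# Usage sketch (the filename `convert_refs.py` is an assumption, not part of
# the source; the script requires `typer`, `datasets`, `huggingface_hub`, and
# `pandas` to be installed):
#
#   python convert_refs.py
#
# Because the app defines a single `@app.command()`, Typer invokes
# `extract_evaluation_datasets` directly with no subcommand name. The JSON
# reference files are written to the current working directory, and any
# conversion failures are summarised in `conversion_errors.csv`.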