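"""Convert the evaluation splits of GEM datasets on the Hugging Face Hub into
JSON reference files, recording any conversion failures in a CSV report."""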
import typer
from datasets import (
    Dataset,
    DatasetDict,
    get_dataset_config_names,
    load_dataset,
)
from huggingface_hub import list_datasets
import pandas as pd

app = typer.Typer()


def convert(dataset_id: str):
    """Convert the evaluation splits of a GEM dataset to JSON reference files,
    returning a list of error records for any step that failed."""
    errors = []
    dataset_name = dataset_id.split("/")[-1]
    try:
        configs = get_dataset_config_names(dataset_id)
    except Exception:
        typer.echo(f"❌ Failed to get configs for {dataset_id}")
        errors.append({"dataset_name": dataset_id, "error_type": "config"})
        return errors

    for config in configs:
        typer.echo(f"🛠️🛠️🛠️ Converting {dataset_id} with config {config} 🛠️🛠️🛠️")
        try:
            raw_datasets = load_dataset(dataset_id, name=config)
        except Exception:
            typer.echo(f"❌ Failed to load {dataset_id} with config {config}")
            errors.append({"dataset_name": dataset_id, "config": config, "error_type": "load"})
            continue
        datasets_to_convert = DatasetDict()

        # Keep only the evaluation splits; train and validation are not converted.
        for split, dataset in raw_datasets.items():
            if split not in ["train", "validation"]:
                datasets_to_convert[split] = dataset

        for split, dataset in datasets_to_convert.items():
            columns_to_keep = ["gem_id", "target", "references"]
            remainder_cols = validate_columns(dataset)
            if len(remainder_cols) > 0:
                typer.echo(
                    f"❌ Skipping {dataset_name}/{config}/{split} due to missing columns: {', '.join(remainder_cols)}"
                )
                errors.append(
                    {
                        "dataset_name": dataset_id,
                        "config": config,
                        "split": split,
                        "error_type": "missing_columns",
                        "missing_columns": remainder_cols,
                    }
                )
            else:
                # Optional columns are only kept when the dataset provides them.
                if "input" in dataset.column_names:
                    columns_to_keep.append("input")

                if split != "test" and "gem_parent_id" in dataset.column_names:
                    columns_to_keep.append("gem_parent_id")

                df = dataset.to_pandas()

                # Canonical configs omit the config name from the output filename.
                if config in ["default", "xsum", "totto"]:
                    reference_name = f"{dataset_name}_{split}"
                else:
                    reference_name = f"{dataset_name}_{config}_{split}"
                df[columns_to_keep].to_json(f"{reference_name}.json", orient="records")

        typer.echo(f"✅ Successfully converted {dataset_id} with config {config}")

    return errors
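# Each error record above is a flat dict (dataset_name, config, split,
# error_type, ...), so pandas can turn the list into one CSV row per failure.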


def validate_columns(dataset: Dataset):
    """Return the required reference columns that are missing from the dataset."""
    ref_columns = ["gem_id", "target", "references"]
    columns = dataset.column_names
    return set(ref_columns) - set(columns)
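# For example, a dataset exposing only ["gem_id", "target"] yields
# {"references"}, which convert() reports as a missing_columns error.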


@app.command()
def extract_evaluation_datasets():
    """Convert every GEM dataset on the Hub, skipping a small blocklist."""
    errors = []
    all_datasets = list_datasets()

    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]

    # Datasets that this script should not attempt to convert.
    blocklist = [
        "indonlg",
        "RiSAWOZ",
        "CrossWOZ",
        "references",
    ]
    blocklist = ["GEM/" + dataset for dataset in blocklist]
    gem_datasets = [dataset for dataset in gem_datasets if dataset.id not in blocklist]

    for dataset in gem_datasets:
        errors.extend(convert(dataset.id))

    if len(errors):
        typer.echo("🙈 Found conversion errors!")
        errors_df = pd.DataFrame(errors)
        errors_df.to_csv("conversion_errors.csv", index=False)

    typer.echo("🥳 All datasets converted!")


if __name__ == "__main__":
    app()
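# With a single registered command, Typer runs extract_evaluation_datasets
# directly, e.g. `python extract_evaluation_datasets.py` (filename assumed).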