# convert_datasets_to_json.py
# Dataset conversion script for MLSUM
from datasets import (get_dataset_config_names, get_dataset_split_names,
                      load_dataset)
from huggingface_hub import list_datasets


def convert(dataset_id: str):
    """Convert the evaluation splits of a GEM dataset to JSON reference files."""
    dataset_name = dataset_id.split("/")[-1]
    configs = get_dataset_config_names(dataset_id)
    for config in configs:
        splits = get_dataset_split_names(dataset_id, config)
        # Skip the train and validation splits - only the test and challenge
        # splits are needed as references
        splits = [split for split in splits if split not in ["train", "validation"]]
        for split in splits:
            columns_to_keep = ["gem_id", "gem_parent_id", "target"]
            dataset = load_dataset(dataset_id, name=config, split=split)
            # The `references` column appears to hold the targets, so copy it
            # into a `target` column
            dataset = dataset.map(lambda x: {"target": x["references"]})
            # Delete unused columns
            # The test split doesn't have a parent ID
            if split == "test":
                columns_to_keep.remove("gem_parent_id")
            # The `datasets` JSON serializer is buggy - use `pandas` for now
            df = dataset.to_pandas()
            df[columns_to_keep].to_json(f"{dataset_name}_{config}_{split}.json", orient="records")
            # TODO: validate against existing references on GitHub
            # diff <(jq --sort-keys . mlsum_de_challenge_test_covid.json) <(jq --sort-keys . ~/git/GEM-metrics/data/references/mlsum_de_challenge_test_covid.json)
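
# For reference, each serialized record is expected to look roughly like the
# sketch below (values are illustrative, not taken from the actual data, and
# `gem_parent_id` is dropped for the plain test split):
#
#   {"gem_id": "...", "gem_parent_id": "...", "target": ["..."]}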


def main():
    all_datasets = list_datasets()
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Test run with MLSUM
    mlsum_datasets = [dataset for dataset in gem_datasets if dataset.id.startswith("GEM/mlsum")]
    for dataset in mlsum_datasets:
        convert(dataset.id)


if __name__ == "__main__":
    main()
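
# A minimal usage sketch, assuming `datasets`, `huggingface_hub`, and `pandas`
# are installed:
#
#   python convert_datasets_to_json.py
#
# This writes one JSON file per (config, split) pair into the working
# directory, e.g. mlsum_de_challenge_test_covid.json as in the TODO above.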