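"""Convert GEM challenge sets on the Hugging Face Hub to per-split JSON files.

For each GEM dataset, every split other than `train` and `validation` is
downloaded and its reference columns are written to
`{dataset_name}_{config}_{split}.json`.
"""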
from datasets import (get_dataset_config_names, get_dataset_split_names,
                      load_dataset)
from huggingface_hub import list_datasets


def convert(dataset_id: str):
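    """Dump the challenge splits of `dataset_id` to one JSON file per split."""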
    dataset_name = dataset_id.split("/")[-1]
    configs = get_dataset_config_names(dataset_id)

    for config in configs:
        splits = get_dataset_split_names(dataset_id, config)
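        # Keep only the challenge splits, i.e. everything except train/validation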
        splits = [split for split in splits if split not in ["train", "validation"]]
        for split in splits:
            columns_to_keep = ["gem_id", "gem_parent_id", "target"]
            dataset = load_dataset(dataset_id, name=config, split=split)
            # The `references` column appears to hold the target texts, so copy it into `target`
            dataset = dataset.map(lambda x: {"target": x["references"]})
            # The test split doesn't have a parent ID, so drop that column
            if split == "test":
                columns_to_keep.remove("gem_parent_id")
            # The `datasets` JSON serializer is buggy, so select the columns we
            # need and serialize via `pandas` for now
            df = dataset.to_pandas()
            df[columns_to_keep].to_json(f"{dataset_name}_{config}_{split}.json", orient="records")
            # TODO: validate against existing references on GitHub (see the
            # `validate` sketch below), e.g.
            # diff <(jq --sort-keys . mlsum_de_challenge_test_covid.json) <(jq --sort-keys . ~/git/GEM-metrics/data/references/mlsum_de_challenge_test_covid.json)


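def validate(filename: str, references_dir: str = "~/git/GEM-metrics/data/references") -> bool:
    """Minimal sketch for the validation TODO above: a Python stand-in for the
    `jq --sort-keys` diff. The `references_dir` default and the assumption that
    both files share the same record layout are hypothetical, not part of the
    GEM tooling."""
    # Local imports keep this self-contained sketch easy to remove
    import json
    from pathlib import Path

    exported = json.loads(Path(filename).read_text())
    reference = json.loads((Path(references_dir).expanduser() / filename).read_text())
    # Comparing the parsed records is key-order insensitive, like `jq --sort-keys`
    return exported == reference

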
def main():
    all_datasets = list_datasets()
    gem_datasets = [dataset for dataset in all_datasets if dataset.id.startswith("GEM/")]
    # Test run with MLSUM
    mlsum_datasets = [dataset for dataset in gem_datasets if dataset.id.startswith("GEM/mlsum")]
    for dataset in mlsum_datasets:
        convert(dataset.id)


if __name__ == "__main__":
    main()