import json
import pprint
from pathlib import Path

import numpy as np
import pandas as pd

def create_dataset_splits(
    metadata_path: str,
    output_dir: str,
    train_ratio: float = 0.8,
    val_ratio: float = 0.1,
    seed: int = 42
):
    """
    Create and save train/val/test splits to disk.

    Args:
        metadata_path: Path to the metadata CSV file
        output_dir: Directory to save the split CSV files
        train_ratio: Ratio of data to use for training
        val_ratio: Ratio of data to use for validation
        seed: Random seed for reproducibility
    """

    if train_ratio + val_ratio > 1:
        raise ValueError("train_ratio + val_ratio must not exceed 1")

    df = pd.read_csv(metadata_path)

    np.random.seed(seed)

    # Split on the filename so that a cloned voice always lands in the same
    # split as its original voice, and is never divided across train/val/test.
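    # E.g. if a (hypothetical) clip_001.wav has both a human row and a cloned
    # row in the metadata, both rows end up in the same split.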
    unique_filenames = df['filename'].unique()
    np.random.shuffle(unique_filenames)

    n_samples = len(unique_filenames)
    train_idx = int(n_samples * train_ratio)
    val_idx = int(n_samples * (train_ratio + val_ratio))

    # Create split DataFrames
    splits = {
        'train': df[df['filename'].isin(unique_filenames[:train_idx])],
        'val': df[df['filename'].isin(unique_filenames[train_idx:val_idx])],
        'test': df[df['filename'].isin(unique_filenames[val_idx:])]
    }

    # Save splits
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok=True, parents=True)

    # Save individual splits
    for split_name, split_df in splits.items():
        split_df.to_csv(output_dir / f'{split_name}.csv', index=False)

    # Save split info
    split_info = {
        'metadata_path': metadata_path,
        'seed': seed,
        'ratios': {
            'train': train_ratio,
            'val': val_ratio,
            'test': round(1 - train_ratio - val_ratio, 2),
        },
    }
    for split_name, split_df in splits.items():
        split_info[split_name] = {
            'total_num_samples': len(split_df),
            'human_samples': len(split_df[split_df['cloned_or_human'] == "human"]),
            'cloned_samples': len(split_df[split_df['cloned_or_human'] == "cloned"]),
            'sources': split_df['source'].value_counts().to_dict(),
            'voices_per_source': split_df.groupby('source')['path'].nunique().to_dict(),
        }

    pprint.pprint(split_info)

    with open(output_dir / 'split_info.json', 'w') as f:
        json.dump(split_info, f, indent=2)
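

# A minimal sketch, not part of the original script: a sanity check that the
# grouped split above worked, i.e. that no filename appears in more than one
# split. Assumes the CSV files written by create_dataset_splits.
def check_split_disjointness(output_dir: str) -> None:
    splits = {
        name: set(pd.read_csv(Path(output_dir) / f'{name}.csv')['filename'])
        for name in ('train', 'val', 'test')
    }
    assert not splits['train'] & splits['val'], "train/val overlap"
    assert not splits['train'] & splits['test'], "train/test overlap"
    assert not splits['val'] & splits['test'], "val/test overlap"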


# Example usage:
if __name__ == "__main__":
    metadata_file = 'metadata-valid.csv'
    output_dir = 'splits'

    # Create splits
    create_dataset_splits(metadata_file, output_dir=output_dir)
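# Expected outputs (with the defaults above): splits/train.csv,
# splits/val.csv, splits/test.csv, and splits/split_info.json.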