import json
import pprint
from pathlib import Path

import numpy as np
import pandas as pd
from torch.utils.data import Dataset


def create_dataset_splits(
    metadata_path: str,
    output_dir: str,
    train_ratio: float = 0.8,
    val_ratio: float = 0.1,
    seed: int = 42,
):
    """
    Create and save train/val/test splits to disk.

    Args:
        metadata_path: Path to the metadata CSV file
        output_dir: Directory to save the split CSV files
        train_ratio: Fraction of data to use for training
        val_ratio: Fraction of data to use for validation; the remaining
            1 - train_ratio - val_ratio is used for testing
        seed: Random seed for reproducibility
    """
    df = pd.read_csv(metadata_path)

    np.random.seed(seed)

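    # Shuffle unique filenames rather than rows, so that every row belonging
    # to a given file lands in exactly one split (no leakage across splits).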
    unique_filenames = df['filename'].unique()
    np.random.shuffle(unique_filenames)

    n_samples = len(unique_filenames)
    train_idx = int(n_samples * train_ratio)
    val_idx = int(n_samples * (train_ratio + val_ratio))

    splits = {
        'train': df[df['filename'].isin(unique_filenames[:train_idx])],
        'val': df[df['filename'].isin(unique_filenames[train_idx:val_idx])],
        'test': df[df['filename'].isin(unique_filenames[val_idx:])],
    }
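
    # Write each split to its own CSV inside output_dir.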
    output_dir = Path(output_dir)
    output_dir.mkdir(exist_ok=True, parents=True)

    for split_name, split_df in splits.items():
        split_df.to_csv(output_dir / f'{split_name}.csv', index=False)
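
    # Save the split parameters and per-split statistics alongside the CSVs
    # so the splits can be audited and reproduced later.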
    split_info = {}
    split_info['metadata_path'] = metadata_path
    split_info['seed'] = seed
    split_info['ratios'] = {
        'train': train_ratio,
        'val': val_ratio,
        'test': round(1 - train_ratio - val_ratio, 2),
    }
    for split_name, split_df in splits.items():
        split_info[split_name] = {
            'total_num_samples': len(split_df),
            'human_samples': len(split_df[split_df['cloned_or_human'] == "human"]),
            'cloned_samples': len(split_df[split_df['cloned_or_human'] == "cloned"]),
            'sources': split_df['source'].value_counts().to_dict(),
            'voices_per_source': split_df.groupby('source')['path'].nunique().to_dict(),
        }

    pprint.pprint(split_info)

    with open(output_dir / 'split_info.json', 'w') as f:
        json.dump(split_info, f, indent=2)

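
# Hypothetical sketch, not from the original pipeline: a minimal torch Dataset
# over one saved split, to show how the CSVs might be consumed downstream.
# Column names ('path', 'cloned_or_human') follow the metadata schema used
# above; the 0/1 label encoding is an assumption, and audio loading is
# deliberately omitted.
class SplitDataset(Dataset):
    def __init__(self, split_csv: str):
        self.df = pd.read_csv(split_csv)

    def __len__(self) -> int:
        return len(self.df)

    def __getitem__(self, idx: int):
        row = self.df.iloc[idx]
        # Assumed encoding: 1 = cloned speech, 0 = human speech.
        label = 1 if row['cloned_or_human'] == "cloned" else 0
        return row['path'], label

# Example usage of the sketch: path, label = SplitDataset('splits/train.csv')[0]
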

if __name__ == "__main__":
    metadata_file = 'metadata-valid.csv'
    output_dir = 'splits'

    create_dataset_splits(metadata_file, output_dir=output_dir)