# Experiment Config (set these per experiment)
output_dir: /output
logging_dir: runs/tb_log
logging_steps: 10
seed: 1115
train_file: /csv/clotho/train.csv
validation_file: /csv/clotho/valid.csv
encodec_base_path: /data/clotho/encodec
clap_base_path: /data/clotho/clap
tokenizer_name: facebook/bart-base
config_name_or_path: facebook/bart-base
model_name_or_path: /data/enclap_audiocaps
eval_num_captions: 5
overwrite_output_dir: false
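# Note: the tokenizer and model config are taken from facebook/bart-base, while
# the initial weights come from a local checkpoint (an AudioCaps-trained model,
# presumably being fine-tuned on Clotho here). eval_num_captions: 5 matches the
# five reference captions per clip that Clotho provides.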

# Basic Config
encodec_masking_prob: 0.15
encodec_masking_span: 10
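# A sketch of what these two knobs typically mean together (an assumption about
# the training script, not confirmed by it): masking operates on the EnCodec
# token sequence along the time axis, roughly
#   for each frame index t: with prob encodec_masking_prob, mask frames
#   t .. t + encodec_masking_span - 1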
num_train_epochs: 15
max_train_steps: null
gradient_accumulation_steps: 1
per_device_train_batch_size: 64
per_device_eval_batch_size: 64
split_batches: true
checkpointing_steps: epoch  # 'epoch' to save once per epoch, or an integer step interval
resume_from_checkpoint: null
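# Notes (based on typical Hugging Face Accelerate semantics; not verified
# against this training script):
# - with split_batches: true, each batch of per_device_train_batch_size items
#   is split across processes, so the effective global batch size stays 64
#   regardless of GPU count
# - to resume, point resume_from_checkpoint at a saved checkpoint directory,
#   e.g. resume_from_checkpoint: /output/epoch_7  (illustrative path)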

# Generation Config
max_target_length: 128
val_max_target_length: 50
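# Assumption (following the usual Hugging Face seq2seq convention):
# max_target_length caps tokenized caption length when building training
# targets, while val_max_target_length bounds generation length at validation.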

# Training Hyperparameters
# "lr_schedulre_type" should be one of "linear", "cosine", "cosine_with_restarts", "polynomial", 
# "constant", "constant_with_warmpup", "inverse_sqrt", "reduce_lr_on_plateau", "two_stage_inverse_sqrt"
lr_scheduler_type: inverse_sqrt
learning_rate: 2e-5  # peak lr
num_warmup_steps: 1000
weight_decay: 0.01
max_grad_norm: 1.0
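# For reference, one common inverse_sqrt formulation (a sketch; this repo's
# exact schedule may differ): linear warmup to the peak lr over
# num_warmup_steps, then
#   lr(step) = learning_rate * sqrt(num_warmup_steps / step)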

# Others
with_tracking: true
report_to: tensorboard
ignore_pad_token_for_loss: true
preprocessing_num_workers: 32
use_slow_tokenizer: false
overwrite_cache: false
pad_to_max_length: false
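# Loading sketch (hypothetical; assumes the training script reads this file
# with PyYAML into a flat namespace, which is not confirmed here):
#   import argparse, yaml
#   with open("config.yaml") as f:
#       args = argparse.Namespace(**yaml.safe_load(f))
#   # PyYAML caveat: "2e-5" lacks a decimal point, so safe_load keeps it as a
#   # string; cast with float(args.learning_rate) before passing it to the optimizer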