aaditya committed
Commit 37c037c · unverified · 1 Parent(s): 15f7910

Adding Llama-3 qlora (#1536)


* Create qlora.yml

* Update qlora.yml

Files changed (1)
  1. examples/llama-3/qlora.yml +67 -0
examples/llama-3/qlora.yml ADDED
@@ -0,0 +1,67 @@
+base_model: meta-llama/Meta-Llama-3-8B
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: aaditya/alpaca_subset_1
+    type: alpaca
+dataset_prepared_path:
+val_set_size: 0
+output_dir: ./qlora-out
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 4096
+sample_packing: true
+pad_to_sequence_len: true
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_modules:
+lora_target_linear: true
+lora_fan_in_fan_out:
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 2
+num_epochs: 4
+optimizer: paged_adamw_32bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  pad_token: "<|end_of_text|>"
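
Usage note (a sketch, not part of this diff): axolotl example configs like this one are typically launched through the project's training CLI via accelerate; the exact entry point can vary between axolotl versions.

accelerate launch -m axolotl.cli.train examples/llama-3/qlora.yml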