---
base_model: NousResearch/Hermes-3-Llama-3.1-8B
library_name: peft
license: llama3
tags:
- generated_from_trainer
model-index:
- name: lora-out
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.4.1`
```yaml
###
# Model Configuration: Hermes-3 Llama-3.1 8B
###

base_model: NousResearch/Hermes-3-Llama-3.1-8B
# base_model: NousResearch/Hermes-3-Llama-3.1-70B
sequence_len: 1024

# base model weight quantization
load_in_8bit: true
# load_in_4bit: true

# attention implementation
flash_attention: true

# finetuned adapter config
adapter: lora
lora_model_dir:
lora_r: 16
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save: # required when adding new tokens to LLaMA/Mistral
  - embed_tokens
  - lm_head
# for details, see https://github.com/huggingface/peft/issues/334#issuecomment-1561727994

###
# Dataset Configuration: sqlqa
###
# datasets:
#   - path: data.jsonl
#     type: alpaca

datasets:
  - path: data.jsonl
    ds_type: json
    type:
      field_instruction: instruction
      field_input: input
      field_output: output
      format: |-
        [INST] {instruction}
        {input} [/INST]

chat_template: llama3
tokens:
  - "[INST]"
  - " [/INST]"
  - "[QL]"
  - " [/QL]"
  - "[EXPLANATION]"
  - " [/EXPLANATION]"
# dataset formatting config

special_tokens:
  pad_token: <|end_of_text|>

val_set_size: 0.05

###
# Training Configuration
###

# masks the input messages so that the model learns and understands the language w/o being reliant on the input
train_on_inputs: false
# random seed for better reproducibility
seed: 117

# optimizer config
optimizer: adamw_bnb_8bit
learning_rate: 0.0001
lr_scheduler: cosine
num_epochs: 4
micro_batch_size: 4
gradient_accumulation_steps: 1
warmup_steps: 10

# axolotl saving config
dataset_prepared_path: last_run_prepared
output_dir: ./lora-out

# logging and eval config
logging_steps: 1
eval_steps: 0.05

# training performance optimization config
bf16: auto
tf32: false
gradient_checkpointing: true

###
# Miscellaneous Configuration
###

# when true, prevents over-writing the config from the CLI
strict: false

# "Don't mess with this, it's here for accelerate and torchrun" -- axolotl docs
local_rank:

# WANDB
wandb_mode:
wandb_project:
wandb_watch:
wandb_name:
wandb_run_id:

# Multi-GPU
# deepspeed: /root/axolotl/deepspeed_configs/zero3_bf16.json
# deepspeed: zero3_bf16.json
# deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json
deepspeed:
fsdp:
fsdp_config:

```
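
For orientation, a minimal sketch of roughly what the adapter section above corresponds to in plain `peft`/`transformers` terms (this is not the code axolotl runs internally): the `[INST]`/`[QL]`/`[EXPLANATION]` markers from `tokens:` are added to the vocabulary, which is why `embed_tokens` and `lm_head` are trained and saved in full, and the list of target linear modules is an assumption implied by `lora_target_linear: true` for a Llama-style model.

```python
# Sketch only: approximates the adapter settings above with peft directly.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base_id = "NousResearch/Hermes-3-Llama-3.1-8B"
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # load_in_8bit: true
    device_map="auto",
)

# tokens: the new markers enlarge the vocabulary, so the embeddings must grow too.
tokenizer.add_tokens(["[INST]", " [/INST]", "[QL]", " [/QL]", "[EXPLANATION]", " [/EXPLANATION]"])
tokenizer.pad_token = "<|end_of_text|>"  # special_tokens: pad_token
model.resize_token_embeddings(len(tokenizer))

lora_config = LoraConfig(
    r=16,               # lora_r
    lora_alpha=32,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    # lora_target_linear: true targets every linear projection; these module names
    # are an assumption for Llama-style architectures.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    # Saved in full because new tokens were added (see the linked peft issue above).
    modules_to_save=["embed_tokens", "lm_head"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```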

</details><br>

# lora-out

This model is a fine-tuned version of [NousResearch/Hermes-3-Llama-3.1-8B](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B) on a local `data.jsonl` dataset (see the axolotl config above).
It achieves the following results on the evaluation set:
- Loss: 0.0391

## Model description

More information needed

## Intended uses & limitations

More information needed
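
Pending a fuller description, below is a minimal inference sketch using `peft` and `transformers`. The adapter path is a placeholder (training wrote the adapter to `./lora-out`; point it at wherever this repository's files live), it assumes the tokenizer saved alongside the adapter carries the added `[INST]`/`[QL]`/`[EXPLANATION]` tokens, and the example prompt is invented to match the `[INST] ... [/INST]` template from the config.

```python
# Minimal sketch: load the base model plus this LoRA adapter and generate.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

adapter_path = "./lora-out"  # placeholder: the output_dir from the config
base_id = "NousResearch/Hermes-3-Llama-3.1-8B"

tokenizer = AutoTokenizer.from_pretrained(adapter_path)  # carries the added tokens
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model.resize_token_embeddings(len(tokenizer))            # match the enlarged vocabulary
model = PeftModel.from_pretrained(model, adapter_path)

prompt = "[INST] Write a SQL query that counts users per country.\nSchema: users(id, country) [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```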

## Training and evaluation data

More information needed
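
The training set itself is not published with this card. Based on the `datasets:` block in the config, each line of `data.jsonl` is a JSON object with `instruction`, `input`, and `output` fields rendered through the custom `[INST] ... [/INST]` template. The record below is invented purely to illustrate the shape, including a guess at how the `[QL]` and `[EXPLANATION]` markers wrap the output.

```python
import json

# Hypothetical record, matching field_instruction / field_input / field_output.
record = {
    "instruction": "Write a SQL query that answers the question.",
    "input": "Question: how many users signed up in 2023?\nSchema: users(id, signup_date)",
    "output": "[QL] SELECT COUNT(*) FROM users WHERE signup_date >= '2023-01-01' [/QL]\n"
              "[EXPLANATION] Counts rows whose signup_date falls in 2023. [/EXPLANATION]",
}

# How the `format: |-` template turns the record into a training prompt.
prompt = "[INST] {instruction}\n{input} [/INST]".format(**record)
print(json.dumps(record))  # one such line per example in data.jsonl
print(prompt)
```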

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 4
- seed: 117
- distributed_type: multi-GPU
- num_devices: 8
- total_train_batch_size: 32
- total_eval_batch_size: 32
- optimizer: AdamW (8-bit, bitsandbytes `adamw_bnb_8bit`) with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 10
- num_epochs: 4
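
The total batch size is just the per-device batch size multiplied by the device count and the gradient-accumulation steps; a one-line check:

```python
# Effective batch size implied by the settings above.
micro_batch_size = 4             # per-device train batch size
num_devices = 8                  # GPUs used for training
gradient_accumulation_steps = 1

total_train_batch_size = micro_batch_size * num_devices * gradient_accumulation_steps
print(total_train_batch_size)    # 4 * 8 * 1 = 32
```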

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 2.1647 | 0.0769 | 1 | 2.2016 |
| 2.1505 | 0.2308 | 3 | 2.1168 |
| 1.7332 | 0.4615 | 6 | 1.5604 |
| 1.0807 | 0.6923 | 9 | 0.8788 |
| 0.5284 | 0.9231 | 12 | 0.4853 |
| 0.3215 | 1.1538 | 15 | 0.2911 |
| 0.2114 | 1.3846 | 18 | 0.1958 |
| 0.1493 | 1.6154 | 21 | 0.1374 |
| 0.1081 | 1.8462 | 24 | 0.1066 |
| 0.0751 | 2.0769 | 27 | 0.0821 |
| 0.0782 | 2.3077 | 30 | 0.0689 |
| 0.0524 | 2.5385 | 33 | 0.0602 |
| 0.0538 | 2.7692 | 36 | 0.0523 |
| 0.0442 | 3.0 | 39 | 0.0464 |
| 0.0385 | 3.2308 | 42 | 0.0417 |
| 0.0358 | 3.4615 | 45 | 0.0410 |
| 0.0336 | 3.6923 | 48 | 0.0388 |
| 0.0336 | 3.9231 | 51 | 0.0391 |

### Framework versions

- PEFT 0.13.0
- Transformers 4.45.1
- Pytorch 2.3.1+cu121
- Datasets 2.21.0
- Tokenizers 0.20.0