Crystalcareai committed
Commit 177215f · 1 Parent(s): 4654b42

Upload 8 files

README.md CHANGED
@@ -1,9 +1,179 @@
 ---
+license: apache-2.0
 library_name: peft
+tags:
+- generated_from_trainer
+base_model: NurtureAI/OpenHermes-2.5-Mistral-7B-16k
+model-index:
+- name: qlora-out
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+<details><summary>See axolotl config</summary>
+
+axolotl version: `0.3.0`
+```yaml
+base_model: NurtureAI/OpenHermes-2.5-Mistral-7B-16k
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+datasets:
+  - path: Crystalcareai/WATOP600
+    type: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.1
+output_dir: ./qlora-out
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 16000
+sample_packing: true
+pad_to_sequence_len: true
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+lora_target_modules:
+  - gate_proj
+  - down_proj
+  - up_proj
+  - q_proj
+  - v_proj
+  - k_proj
+  - o_proj
+
+# Save full copies of embed_tokens and lm_head with the adapter; required
+# because the vocabulary gains the two ChatML tokens.
+lora_modules_to_save: ['embed_tokens', 'lm_head']
+
+wandb_project:
+wandb_entity:
+wandb_watch:
+wandb_name:
+wandb_log_model:
+
+gradient_accumulation_steps: 4
+micro_batch_size: 1
+num_epochs: 1.5
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+loss_watchdog_threshold: 5.0
+loss_watchdog_patience: 3
+
+warmup_steps: 10
+evals_per_epoch: 4
+eval_table_size:
+eval_table_max_new_tokens: 128
+saves_per_epoch: 1
+debug:
+deepspeed:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
+
+```
+
+</details><br>
+
+# qlora-out
+
+This model is a fine-tuned version of [NurtureAI/OpenHermes-2.5-Mistral-7B-16k](https://huggingface.co/NurtureAI/OpenHermes-2.5-Mistral-7B-16k) on the Crystalcareai/WATOP600 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.5566
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0002
+- train_batch_size: 1
+- eval_batch_size: 1
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 4 (1 per device × 4 accumulation steps)
+- optimizer: AdamW (8-bit, via bitsandbytes) with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_steps: 10
+- num_epochs: 1.5
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 1.0027        | 0.03  | 1    | 0.9286          |
+| 0.8464        | 0.27  | 9    | 0.7312          |
+| 0.5254        | 0.54  | 18   | 0.6451          |
+| 0.712         | 0.81  | 27   | 0.5988          |
+| 0.5733        | 1.01  | 36   | 0.5616          |
+| 0.3739        | 1.28  | 45   | 0.5566          |
+
+
+### Framework versions
+
+- Transformers 4.36.2
+- Pytorch 2.0.1+cu117
+- Datasets 2.16.1
+- Tokenizers 0.15.0
 ## Training procedure
 
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 ### Framework versions
 
 
-- PEFT 0.5.0
+- PEFT 0.6.0
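For reference, the `bitsandbytes` settings listed in the card correspond directly to a `BitsAndBytesConfig` in Transformers. A minimal loading sketch (the repo id comes from the card; treat the exact call pattern as an assumption, not part of this commit):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# mirroring the quantization config reported in the model card above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "NurtureAI/OpenHermes-2.5-Mistral-7B-16k",
    quantization_config=bnb_config,
    device_map="auto",
)
```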
adapter_config.json CHANGED
@@ -1,23 +1,31 @@
 {
+  "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-alpha",
+  "base_model_name_or_path": "NurtureAI/OpenHermes-2.5-Mistral-7B-16k",
   "bias": "none",
-  "fan_in_fan_out": false,
+  "fan_in_fan_out": null,
   "inference_mode": true,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
   "lora_alpha": 16,
-  "lora_dropout": 0,
-  "modules_to_save": null,
+  "lora_dropout": 0.05,
+  "modules_to_save": [
+    "embed_tokens",
+    "lm_head"
+  ],
   "peft_type": "LORA",
-  "r": 8,
+  "r": 32,
+  "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "k_proj",
+    "up_proj",
+    "o_proj",
+    "down_proj",
+    "gate_proj",
     "v_proj",
-    "o_proj"
+    "k_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
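`modules_to_save` stores full, trainable copies of `embed_tokens` and `lm_head` alongside the LoRA weights, which is why `adapter_model.bin` below grows from roughly 27 MB to 860 MB. A sketch of attaching the adapter, continuing from the loading snippet above (the local `./qlora-out` path is an assumption based on the training `output_dir`):

```python
from peft import PeftModel
from transformers import AutoTokenizer

# The adapter was trained with a 32002-token vocabulary (two ChatML markers
# added), so resize the base model's embeddings before loading it.
tokenizer = AutoTokenizer.from_pretrained("./qlora-out")  # assumed adapter dir
base.resize_token_embeddings(len(tokenizer))

model = PeftModel.from_pretrained(base, "./qlora-out")  # assumed adapter dir
```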
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2b7f4b0ef8a6b008380b9149e8d97f737f23514729254a5fed32d080b5a0ac6
-size 27355402
+oid sha256:49a280210b0cf26f6a4b781ea02e4ad8132946f7fdf8a7cb35d5df5816c3b3ad
+size 860027221
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "<|im_end|>": 32000,
+  "<|im_start|>": 32001
+}
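These two ids extend a stock 32000-token Mistral vocabulary to the 32002 reported in `config.json` below. A hedged sketch of how such markers are typically registered (illustrative only; the exact call used during training is not part of this commit, and the starting tokenizer is an assumption):

```python
from transformers import AutoTokenizer

# Start from a stock 32000-token Mistral tokenizer (assumed here for
# illustration; the base repo may already ship these markers).
tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tok.add_special_tokens(
    {"additional_special_tokens": ["<|im_end|>", "<|im_start|>"]}
)
print(len(tok))  # 32002; the new ids 32000/32001 match added_tokens.json
```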
config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "_name_or_path": "NurtureAI/OpenHermes-2.5-Mistral-7B-16k",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "quantization_config": {
+    "bnb_4bit_compute_dtype": "bfloat16",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": true,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  },
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 100000.0,
+  "sliding_window": 16384,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float16",
+  "transformers_version": "4.36.2",
+  "use_cache": false,
+  "vocab_size": 32002
+}
special_tokens_map.json CHANGED
@@ -1,9 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
@@ -18,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "</s>",
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
@@ -23,24 +25,39 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "32000": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32001": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
-  "additional_special_tokens": [
-    "<unk>",
-    "<s>",
-    "</s>"
-  ],
+  "additional_special_tokens": [],
   "bos_token": "<s>",
-  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
+  "device_map": "auto",
   "eos_token": "</s>",
   "legacy": true,
+  "max_length": 16384,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "</s>",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
-  "truncation_side": "left",
+  "trust_remote_code": false,
   "unk_token": "<unk>",
-  "use_default_system_prompt": true
+  "use_default_system_prompt": true,
+  "use_fast": true
 }
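The new `chat_template` is the standard ChatML format. A quick sanity check of the rendered prompt, assuming the tokenizer files live in a local `./qlora-out` directory:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./qlora-out")  # assumed local dir
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```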