Junxiong Wang committed
Commit 7ca5856 · 1 Parent(s): a45debb

add models

.DS_Store ADDED
Binary file (6.15 kB).
 
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,74 @@
+---
+base_model: JunxiongWang/mamba_0_875_sft
+tags:
+- mamba
+- alignment-handbook
+- generated_from_trainer
+datasets:
+- HuggingFaceH4/ultrafeedback_binarized
+model-index:
+- name: mamba_0_875_dpo_ep3
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# mamba_0_875_dpo_ep3
+
+This model is a fine-tuned version of [JunxiongWang/mamba_0_875_sft](https://huggingface.co/JunxiongWang/mamba_0_875_sft) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.6922
+- Rewards/chosen: -3.9752
+- Rewards/rejected: -6.3998
+- Rewards/accuracies: 0.7852
+- Rewards/margins: 2.4245
+- Logps/rejected: -333.8416
+- Logps/chosen: -307.0094
+- Logits/rejected: -2.4971
+- Logits/chosen: -2.5509
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-07
+- train_batch_size: 4
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- total_train_batch_size: 32
+- total_eval_batch_size: 64
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 3
+
+### Training results
+
+| Training Loss | Epoch  | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
+|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
+| 0.1219        | 1.0466 | 2000 | 0.5598          | -1.2751        | -2.5954          | 0.7539             | 1.3204          | -295.7982      | -280.0076    | -2.6264         | -2.6813       |
+| 0.0099        | 2.0931 | 4000 | 0.6922          | -3.9752        | -6.3998          | 0.7852             | 2.4245          | -333.8416      | -307.0094    | -2.4971         | -2.5509       |
+
+
+### Framework versions
+
+- Transformers 4.41.2
+- Pytorch 2.1.0+cu118
+- Datasets 2.20.0
+- Tokenizers 0.19.1
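
As a quick-start complement to the card above, here is a minimal sketch of building a prompt in the Zephyr chat format this checkpoint was trained on. It assumes only that the tokenizer loads through the standard `transformers` API; loading the hybrid Mamba weights themselves depends on the authors' code and is not shown.

```python
# Minimal sketch: build a Zephyr-style prompt with the chat_template shipped
# in this commit's tokenizer_config.json. Only the tokenizer is loaded here.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("JunxiongWang/mamba_0_875_dpo_ep3")

messages = [
    {"role": "user", "content": "Explain DPO in one sentence."},
]

# add_generation_prompt=True appends the trailing '<|assistant|>' marker,
# exactly as the chat_template in tokenizer_config.json specifies.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```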
all_results.json ADDED
@@ -0,0 +1,22 @@
+{
+  "epoch": 3.0,
+  "eval_logits/chosen": -2.47953462600708,
+  "eval_logits/rejected": -2.4293060302734375,
+  "eval_logps/chosen": -325.9293518066406,
+  "eval_logps/rejected": -359.2366638183594,
+  "eval_loss": 0.8128312230110168,
+  "eval_rewards/accuracies": 0.7734375,
+  "eval_rewards/chosen": -5.867239475250244,
+  "eval_rewards/margins": 3.072049140930176,
+  "eval_rewards/rejected": -8.939288139343262,
+  "eval_runtime": 93.0348,
+  "eval_samples": 2000,
+  "eval_samples_per_second": 21.497,
+  "eval_steps_per_second": 0.344,
+  "total_flos": 0.0,
+  "train_loss": 0.22836191894055405,
+  "train_runtime": 34822.6853,
+  "train_samples": 61134,
+  "train_samples_per_second": 5.267,
+  "train_steps_per_second": 0.165
+}
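
One consistency check worth noting: in DPO trainer logs, `rewards/margins` is the mean gap between the chosen and rejected rewards, and the entries above line up (figures copied from all_results.json):

```python
# DPO bookkeeping check: margins = rewards/chosen - rewards/rejected.
# Values copied from all_results.json above.
chosen = -5.867239475250244
rejected = -8.939288139343262
print(chosen - rejected)  # ~3.07205, matching eval_rewards/margins
                          # up to floating-point accumulation order
```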
config.json ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "pad_token_id": 2,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.41.2",
+  "use_cache": true,
+  "vocab_size": 32000
+}
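
Note that config.json still describes the Transformer teacher (`_name_or_path` points at HuggingFaceH4/zephyr-7b-beta, a Mistral-7B derivative); the hybrid layer layout lives in mamba_config.json further down. A small worked reading of the attention geometry, using the standard Mistral conventions:

```python
# Attention geometry implied by config.json (values copied from above).
hidden_size = 4096
num_attention_heads = 32
num_key_value_heads = 8

head_dim = hidden_size // num_attention_heads                 # 128
queries_per_kv = num_attention_heads // num_key_value_heads   # 4 (grouped-query attention)
print(head_dim, queries_per_kv)
```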
configs.yaml ADDED
@@ -0,0 +1,13 @@
+mamba_0_875_dpo_ep3:
+  prompt_template: "zephyr-7b-alpha/prompt.txt"
+  fn_completions: "huggingface_local_completions"
+  completions_kwargs:
+    model_name: "/JunxiongWang/mamba_0_875_dpo_ep3"
+    model_kwargs:
+      torch_dtype: 'bfloat16'
+    max_new_tokens: 2048
+    temperature: 0.7
+    top_p: 1.0
+    do_sample: True
+  pretty_name: "Mamba 0 875 From Zephyr 7B Beta"
+  link: "https://huggingface.co/JunxiongWang/mamba_0_875_dpo_ep3"
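
This appears to be an AlpacaEval-style model config: `fn_completions: huggingface_local_completions` generates completions locally, and the `completions_kwargs` become sampling arguments. A hedged sketch of the equivalent `generate()` settings follows; the call site is hypothetical and model loading for the hybrid checkpoint is elided.

```python
# The sampling settings configs.yaml specifies, written as transformers
# generate() keyword arguments. Only the kwargs mirror the YAML above.
generation_kwargs = dict(
    do_sample=True,       # do_sample: True
    temperature=0.7,      # temperature: 0.7
    top_p=1.0,            # top_p: 1.0
    max_new_tokens=2048,  # max_new_tokens: 2048
)
# outputs = model.generate(**inputs, **generation_kwargs)  # hypothetical call site
```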
eval_results.json ADDED
@@ -0,0 +1,16 @@
+{
+  "epoch": 3.0,
+  "eval_logits/chosen": -2.47953462600708,
+  "eval_logits/rejected": -2.4293060302734375,
+  "eval_logps/chosen": -325.9293518066406,
+  "eval_logps/rejected": -359.2366638183594,
+  "eval_loss": 0.8128312230110168,
+  "eval_rewards/accuracies": 0.7734375,
+  "eval_rewards/chosen": -5.867239475250244,
+  "eval_rewards/margins": 3.072049140930176,
+  "eval_rewards/rejected": -8.939288139343262,
+  "eval_runtime": 93.0348,
+  "eval_samples": 2000,
+  "eval_samples_per_second": 21.497,
+  "eval_steps_per_second": 0.344
+}
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.41.2"
+}
mamba_config.json ADDED
@@ -0,0 +1,18 @@
+{
+  "d_model": 4096,
+  "ssm_cfg": {
+    "expand": 1
+  },
+  "rms_norm_eps": 1e-05,
+  "vocab_size": null,
+  "d_xb": 1024,
+  "intermediate_size": 14336,
+  "hidden_act": "silu",
+  "n_layer": 32,
+  "attn_layers": [
+    7,
+    15,
+    23,
+    31
+  ]
+}
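
The hybrid layout follows from `attn_layers`: attention at layers 7, 15, 23, and 31, Mamba everywhere else, i.e. 28 of 32 layers are Mamba (presumably the 0.875 in the model name). A short sketch deriving that layout from the config values above:

```python
# Per-layer layout implied by mamba_config.json (values copied from above).
n_layer = 32
attn_layers = {7, 15, 23, 31}

layout = ["attn" if i in attn_layers else "mamba" for i in range(n_layer)]
print(layout.count("mamba") / n_layer)  # 0.875, matching the "0_875" model name
```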
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90ddf1b6927f3f2624a140642f46084522640dd8144d7fef47a65db54aab6f67
+size 31091579802
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 2048,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "left",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
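
For reference, the `chat_template` above is the standard Zephyr format: each turn is a role header on its own line, the message, then the EOS token, with a bare `<|assistant|>` appended when a generation prompt is requested. A single user turn renders roughly as:

```
<|user|>
Explain DPO in one sentence.</s>
<|assistant|>
```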
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 3.0,
+  "total_flos": 0.0,
+  "train_loss": 0.22836191894055405,
+  "train_runtime": 34822.6853,
+  "train_samples": 61134,
+  "train_samples_per_second": 5.267,
+  "train_steps_per_second": 0.165
+}
trainer_state.json ADDED
The diff for this file is too large to render.
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f83f1fd6dd713f564864494cb659101a5d33cb3a95e8b23ec6f512aac07226f6
+size 6328