ryota39 committed
Commit f42fd4c · verified · 1 parent: f66ded9

Upload 10 files
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "cyberagent/calm2-7b-chat",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_position_embeddings": 32768,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pad_token_id": 1,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 500000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.1",
+   "unsloth_version": "2024.12.11",
+   "use_cache": true,
+   "vocab_size": 65024
+ }
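Since the config declares a stock `LlamaForCausalLM` (fine-tuned from `cyberagent/calm2-7b-chat`), the checkpoint should load with plain `transformers`. A minimal sketch; the repo id below is a placeholder, not taken from this commit:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-username/your-repo"  # placeholder: substitute the actual Hub repo id

tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",           # requires the accelerate package
)

# Sanity-check a few fields against config.json above.
assert model.config.model_type == "llama"
assert model.config.vocab_size == 65024
assert model.config.max_position_embeddings == 32768
```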
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "max_length": 32768,
+   "pad_token_id": 1,
+   "transformers_version": "4.47.1"
+ }
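These defaults are picked up automatically by `model.generate()`; explicit keyword arguments override them. Continuing from the loading sketch above (the `USER:`/`ASSISTANT:` prompt style follows the upstream calm2 chat format, not anything declared in this commit):

```python
prompt = "USER: 日本の首都はどこですか?\nASSISTANT: "
inputs = tok(prompt, return_tensors="pt").to(model.device)

# eos_token_id=0 and pad_token_id=1 come from generation_config.json automatically;
# max_new_tokens overrides the file's max_length default for this call.
out = model.generate(**inputs, max_new_tokens=128)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```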
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96e8d14a659a8c7ca4264911cd0bec2c76f90aa94256937a79bd100b12ea6e1e
+ size 4985122120
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9240217ad97963ecdc57f0fadfb84c90f921318a1366c71adf2d26cb8057002c
+ size 4991431432
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09824b931553c14502982e223453b415e1735ad4aef197fba9788b2f7656e67a
+ size 4041376744
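Each Git LFS pointer records only the SHA-256 and byte size of the real shard, so a download can be verified locally. A minimal sketch, assuming the three shards sit in the working directory:

```python
import hashlib

# Expected (oid, size) pairs copied from the three LFS pointer files above.
expected = {
    "model-00001-of-00003.safetensors":
        ("96e8d14a659a8c7ca4264911cd0bec2c76f90aa94256937a79bd100b12ea6e1e", 4985122120),
    "model-00002-of-00003.safetensors":
        ("9240217ad97963ecdc57f0fadfb84c90f921318a1366c71adf2d26cb8057002c", 4991431432),
    "model-00003-of-00003.safetensors":
        ("09824b931553c14502982e223453b415e1735ad4aef197fba9788b2f7656e67a", 4041376744),
}

for name, (oid, size) in expected.items():
    h, n = hashlib.sha256(), 0
    with open(name, "rb") as f:
        # Hash in 1 MiB chunks so a 5 GB shard never has to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            n += len(chunk)
    assert (h.hexdigest(), n) == (oid, size), f"{name} failed verification"
```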
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14017896448
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
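The index file lets a loader open only the shard that holds a given tensor; note that layer 23 is split, with its attention projections on shard 2 and the rest on shard 3. A sketch using the `safetensors` library, assuming the shards and index sit in the working directory:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.23.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00003.safetensors"

# safe_open memory-maps the shard, so only the requested tensor is read.
with safe_open(shard, framework="pt") as st:
    w = st.get_tensor(name)
print(name, tuple(w.shape), "from", shard)
```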
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|padding|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
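A quick consistency check: `<|endoftext|>` should resolve to id 0 and `<|padding|>` to id 1, matching `bos_token_id`/`eos_token_id`/`pad_token_id` in config.json. A sketch, reusing `tok` from the loading example above:

```python
# bos, eos, and unk all map to the same token in this tokenizer.
assert tok.convert_tokens_to_ids("<|endoftext|>") == 0
assert tok.convert_tokens_to_ids("<|padding|>") == 1
```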
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|padding|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ message['content'] }}{% elif message['role'] == 'system' %}{{ message['content'] }}{% elif message['role'] == 'assistant' %}{{ message['content'] }}<|endoftext|>{% endif %}{% if loop.last and add_generation_prompt %}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|padding|>",
+   "padding_side": "right",
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
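The chat template here simply concatenates message contents and appends `<|endoftext|>` after each assistant turn; it adds no role markers of its own, so any `USER:`/`ASSISTANT:` scaffolding has to live in the message contents. A sketch of how `apply_chat_template` renders it, reusing `tok` from above (the example messages are illustrative):

```python
messages = [
    {"role": "user", "content": "USER: 日本の首都は?\nASSISTANT: "},
    {"role": "assistant", "content": "東京です。"},
]
rendered = tok.apply_chat_template(messages, tokenize=False)
# -> "USER: 日本の首都は?\nASSISTANT: 東京です。<|endoftext|>"
print(rendered)
```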
trainer_state.json ADDED
@@ -0,0 +1,564 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.16327085955304602,
+   "eval_steps": 50,
+   "global_step": 150,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.005442361985101534,
+       "grad_norm": 223.0,
+       "learning_rate": 1.4999011862064836e-06,
+       "logits/chosen": -0.3115585446357727,
+       "logits/rejected": -0.4312504231929779,
+       "logps/chosen": -0.8023494482040405,
+       "logps/rejected": -0.9114950299263,
+       "loss": 1.7206,
+       "nll_loss": 0.8566828966140747,
+       "rewards/accuracies": 0.671875,
+       "rewards/chosen": -2.005873680114746,
+       "rewards/margins": 0.27286386489868164,
+       "rewards/rejected": -2.2787375450134277,
+       "step": 5
+     },
+     {
+       "epoch": 0.010884723970203068,
+       "grad_norm": 213.0,
+       "learning_rate": 1.4996047737567963e-06,
+       "logits/chosen": -0.2838870584964752,
+       "logits/rejected": -0.4562837481498718,
+       "logps/chosen": -0.756219744682312,
+       "logps/rejected": -0.893709659576416,
+       "loss": 1.6545,
+       "nll_loss": 0.8323467373847961,
+       "rewards/accuracies": 0.746874988079071,
+       "rewards/chosen": -1.8905493021011353,
+       "rewards/margins": 0.34372463822364807,
+       "rewards/rejected": -2.23427414894104,
+       "step": 10
+     },
+     {
+       "epoch": 0.0163270859553046,
+       "grad_norm": 207.0,
+       "learning_rate": 1.4991108494350523e-06,
+       "logits/chosen": -0.2802577018737793,
+       "logits/rejected": -0.445369154214859,
+       "logps/chosen": -0.7702305912971497,
+       "logps/rejected": -0.9028533101081848,
+       "loss": 1.67,
+       "nll_loss": 0.8351463079452515,
+       "rewards/accuracies": 0.721875011920929,
+       "rewards/chosen": -1.9255762100219727,
+       "rewards/margins": 0.331557035446167,
+       "rewards/rejected": -2.2571334838867188,
+       "step": 15
+     },
+     {
+       "epoch": 0.021769447940406136,
+       "grad_norm": 193.0,
+       "learning_rate": 1.4984195578532098e-06,
+       "logits/chosen": -0.27219459414482117,
+       "logits/rejected": -0.43887200951576233,
+       "logps/chosen": -0.7428117990493774,
+       "logps/rejected": -0.8896303176879883,
+       "loss": 1.6298,
+       "nll_loss": 0.8110933303833008,
+       "rewards/accuracies": 0.737500011920929,
+       "rewards/chosen": -1.8570295572280884,
+       "rewards/margins": 0.36704641580581665,
+       "rewards/rejected": -2.2240757942199707,
+       "step": 20
+     },
+     {
+       "epoch": 0.02721180992550767,
+       "grad_norm": 196.0,
+       "learning_rate": 1.4975311014087314e-06,
+       "logits/chosen": -0.28368493914604187,
+       "logits/rejected": -0.45796823501586914,
+       "logps/chosen": -0.7394925951957703,
+       "logps/rejected": -0.8978835344314575,
+       "loss": 1.6038,
+       "nll_loss": 0.810085117816925,
+       "rewards/accuracies": 0.768750011920929,
+       "rewards/chosen": -1.8487316370010376,
+       "rewards/margins": 0.3959771394729614,
+       "rewards/rejected": -2.244708776473999,
+       "step": 25
+     },
+     {
+       "epoch": 0.0326541719106092,
+       "grad_norm": 180.0,
+       "learning_rate": 1.4964457402253263e-06,
+       "logits/chosen": -0.26782557368278503,
+       "logits/rejected": -0.4345061779022217,
+       "logps/chosen": -0.7221552133560181,
+       "logps/rejected": -0.8976587057113647,
+       "loss": 1.5676,
+       "nll_loss": 0.8051786422729492,
+       "rewards/accuracies": 0.801562488079071,
+       "rewards/chosen": -1.80538809299469,
+       "rewards/margins": 0.4387587606906891,
+       "rewards/rejected": -2.2441468238830566,
+       "step": 30
+     },
+     {
+       "epoch": 0.03809653389571074,
+       "grad_norm": 184.0,
+       "learning_rate": 1.495163792076789e-06,
+       "logits/chosen": -0.2734525799751282,
+       "logits/rejected": -0.4188796579837799,
+       "logps/chosen": -0.7307429313659668,
+       "logps/rejected": -0.9264034032821655,
+       "loss": 1.5643,
+       "nll_loss": 0.8087199926376343,
+       "rewards/accuracies": 0.809374988079071,
+       "rewards/chosen": -1.826857328414917,
+       "rewards/margins": 0.4891512393951416,
+       "rewards/rejected": -2.3160085678100586,
+       "step": 35
+     },
+     {
+       "epoch": 0.04353889588081227,
+       "grad_norm": 167.0,
+       "learning_rate": 1.493685632293963e-06,
+       "logits/chosen": -0.2776848375797272,
+       "logits/rejected": -0.45066213607788086,
+       "logps/chosen": -0.6951563954353333,
+       "logps/rejected": -0.9053373336791992,
+       "loss": 1.5161,
+       "nll_loss": 0.7817299962043762,
+       "rewards/accuracies": 0.8296874761581421,
+       "rewards/chosen": -1.7378908395767212,
+       "rewards/margins": 0.525452733039856,
+       "rewards/rejected": -2.263343572616577,
+       "step": 40
+     },
+     {
+       "epoch": 0.048981257865913806,
+       "grad_norm": 166.0,
+       "learning_rate": 1.49201169365485e-06,
+       "logits/chosen": -0.2875303626060486,
+       "logits/rejected": -0.4160293936729431,
+       "logps/chosen": -0.7146323919296265,
+       "logps/rejected": -0.9353097677230835,
+       "loss": 1.509,
+       "nll_loss": 0.789823055267334,
+       "rewards/accuracies": 0.854687511920929,
+       "rewards/chosen": -1.786581039428711,
+       "rewards/margins": 0.551693320274353,
+       "rewards/rejected": -2.3382744789123535,
+       "step": 45
+     },
+     {
+       "epoch": 0.05442361985101534,
+       "grad_norm": 167.0,
+       "learning_rate": 1.490142466257901e-06,
+       "logits/chosen": -0.2606073021888733,
+       "logits/rejected": -0.42186981439590454,
+       "logps/chosen": -0.6743353605270386,
+       "logps/rejected": -0.9106225967407227,
+       "loss": 1.472,
+       "nll_loss": 0.7610515356063843,
+       "rewards/accuracies": 0.8500000238418579,
+       "rewards/chosen": -1.6858384609222412,
+       "rewards/margins": 0.590718150138855,
+       "rewards/rejected": -2.2765564918518066,
+       "step": 50
+     },
+     {
+       "epoch": 0.05442361985101534,
+       "eval_logits/chosen": -0.4242976903915405,
+       "eval_logits/rejected": -0.5250905156135559,
+       "eval_logps/chosen": -0.6774176955223083,
+       "eval_logps/rejected": -0.9184495806694031,
+       "eval_loss": 1.466933012008667,
+       "eval_nll_loss": 0.7765384316444397,
+       "eval_rewards/accuracies": 0.8569999933242798,
+       "eval_rewards/chosen": -1.6935441493988037,
+       "eval_rewards/margins": 0.6025797128677368,
+       "eval_rewards/rejected": -2.29612398147583,
+       "eval_runtime": 119.2017,
+       "eval_samples_per_second": 8.389,
+       "eval_steps_per_second": 2.097,
+       "step": 50
+     },
+     {
+       "epoch": 0.059865981836116876,
+       "grad_norm": 176.0,
+       "learning_rate": 1.4880784973785227e-06,
+       "logits/chosen": -0.2771835923194885,
+       "logits/rejected": -0.43040376901626587,
+       "logps/chosen": -0.6886002421379089,
+       "logps/rejected": -0.9180153608322144,
+       "loss": 1.4611,
+       "nll_loss": 0.7568584084510803,
+       "rewards/accuracies": 0.8687499761581421,
+       "rewards/chosen": -1.7215007543563843,
+       "rewards/margins": 0.5735376477241516,
+       "rewards/rejected": -2.2950384616851807,
+       "step": 55
+     },
+     {
+       "epoch": 0.0653083438212184,
+       "grad_norm": 173.0,
+       "learning_rate": 1.4858203913088481e-06,
+       "logits/chosen": -0.28170254826545715,
+       "logits/rejected": -0.43440356850624084,
+       "logps/chosen": -0.6944113969802856,
+       "logps/rejected": -0.958784282207489,
+       "loss": 1.456,
+       "nll_loss": 0.7766550183296204,
+       "rewards/accuracies": 0.8531249761581421,
+       "rewards/chosen": -1.7360286712646484,
+       "rewards/margins": 0.6609319448471069,
+       "rewards/rejected": -2.396960496902466,
+       "step": 60
+     },
+     {
+       "epoch": 0.07075070580631994,
+       "grad_norm": 175.0,
+       "learning_rate": 1.4833688091808084e-06,
+       "logits/chosen": -0.29959458112716675,
+       "logits/rejected": -0.4557119905948639,
+       "logps/chosen": -0.6825212836265564,
+       "logps/rejected": -0.9580196142196655,
+       "loss": 1.4192,
+       "nll_loss": 0.7619790434837341,
+       "rewards/accuracies": 0.895312488079071,
+       "rewards/chosen": -1.7063030004501343,
+       "rewards/margins": 0.6887460947036743,
+       "rewards/rejected": -2.3950493335723877,
+       "step": 65
+     },
+     {
+       "epoch": 0.07619306779142147,
+       "grad_norm": 189.0,
+       "learning_rate": 1.4807244687725672e-06,
+       "logits/chosen": -0.2963514029979706,
+       "logits/rejected": -0.4536547064781189,
+       "logps/chosen": -0.6995417475700378,
+       "logps/rejected": -0.9971321821212769,
+       "loss": 1.4071,
+       "nll_loss": 0.7711254358291626,
+       "rewards/accuracies": 0.903124988079071,
+       "rewards/chosen": -1.748854398727417,
+       "rewards/margins": 0.7439761757850647,
+       "rewards/rejected": -2.492830753326416,
+       "step": 70
+     },
+     {
+       "epoch": 0.08163542977652301,
+       "grad_norm": 192.0,
+       "learning_rate": 1.477888144298368e-06,
+       "logits/chosen": -0.3087882399559021,
+       "logits/rejected": -0.4929865002632141,
+       "logps/chosen": -0.6821542978286743,
+       "logps/rejected": -1.0003395080566406,
+       "loss": 1.3817,
+       "nll_loss": 0.7631580233573914,
+       "rewards/accuracies": 0.8984375,
+       "rewards/chosen": -1.7053858041763306,
+       "rewards/margins": 0.7954627871513367,
+       "rewards/rejected": -2.5008487701416016,
+       "step": 75
+     },
+     {
+       "epoch": 0.08707779176162454,
+       "grad_norm": 250.0,
+       "learning_rate": 1.4748606661818576e-06,
+       "logits/chosen": -0.30626195669174194,
+       "logits/rejected": -0.5067037343978882,
+       "logps/chosen": -0.6984843015670776,
+       "logps/rejected": -1.0486465692520142,
+       "loss": 1.3604,
+       "nll_loss": 0.777851939201355,
+       "rewards/accuracies": 0.925000011920929,
+       "rewards/chosen": -1.7462108135223389,
+       "rewards/margins": 0.8754053115844727,
+       "rewards/rejected": -2.6216163635253906,
+       "step": 80
+     },
+     {
+       "epoch": 0.09252015374672608,
+       "grad_norm": 229.0,
+       "learning_rate": 1.4716429208129552e-06,
+       "logits/chosen": -0.3446356952190399,
+       "logits/rejected": -0.558761477470398,
+       "logps/chosen": -0.6777626276016235,
+       "logps/rejected": -1.0675888061523438,
+       "loss": 1.3122,
+       "nll_loss": 0.7588330507278442,
+       "rewards/accuracies": 0.9359375238418579,
+       "rewards/chosen": -1.694406509399414,
+       "rewards/margins": 0.9745653867721558,
+       "rewards/rejected": -2.6689720153808594,
+       "step": 85
+     },
+     {
+       "epoch": 0.09796251573182761,
+       "grad_norm": 229.0,
+       "learning_rate": 1.4682358502883309e-06,
+       "logits/chosen": -0.36884021759033203,
+       "logits/rejected": -0.6074358820915222,
+       "logps/chosen": -0.6712280511856079,
+       "logps/rejected": -1.1392602920532227,
+       "loss": 1.2571,
+       "nll_loss": 0.7604485154151917,
+       "rewards/accuracies": 0.940625011920929,
+       "rewards/chosen": -1.678070068359375,
+       "rewards/margins": 1.1700807809829712,
+       "rewards/rejected": -2.8481509685516357,
+       "step": 90
+     },
+     {
+       "epoch": 0.10340487771692915,
+       "grad_norm": 248.0,
+       "learning_rate": 1.4646404521355798e-06,
+       "logits/chosen": -0.3828192353248596,
+       "logits/rejected": -0.6576655507087708,
+       "logps/chosen": -0.695937991142273,
+       "logps/rejected": -1.2227665185928345,
+       "loss": 1.2367,
+       "nll_loss": 0.7761465311050415,
+       "rewards/accuracies": 0.9468749761581421,
+       "rewards/chosen": -1.7398450374603271,
+       "rewards/margins": 1.3170711994171143,
+       "rewards/rejected": -3.0569162368774414,
+       "step": 95
+     },
+     {
+       "epoch": 0.10884723970203068,
+       "grad_norm": 233.0,
+       "learning_rate": 1.4608577790211639e-06,
+       "logits/chosen": -0.408522367477417,
+       "logits/rejected": -0.6887016296386719,
+       "logps/chosen": -0.6912533640861511,
+       "logps/rejected": -1.2720332145690918,
+       "loss": 1.1964,
+       "nll_loss": 0.7770565748214722,
+       "rewards/accuracies": 0.956250011920929,
+       "rewards/chosen": -1.7281334400177002,
+       "rewards/margins": 1.4519497156143188,
+       "rewards/rejected": -3.1800830364227295,
+       "step": 100
+     },
+     {
+       "epoch": 0.10884723970203068,
+       "eval_logits/chosen": -0.6553338766098022,
+       "eval_logits/rejected": -0.960581362247467,
+       "eval_logps/chosen": -0.6551228165626526,
+       "eval_logps/rejected": -1.286936640739441,
+       "eval_loss": 1.1423654556274414,
+       "eval_nll_loss": 0.7609782814979553,
+       "eval_rewards/accuracies": 0.9639999866485596,
+       "eval_rewards/chosen": -1.637807011604309,
+       "eval_rewards/margins": 1.579534649848938,
+       "eval_rewards/rejected": -3.217341899871826,
+       "eval_runtime": 122.2807,
+       "eval_samples_per_second": 8.178,
+       "eval_steps_per_second": 2.044,
+       "step": 100
+     },
+     {
+       "epoch": 0.11428960168713222,
+       "grad_norm": 244.0,
+       "learning_rate": 1.4568889384422085e-06,
+       "logits/chosen": -0.44453367590904236,
+       "logits/rejected": -0.8137799501419067,
+       "logps/chosen": -0.6734666228294373,
+       "logps/rejected": -1.3720922470092773,
+       "loss": 1.1097,
+       "nll_loss": 0.7629668116569519,
+       "rewards/accuracies": 0.9703124761581421,
+       "rewards/chosen": -1.6836665868759155,
+       "rewards/margins": 1.7465636730194092,
+       "rewards/rejected": -3.4302303791046143,
+       "step": 105
+     },
+     {
+       "epoch": 0.11973196367223375,
+       "grad_norm": 232.0,
+       "learning_rate": 1.4527350924022508e-06,
+       "logits/chosen": -0.4658277928829193,
+       "logits/rejected": -0.8643436431884766,
+       "logps/chosen": -0.6887673735618591,
+       "logps/rejected": -1.4408514499664307,
+       "loss": 1.0866,
+       "nll_loss": 0.7645546793937683,
+       "rewards/accuracies": 0.9765625,
+       "rewards/chosen": -1.7219183444976807,
+       "rewards/margins": 1.8802101612091064,
+       "rewards/rejected": -3.602128505706787,
+       "step": 110
+     },
+     {
+       "epoch": 0.12517432565733527,
+       "grad_norm": 227.0,
+       "learning_rate": 1.4483974570710226e-06,
+       "logits/chosen": -0.4871746897697449,
+       "logits/rejected": -1.0338547229766846,
+       "logps/chosen": -0.6287084817886353,
+       "logps/rejected": -1.4971318244934082,
+       "loss": 0.9829,
+       "nll_loss": 0.7324297428131104,
+       "rewards/accuracies": 0.984375,
+       "rewards/chosen": -1.571771264076233,
+       "rewards/margins": 2.171058177947998,
+       "rewards/rejected": -3.7428295612335205,
+       "step": 115
+     },
+     {
+       "epoch": 0.1306166876424368,
+       "grad_norm": 186.0,
+       "learning_rate": 1.4438773024283826e-06,
+       "logits/chosen": -0.5202233195304871,
+       "logits/rejected": -1.1327874660491943,
+       "logps/chosen": -0.6288429498672485,
+       "logps/rejected": -1.5928165912628174,
+       "loss": 0.959,
+       "nll_loss": 0.7312449216842651,
+       "rewards/accuracies": 0.9859374761581421,
+       "rewards/chosen": -1.572107195854187,
+       "rewards/margins": 2.4099345207214355,
+       "rewards/rejected": -3.982041120529175,
+       "step": 120
+     },
+     {
+       "epoch": 0.13605904962753834,
+       "grad_norm": 177.0,
+       "learning_rate": 1.4391759518924858e-06,
+       "logits/chosen": -0.5006698369979858,
+       "logits/rejected": -1.2095844745635986,
+       "logps/chosen": -0.6492566466331482,
+       "logps/rejected": -1.7050262689590454,
+       "loss": 0.9298,
+       "nll_loss": 0.742263674736023,
+       "rewards/accuracies": 0.9906250238418579,
+       "rewards/chosen": -1.623141884803772,
+       "rewards/margins": 2.6394238471984863,
+       "rewards/rejected": -4.262566089630127,
+       "step": 125
+     },
+     {
+       "epoch": 0.14150141161263988,
+       "grad_norm": 155.0,
+       "learning_rate": 1.4342947819323133e-06,
+       "logits/chosen": -0.49277132749557495,
+       "logits/rejected": -1.2336362600326538,
+       "logps/chosen": -0.6462730169296265,
+       "logps/rejected": -1.7665297985076904,
+       "loss": 0.9073,
+       "nll_loss": 0.7340320348739624,
+       "rewards/accuracies": 0.9906250238418579,
+       "rewards/chosen": -1.615682601928711,
+       "rewards/margins": 2.8006420135498047,
+       "rewards/rejected": -4.416324615478516,
+       "step": 130
+     },
+     {
+       "epoch": 0.1469437735977414,
+       "grad_norm": 157.0,
+       "learning_rate": 1.4292352216646672e-06,
+       "logits/chosen": -0.4820989966392517,
+       "logits/rejected": -1.2759294509887695,
+       "logps/chosen": -0.6405208706855774,
+       "logps/rejected": -1.8488733768463135,
+       "loss": 0.8986,
+       "nll_loss": 0.7417997121810913,
+       "rewards/accuracies": 0.989062488079071,
+       "rewards/chosen": -1.601301908493042,
+       "rewards/margins": 3.020881175994873,
+       "rewards/rejected": -4.622182846069336,
+       "step": 135
+     },
+     {
+       "epoch": 0.15238613558284295,
+       "grad_norm": 117.0,
+       "learning_rate": 1.4239987524357501e-06,
+       "logits/chosen": -0.48703551292419434,
+       "logits/rejected": -1.3474723100662231,
+       "logps/chosen": -0.6453564167022705,
+       "logps/rejected": -1.947719931602478,
+       "loss": 0.8587,
+       "nll_loss": 0.7325531840324402,
+       "rewards/accuracies": 0.9937499761581421,
+       "rewards/chosen": -1.6133911609649658,
+       "rewards/margins": 3.2559094429016113,
+       "rewards/rejected": -4.86929988861084,
+       "step": 140
+     },
+     {
+       "epoch": 0.15782849756794448,
+       "grad_norm": 114.0,
+       "learning_rate": 1.4185869073874565e-06,
+       "logits/chosen": -0.4773550033569336,
+       "logits/rejected": -1.3999682664871216,
+       "logps/chosen": -0.6471393704414368,
+       "logps/rejected": -2.04455304145813,
+       "loss": 0.8458,
+       "nll_loss": 0.7375032305717468,
+       "rewards/accuracies": 0.996874988079071,
+       "rewards/chosen": -1.6178483963012695,
+       "rewards/margins": 3.493534803390503,
+       "rewards/rejected": -5.111382961273193,
+       "step": 145
+     },
+     {
+       "epoch": 0.16327085955304602,
+       "grad_norm": 102.5,
+       "learning_rate": 1.413001271008494e-06,
+       "logits/chosen": -0.5018254518508911,
+       "logits/rejected": -1.4777194261550903,
+       "logps/chosen": -0.6435462832450867,
+       "logps/rejected": -2.1130857467651367,
+       "loss": 0.8377,
+       "nll_loss": 0.7388636469841003,
+       "rewards/accuracies": 0.9937499761581421,
+       "rewards/chosen": -1.608865737915039,
+       "rewards/margins": 3.673849105834961,
+       "rewards/rejected": -5.28271484375,
+       "step": 150
+     },
+     {
+       "epoch": 0.16327085955304602,
+       "eval_logits/chosen": -0.6831561326980591,
+       "eval_logits/rejected": -1.8252586126327515,
+       "eval_logps/chosen": -0.6115793585777283,
+       "eval_logps/rejected": -2.0589704513549805,
+       "eval_loss": 0.8232702612876892,
+       "eval_nll_loss": 0.7224369645118713,
+       "eval_rewards/accuracies": 0.9929999709129333,
+       "eval_rewards/chosen": -1.528948187828064,
+       "eval_rewards/margins": 3.6184778213500977,
+       "eval_rewards/rejected": -5.147425651550293,
+       "eval_runtime": 112.2438,
+       "eval_samples_per_second": 8.909,
+       "eval_steps_per_second": 2.227,
+       "step": 150
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 918,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 50,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
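The metric names in log_history (nll_loss alongside rewards/chosen, rewards/rejected, and rewards/margins) match the logging of a preference-optimization trainer such as TRL's ORPOTrainer at step 150 of 918, though this commit does not name the trainer. A small sketch for pulling the train and eval curves out of the state file:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Train records carry "loss"; eval records carry "eval_loss" instead.
train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

for e in train:
    print(f'step {e["step"]:>3}: loss={e["loss"]:.4f} '
          f'margin={e["rewards/margins"]:.3f} acc={e["rewards/accuracies"]:.3f}')
for e in evals:
    print(f'eval @ step {e["step"]}: loss={e["eval_loss"]:.4f} '
          f'acc={e["eval_rewards/accuracies"]:.3f}')
```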