silviasapora committed on
Model save

- .gitattributes +1 -0
- README.md +79 -0
- all_results.json +9 -0
- config.json +29 -0
- generation_config.json +7 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +261 -0
- runs/Sep21_12-42-46_65ecb96dba42/events.out.tfevents.1726922648.65ecb96dba42.271982.0 +3 -0
- special_tokens_map.json +28 -0
- tokenizer.json +3 -0
- tokenizer_config.json +70 -0
- train_results.json +9 -0
- trainer_state.json +1791 -0
- training_args.bin +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,79 @@
+---
+library_name: transformers
+license: gemma
+base_model: google/gemma-7b
+tags:
+- trl
+- orpo
+- generated_from_trainer
+model-index:
+- name: gemma-7b-borpo
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# gemma-7b-borpo
+
+This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.5984
+- Rewards/chosen: -0.0575
+- Rewards/rejected: -0.0699
+- Rewards/accuracies: 0.5899
+- Rewards/margins: 0.0124
+- Logps/rejected: -1.3977
+- Logps/chosen: -1.1506
+- Logits/rejected: 270.9628
+- Logits/chosen: 299.8625
+- Nll Loss: 1.5312
+- Log Odds Ratio: -0.6761
+- Log Odds Chosen: 0.3679
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-06
+- train_batch_size: 2
+- eval_batch_size: 1
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 4
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 32
+- total_eval_batch_size: 4
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: inverse_sqrt
+- lr_scheduler_warmup_steps: 100
+- num_epochs: 3
+
+### Training results
+
+| Training Loss | Epoch  | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | Nll Loss | Log Odds Ratio | Log Odds Chosen |
+|:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|:--------:|:--------------:|:---------------:|
+| 1.4516        | 0.9968 | 157  | 1.4765          | -0.0513        | -0.0577          | 0.5468             | 0.0064          | -1.1547        | -1.0260      | 293.8872        | 321.9495      | 1.4282   | -0.6924        | 0.1911          |
+| 1.0587        | 2.0    | 315  | 1.4250          | -0.0502        | -0.0595          | 0.5468             | 0.0093          | -1.1904        | -1.0035      | 296.0850        | 323.6012      | 1.3729   | -0.6901        | 0.2723          |
+| 0.5897        | 2.9905 | 471  | 1.5984          | -0.0575        | -0.0699          | 0.5899             | 0.0124          | -1.3977        | -1.1506      | 270.9628        | 299.8625      | 1.5312   | -0.6761        | 0.3679          |
+
+
+### Framework versions
+
+- Transformers 4.44.2
+- Pytorch 2.4.0+cu121
+- Datasets 3.0.0
+- Tokenizers 0.19.1
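The card above omits usage; a minimal loading sketch follows, assuming the checkpoint is published as `silviasapora/gemma-7b-borpo` (committer name plus model-index name; adjust if the actual Hub path differs):

```python
# Minimal usage sketch. The repo id below is an assumption inferred from the
# committer and the model-index name; it is not confirmed by this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "silviasapora/gemma-7b-borpo"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    device_map="auto",
)

# The tokenizer ships a ChatML template (see tokenizer_config.json below).
messages = [{"role": "user", "content": "Explain ORPO in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```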
all_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 2.9904761904761905,
+    "total_flos": 0.0,
+    "train_loss": 1.4937715783493788,
+    "train_runtime": 12592.9879,
+    "train_samples": 5034,
+    "train_samples_per_second": 1.199,
+    "train_steps_per_second": 0.037
+}
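The two throughput fields are derived from the other values; a quick consistency check, assuming the Trainer's usual formulas (the step count of 471 comes from trainer_state.json below):

```python
# Rough consistency check for the reported throughput numbers.
train_samples = 5034        # from all_results.json
num_epochs = 3              # from the README hyperparameters
train_runtime = 12592.9879  # seconds
total_steps = 471           # global_step in trainer_state.json

print(round(train_samples * num_epochs / train_runtime, 3))  # 1.199 samples/s
print(round(total_steps / train_runtime, 3))                 # 0.037 steps/s
```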
config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "google/gemma-7b",
+  "architectures": [
+    "GemmaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 106,
+  "eos_token_id": 107,
+  "head_dim": 256,
+  "hidden_act": "gelu",
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 3072,
+  "initializer_range": 0.02,
+  "intermediate_size": 24576,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 16,
+  "pad_token_id": 107,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.44.2",
+  "use_cache": false,
+  "vocab_size": 256000
+}
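A small sketch for inspecting this configuration without downloading the weights, again assuming the repo id `silviasapora/gemma-7b-borpo`:

```python
from transformers import AutoConfig

# Assumed repo id; a local path to this commit's files works the same way.
config = AutoConfig.from_pretrained("silviasapora/gemma-7b-borpo")

print(config.model_type)               # gemma
print(config.num_hidden_layers)        # 28
print(config.hidden_size)              # 3072
print(config.max_position_embeddings)  # 8192
```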
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 106,
+  "eos_token_id": 107,
+  "pad_token_id": 107,
+  "transformers_version": "4.44.2"
+}
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feb9e5df7227c57a5f17b3d866c3ebe19e95cc2008c29163aafc45ce64d572a6
+size 4995496656
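This file (like the other large files in this commit) is stored as a Git LFS pointer: three `key value` lines giving the spec version, the blob's SHA-256, and its size in bytes. A minimal parsing sketch, assuming a checkout where the LFS blobs have not been pulled:

```python
# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("model-00001-of-00004.safetensors")
print(info["oid"])                    # sha256:feb9e5df...
print(int(info["size"]) / 1e9, "GB")  # ~5.0 GB
```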
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:315e3392b12dfef4be759999aebfadfead0d6e62af35c51bdc47730de4a9fe44
+size 4982953168

model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71410e63d4228e1e273e73b64be4f576c2df7b294e44c58ed37335fa6bda64a1
+size 4982953200

model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b87e4a8148ff22fdb3fd1b1f5a5d8b960f51571070581cbfe0b949ef6e68f8d
+size 2113988336
model.safetensors.index.json ADDED
@@ -0,0 +1,261 @@
+{
+  "metadata": {
+    "total_size": 17075361792
+  },
+  "weight_map": {
+    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+    "model.norm.weight": "model-00004-of-00004.safetensors"
+  }
+}
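The index maps every tensor name to the shard that stores it. A small summarizing sketch, assuming a local copy of the file; note that `total_size` is in bytes, so with bfloat16 weights (2 bytes each) it corresponds to roughly 8.5B parameters:

```python
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Tensors per shard.
print(Counter(index["weight_map"].values()))

# Approximate parameter count from the byte size (bfloat16 = 2 bytes/param).
print(index["metadata"]["total_size"] / 2 / 1e9, "B params")  # ~8.54
```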
runs/Sep21_12-42-46_65ecb96dba42/events.out.tfevents.1726922648.65ecb96dba42.271982.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:011635d02f6fd04bdfb3fda4f1d7a2ec270f98d8b4119ff94717d3117fbbe82b
+size 88906
special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+{
+  "additional_special_tokens": [
+    {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    },
+    {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false
+    }
+  ],
+  "bos_token": "<|im_start|>",
+  "eos_token": "<|im_end|>",
+  "pad_token": "<|im_end|>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:322a5f52ab5cab196761ab397a022d6fa3a2e1418585e532bb6efb2fedd2ae94
+size 17477501
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<eos>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<bos>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "106": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "107": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": "<|im_start|>",
+  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "legacy": null,
+  "model_max_length": 2048,
+  "pad_token": "<|im_end|>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "GemmaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
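The `chat_template` above is the ChatML format. A short sketch of how it renders, assuming the tokenizer is loaded from this checkpoint (repo id assumed as before):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("silviasapora/gemma-7b-borpo")  # assumed repo id

messages = [{"role": "user", "content": "Hello!"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```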
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 2.9904761904761905,
+    "total_flos": 0.0,
+    "train_loss": 1.4937715783493788,
+    "train_runtime": 12592.9879,
+    "train_samples": 5034,
+    "train_samples_per_second": 1.199,
+    "train_steps_per_second": 0.037
+}
trainer_state.json ADDED
@@ -0,0 +1,1791 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.9904761904761905,
+  "eval_steps": 500,
+  "global_step": 471,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.031746031746031744,
+      "grad_norm": 1682.7587890625,
+      "learning_rate": 2.5000000000000004e-07,
+      "log_odds_chosen": 0.019949015229940414,
+      "log_odds_ratio": -1.0337324142456055,
+      "logits/chosen": 250.0281982421875,
+      "logits/rejected": 241.27963256835938,
+      "logps/chosen": -15.414708137512207,
+      "logps/rejected": -15.434652328491211,
+      "loss": 14.8392,
+      "nll_loss": 14.92613697052002,
+      "rewards/accuracies": 0.4000000059604645,
+      "rewards/chosen": -0.7707353830337524,
+      "rewards/margins": 0.0009973436826840043,
+      "rewards/rejected": -0.7717326879501343,
+      "step": 5
+    },
+    {
+      "epoch": 0.06349206349206349,
+      "grad_norm": 1073.85595703125,
+      "learning_rate": 5.000000000000001e-07,
+      "log_odds_chosen": 0.10046229511499405,
+      "log_odds_ratio": -0.9202533960342407,
+      "logits/chosen": 252.1835479736328,
+      "logits/rejected": 275.7188415527344,
+      "logps/chosen": -12.929780960083008,
+      "logps/rejected": -13.030034065246582,
+      "loss": 12.9337,
+      "nll_loss": 12.802912712097168,
+      "rewards/accuracies": 0.3499999940395355,
+      "rewards/chosen": -0.6464890241622925,
+      "rewards/margins": 0.005012729670852423,
+      "rewards/rejected": -0.6515016555786133,
+      "step": 10
+    },
+    {
+      "epoch": 0.09523809523809523,
+      "grad_norm": 566.9100952148438,
+      "learning_rate": 7.5e-07,
+      "log_odds_chosen": -0.22649931907653809,
+      "log_odds_ratio": -1.288556694984436,
+      "logits/chosen": 297.252685546875,
+      "logits/rejected": 288.51727294921875,
+      "logps/chosen": -8.612268447875977,
+      "logps/rejected": -8.385721206665039,
+      "loss": 8.2059,
+      "nll_loss": 8.307785034179688,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": -0.4306134283542633,
+      "rewards/margins": -0.011327351443469524,
+      "rewards/rejected": -0.4192860722541809,
+      "step": 15
+    },
+    {
+      "epoch": 0.12698412698412698,
+      "grad_norm": 190.63539123535156,
+      "learning_rate": 1.0000000000000002e-06,
+      "log_odds_chosen": 0.15076614916324615,
+      "log_odds_ratio": -0.7511703968048096,
+      "logits/chosen": 239.71182250976562,
+      "logits/rejected": 275.3675537109375,
+      "logps/chosen": -4.92425537109375,
+      "logps/rejected": -5.06990385055542,
+      "loss": 5.3341,
+      "nll_loss": 5.193692207336426,
+      "rewards/accuracies": 0.550000011920929,
+      "rewards/chosen": -0.24621276557445526,
+      "rewards/margins": 0.007282428443431854,
+      "rewards/rejected": -0.2534952163696289,
+      "step": 20
+    },
+    {
+      "epoch": 0.15873015873015872,
+      "grad_norm": 108.96724700927734,
+      "learning_rate": 1.25e-06,
+      "log_odds_chosen": -0.32157501578330994,
+      "log_odds_ratio": -1.014737606048584,
+      "logits/chosen": 249.96826171875,
+      "logits/rejected": 277.22991943359375,
+      "logps/chosen": -2.9985427856445312,
+      "logps/rejected": -2.6938087940216064,
+      "loss": 3.2459,
+      "nll_loss": 3.1284079551696777,
+      "rewards/accuracies": 0.3499999940395355,
+      "rewards/chosen": -0.14992713928222656,
+      "rewards/margins": -0.015236688777804375,
+      "rewards/rejected": -0.13469044864177704,
+      "step": 25
+    },
+    {
+      "epoch": 0.19047619047619047,
+      "grad_norm": 114.80986022949219,
+      "learning_rate": 1.5e-06,
+      "log_odds_chosen": 0.16358330845832825,
+      "log_odds_ratio": -0.7382305860519409,
+      "logits/chosen": 335.34710693359375,
+      "logits/rejected": 316.969970703125,
+      "logps/chosen": -1.8879131078720093,
+      "logps/rejected": -2.03938627243042,
+      "loss": 2.5624,
+      "nll_loss": 2.2552151679992676,
+      "rewards/accuracies": 0.6000000238418579,
+      "rewards/chosen": -0.09439565241336823,
+      "rewards/margins": 0.007573665119707584,
+      "rewards/rejected": -0.10196931660175323,
+      "step": 30
+    },
+    {
+      "epoch": 0.2222222222222222,
+      "grad_norm": 103.77478790283203,
+      "learning_rate": 1.75e-06,
+      "log_odds_chosen": 0.19950130581855774,
+      "log_odds_ratio": -0.6767204403877258,
+      "logits/chosen": 402.70550537109375,
+      "logits/rejected": 401.308837890625,
+      "logps/chosen": -1.784985899925232,
+      "logps/rejected": -1.9822971820831299,
+      "loss": 2.0861,
+      "nll_loss": 2.089512586593628,
+      "rewards/accuracies": 0.574999988079071,
+      "rewards/chosen": -0.08924929052591324,
+      "rewards/margins": 0.009865568950772285,
+      "rewards/rejected": -0.09911485761404037,
+      "step": 35
+    },
+    {
+      "epoch": 0.25396825396825395,
+      "grad_norm": 87.8357925415039,
+      "learning_rate": 2.0000000000000003e-06,
+      "log_odds_chosen": 0.2945292890071869,
+      "log_odds_ratio": -0.6519163250923157,
+      "logits/chosen": 389.55194091796875,
+      "logits/rejected": 411.52655029296875,
+      "logps/chosen": -1.5404895544052124,
+      "logps/rejected": -1.7902438640594482,
+      "loss": 2.1696,
+      "nll_loss": 1.986445426940918,
+      "rewards/accuracies": 0.625,
+      "rewards/chosen": -0.07702446728944778,
+      "rewards/margins": 0.012487736530601978,
+      "rewards/rejected": -0.08951220661401749,
+      "step": 40
+    },
+    {
+      "epoch": 0.2857142857142857,
+      "grad_norm": 43.71113967895508,
+      "learning_rate": 2.25e-06,
+      "log_odds_chosen": 0.16207186877727509,
+      "log_odds_ratio": -0.7175930142402649,
+      "logits/chosen": 377.09368896484375,
+      "logits/rejected": 402.40277099609375,
+      "logps/chosen": -1.3781359195709229,
+      "logps/rejected": -1.5260416269302368,
+      "loss": 1.9061,
+      "nll_loss": 1.8738489151000977,
+      "rewards/accuracies": 0.574999988079071,
+      "rewards/chosen": -0.06890679895877838,
+      "rewards/margins": 0.007395277731120586,
+      "rewards/rejected": -0.07630207389593124,
+      "step": 45
+    },
+    {
+      "epoch": 0.31746031746031744,
+      "grad_norm": 72.43922424316406,
+      "learning_rate": 2.5e-06,
+      "log_odds_chosen": -0.16915926337242126,
+      "log_odds_ratio": -0.8527873158454895,
+      "logits/chosen": 398.2160949707031,
+      "logits/rejected": 385.0538330078125,
+      "logps/chosen": -1.474123239517212,
+      "logps/rejected": -1.3540009260177612,
+      "loss": 1.8456,
+      "nll_loss": 1.6751617193222046,
+      "rewards/accuracies": 0.4000000059604645,
+      "rewards/chosen": -0.07370616495609283,
+      "rewards/margins": -0.00600611325353384,
+      "rewards/rejected": -0.06770005077123642,
+      "step": 50
+    },
+    {
+      "epoch": 0.3492063492063492,
+      "grad_norm": 191.2710418701172,
+      "learning_rate": 2.7500000000000004e-06,
+      "log_odds_chosen": -0.08151008188724518,
+      "log_odds_ratio": -0.8122876286506653,
+      "logits/chosen": 402.912841796875,
+      "logits/rejected": 368.6970520019531,
+      "logps/chosen": -1.4740947484970093,
+      "logps/rejected": -1.3778045177459717,
+      "loss": 1.711,
+      "nll_loss": 1.6115481853485107,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": -0.07370473444461823,
+      "rewards/margins": -0.004814522340893745,
+      "rewards/rejected": -0.06889022886753082,
+      "step": 55
+    },
+    {
+      "epoch": 0.38095238095238093,
+      "grad_norm": 44.32540512084961,
+      "learning_rate": 3e-06,
+      "log_odds_chosen": 0.23596720397472382,
+      "log_odds_ratio": -0.6497625708580017,
+      "logits/chosen": 352.0681457519531,
+      "logits/rejected": 393.3800354003906,
+      "logps/chosen": -1.2096924781799316,
+      "logps/rejected": -1.3974428176879883,
+      "loss": 1.7459,
+      "nll_loss": 1.6499731540679932,
+      "rewards/accuracies": 0.6000000238418579,
+      "rewards/chosen": -0.0604846253991127,
+      "rewards/margins": 0.009387515485286713,
+      "rewards/rejected": -0.06987214088439941,
+      "step": 60
+    },
+    {
+      "epoch": 0.4126984126984127,
+      "grad_norm": 42.01204299926758,
+      "learning_rate": 3.2500000000000002e-06,
+      "log_odds_chosen": -0.05983946472406387,
+      "log_odds_ratio": -0.833350658416748,
+      "logits/chosen": 355.71978759765625,
+      "logits/rejected": 387.98907470703125,
+      "logps/chosen": -1.2814446687698364,
+      "logps/rejected": -1.2495291233062744,
+      "loss": 1.6973,
+      "nll_loss": 1.529348611831665,
+      "rewards/accuracies": 0.4000000059604645,
+      "rewards/chosen": -0.06407222896814346,
+      "rewards/margins": -0.0015957739669829607,
+      "rewards/rejected": -0.06247646361589432,
+      "step": 65
+    },
+    {
+      "epoch": 0.4444444444444444,
+      "grad_norm": 73.30603790283203,
+      "learning_rate": 3.5e-06,
+      "log_odds_chosen": 0.24904093146324158,
+      "log_odds_ratio": -0.6408222317695618,
+      "logits/chosen": 389.5067443847656,
+      "logits/rejected": 379.82989501953125,
+      "logps/chosen": -1.2020925283432007,
+      "logps/rejected": -1.368801474571228,
+      "loss": 1.6581,
+      "nll_loss": 1.5083438158035278,
+      "rewards/accuracies": 0.6000000238418579,
+      "rewards/chosen": -0.06010463088750839,
+      "rewards/margins": 0.008335437625646591,
+      "rewards/rejected": -0.06844006478786469,
+      "step": 70
+    },
+    {
+      "epoch": 0.47619047619047616,
+      "grad_norm": 39.53456497192383,
+      "learning_rate": 3.7500000000000005e-06,
+      "log_odds_chosen": 0.2291734218597412,
+      "log_odds_ratio": -0.7071748971939087,
+      "logits/chosen": 414.55291748046875,
+      "logits/rejected": 395.8731994628906,
+      "logps/chosen": -1.2737112045288086,
+      "logps/rejected": -1.4267314672470093,
+      "loss": 1.7913,
+      "nll_loss": 1.7599716186523438,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": -0.06368555873632431,
+      "rewards/margins": 0.00765102356672287,
+      "rewards/rejected": -0.07133658230304718,
+      "step": 75
+    },
+    {
+      "epoch": 0.5079365079365079,
+      "grad_norm": 32.64231872558594,
+      "learning_rate": 4.000000000000001e-06,
+      "log_odds_chosen": 0.3214460611343384,
+      "log_odds_ratio": -0.6462022066116333,
+      "logits/chosen": 416.7820739746094,
+      "logits/rejected": 400.43658447265625,
+      "logps/chosen": -1.1434603929519653,
+      "logps/rejected": -1.3834912776947021,
+      "loss": 1.67,
+      "nll_loss": 1.6429141759872437,
+      "rewards/accuracies": 0.574999988079071,
+      "rewards/chosen": -0.057173021137714386,
+      "rewards/margins": 0.01200154609978199,
+      "rewards/rejected": -0.06917456537485123,
+      "step": 80
+    },
+    {
+      "epoch": 0.5396825396825397,
+      "grad_norm": 30.180082321166992,
+      "learning_rate": 4.25e-06,
+      "log_odds_chosen": 0.202164888381958,
+      "log_odds_ratio": -0.6881243586540222,
+      "logits/chosen": 406.5018310546875,
+      "logits/rejected": 416.58203125,
+      "logps/chosen": -1.2485874891281128,
+      "logps/rejected": -1.4020246267318726,
+      "loss": 1.6165,
+      "nll_loss": 1.5869615077972412,
+      "rewards/accuracies": 0.6000000238418579,
+      "rewards/chosen": -0.06242937967181206,
+      "rewards/margins": 0.007671858184039593,
+      "rewards/rejected": -0.07010124623775482,
+      "step": 85
+    },
+    {
+      "epoch": 0.5714285714285714,
+      "grad_norm": 122.0818862915039,
+      "learning_rate": 4.5e-06,
+      "log_odds_chosen": 0.013370111584663391,
+      "log_odds_ratio": -0.7374812960624695,
+      "logits/chosen": 392.1935729980469,
+      "logits/rejected": 372.4733581542969,
+      "logps/chosen": -1.0436148643493652,
+      "logps/rejected": -1.0640267133712769,
+      "loss": 1.4603,
+      "nll_loss": 1.4258912801742554,
+      "rewards/accuracies": 0.4000000059604645,
+      "rewards/chosen": -0.05218074470758438,
+      "rewards/margins": 0.001020595314912498,
+      "rewards/rejected": -0.0532013364136219,
+      "step": 90
+    },
+    {
+      "epoch": 0.6031746031746031,
+      "grad_norm": 47.91487121582031,
+      "learning_rate": 4.75e-06,
+      "log_odds_chosen": -0.2634345293045044,
+      "log_odds_ratio": -0.944506049156189,
+      "logits/chosen": 402.14569091796875,
+      "logits/rejected": 376.51416015625,
+      "logps/chosen": -1.3972222805023193,
+      "logps/rejected": -1.2059199810028076,
+      "loss": 1.5678,
+      "nll_loss": 1.6941537857055664,
+      "rewards/accuracies": 0.42500001192092896,
+      "rewards/chosen": -0.06986111402511597,
+      "rewards/margins": -0.009565119631588459,
+      "rewards/rejected": -0.060295987874269485,
+      "step": 95
+    },
+    {
+      "epoch": 0.6349206349206349,
+      "grad_norm": 29.921293258666992,
+      "learning_rate": 5e-06,
+      "log_odds_chosen": 0.16289451718330383,
+      "log_odds_ratio": -0.7408555746078491,
+      "logits/chosen": 414.12579345703125,
+      "logits/rejected": 409.37408447265625,
+      "logps/chosen": -1.2401769161224365,
+      "logps/rejected": -1.3893964290618896,
+      "loss": 1.6149,
+      "nll_loss": 1.531909465789795,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": -0.062008846551179886,
+      "rewards/margins": 0.007460971362888813,
+      "rewards/rejected": -0.06946982443332672,
+      "step": 100
+    },
+    {
+      "epoch": 0.6666666666666666,
+      "grad_norm": 27.314743041992188,
+      "learning_rate": 4.8795003647426654e-06,
+      "log_odds_chosen": 0.32554125785827637,
+      "log_odds_ratio": -0.6547573804855347,
+      "logits/chosen": 419.6529235839844,
+      "logits/rejected": 403.6163330078125,
+      "logps/chosen": -1.074582576751709,
+      "logps/rejected": -1.3015466928482056,
+      "loss": 1.4794,
+      "nll_loss": 1.4857242107391357,
+      "rewards/accuracies": 0.6499999761581421,
+      "rewards/chosen": -0.05372912809252739,
+      "rewards/margins": 0.011348200961947441,
+      "rewards/rejected": -0.06507732719182968,
+      "step": 105
+    },
+    {
+      "epoch": 0.6984126984126984,
+      "grad_norm": 37.0976448059082,
+      "learning_rate": 4.767312946227961e-06,
+      "log_odds_chosen": 0.26148396730422974,
+      "log_odds_ratio": -0.6971127390861511,
+      "logits/chosen": 383.90203857421875,
+      "logits/rejected": 392.50604248046875,
+      "logps/chosen": -1.01774001121521,
+      "logps/rejected": -1.1847659349441528,
+      "loss": 1.5732,
+      "nll_loss": 1.6914262771606445,
+      "rewards/accuracies": 0.5,
+      "rewards/chosen": -0.05088699609041214,
+      "rewards/margins": 0.00835130549967289,
+      "rewards/rejected": -0.05923830717802048,
+      "step": 110
+    },
+    {
+      "epoch": 0.7301587301587301,
+      "grad_norm": 22.67607879638672,
+      "learning_rate": 4.662524041201569e-06,
+      "log_odds_chosen": 0.5929769277572632,
+      "log_odds_ratio": -0.5206801295280457,
+      "logits/chosen": 403.47882080078125,
+      "logits/rejected": 381.8203125,
+      "logps/chosen": -1.0154887437820435,
+      "logps/rejected": -1.3891681432724,
+      "loss": 1.5678,
+      "nll_loss": 1.5397897958755493,
+      "rewards/accuracies": 0.699999988079071,
+      "rewards/chosen": -0.050774432718753815,
+      "rewards/margins": 0.01868397556245327,
+      "rewards/rejected": -0.06945841014385223,
+      "step": 115
+    },
+    {
+      "epoch": 0.7619047619047619,
+      "grad_norm": 19.351375579833984,
+      "learning_rate": 4.564354645876385e-06,
+      "log_odds_chosen": 0.18771110475063324,
+      "log_odds_ratio": -0.7007285356521606,
+      "logits/chosen": 403.85211181640625,
+      "logits/rejected": 380.380859375,
+      "logps/chosen": -0.9934407472610474,
+      "logps/rejected": -1.1065788269042969,
+      "loss": 1.4796,
+      "nll_loss": 1.3748087882995605,
+      "rewards/accuracies": 0.44999998807907104,
+      "rewards/chosen": -0.04967203736305237,
+      "rewards/margins": 0.005656905937939882,
+      "rewards/rejected": -0.05532894283533096,
+      "step": 120
+    },
+    {
+      "epoch": 0.7936507936507936,
+      "grad_norm": 82.66716003417969,
+      "learning_rate": 4.47213595499958e-06,
+      "log_odds_chosen": 0.24183085560798645,
+      "log_odds_ratio": -0.7099695801734924,
+      "logits/chosen": 414.0562438964844,
+      "logits/rejected": 379.0338439941406,
+      "logps/chosen": -1.1751952171325684,
+      "logps/rejected": -1.3275480270385742,
+      "loss": 1.557,
+      "nll_loss": 1.7125810384750366,
+      "rewards/accuracies": 0.5249999761581421,
+      "rewards/chosen": -0.05875976011157036,
+      "rewards/margins": 0.007617638912051916,
|
458 |
+
"rewards/rejected": -0.06637740135192871,
|
459 |
+
"step": 125
|
460 |
+
},
|
461 |
+
{
|
462 |
+
"epoch": 0.8253968253968254,
|
463 |
+
"grad_norm": 77.14762115478516,
|
464 |
+
"learning_rate": 4.385290096535147e-06,
|
465 |
+
"log_odds_chosen": -0.02768993005156517,
|
466 |
+
"log_odds_ratio": -0.8408949971199036,
|
467 |
+
"logits/chosen": 408.2793273925781,
|
468 |
+
"logits/rejected": 362.93756103515625,
|
469 |
+
"logps/chosen": -1.183045744895935,
|
470 |
+
"logps/rejected": -1.150722861289978,
|
471 |
+
"loss": 1.5911,
|
472 |
+
"nll_loss": 1.608017921447754,
|
473 |
+
"rewards/accuracies": 0.4749999940395355,
|
474 |
+
"rewards/chosen": -0.05915228649973869,
|
475 |
+
"rewards/margins": -0.0016161398962140083,
|
476 |
+
"rewards/rejected": -0.05753614753484726,
|
477 |
+
"step": 130
|
478 |
+
},
|
479 |
+
{
|
480 |
+
"epoch": 0.8571428571428571,
|
481 |
+
"grad_norm": 38.051902770996094,
|
482 |
+
"learning_rate": 4.303314829119352e-06,
|
483 |
+
"log_odds_chosen": 0.4119636118412018,
|
484 |
+
"log_odds_ratio": -0.6424974203109741,
|
485 |
+
"logits/chosen": 396.2297058105469,
|
486 |
+
"logits/rejected": 415.40289306640625,
|
487 |
+
"logps/chosen": -1.023511290550232,
|
488 |
+
"logps/rejected": -1.3654203414916992,
|
489 |
+
"loss": 1.4791,
|
490 |
+
"nll_loss": 1.5638678073883057,
|
491 |
+
"rewards/accuracies": 0.6000000238418579,
|
492 |
+
"rewards/chosen": -0.0511755645275116,
|
493 |
+
"rewards/margins": 0.017095452174544334,
|
494 |
+
"rewards/rejected": -0.06827102601528168,
|
495 |
+
"step": 135
|
496 |
+
},
|
497 |
+
{
|
498 |
+
"epoch": 0.8888888888888888,
|
499 |
+
"grad_norm": 18.50411605834961,
|
500 |
+
"learning_rate": 4.2257712736425835e-06,
|
501 |
+
"log_odds_chosen": 0.5941201448440552,
|
502 |
+
"log_odds_ratio": -0.629595160484314,
|
503 |
+
"logits/chosen": 382.5362548828125,
|
504 |
+
"logits/rejected": 439.97161865234375,
|
505 |
+
"logps/chosen": -1.0793473720550537,
|
506 |
+
"logps/rejected": -1.5278475284576416,
|
507 |
+
"loss": 1.4732,
|
508 |
+
"nll_loss": 1.4305188655853271,
|
509 |
+
"rewards/accuracies": 0.675000011920929,
|
510 |
+
"rewards/chosen": -0.053967367857694626,
|
511 |
+
"rewards/margins": 0.022425005212426186,
|
512 |
+
"rewards/rejected": -0.07639236748218536,
|
513 |
+
"step": 140
|
514 |
+
},
|
515 |
+
{
|
516 |
+
"epoch": 0.9206349206349206,
|
517 |
+
"grad_norm": 29.88001251220703,
|
518 |
+
"learning_rate": 4.1522739926869985e-06,
|
519 |
+
"log_odds_chosen": 0.43660497665405273,
|
520 |
+
"log_odds_ratio": -0.6272520422935486,
|
521 |
+
"logits/chosen": 405.6814270019531,
|
522 |
+
"logits/rejected": 429.1936950683594,
|
523 |
+
"logps/chosen": -1.0074222087860107,
|
524 |
+
"logps/rejected": -1.279060959815979,
|
525 |
+
"loss": 1.429,
|
526 |
+
"nll_loss": 1.431378722190857,
|
527 |
+
"rewards/accuracies": 0.6000000238418579,
|
528 |
+
"rewards/chosen": -0.05037111043930054,
|
529 |
+
"rewards/margins": 0.013581933453679085,
|
530 |
+
"rewards/rejected": -0.06395303457975388,
|
531 |
+
"step": 145
|
532 |
+
},
|
533 |
+
{
|
534 |
+
"epoch": 0.9523809523809523,
|
535 |
+
"grad_norm": 21.189523696899414,
|
536 |
+
"learning_rate": 4.082482904638631e-06,
|
537 |
+
"log_odds_chosen": 0.09152127802371979,
|
538 |
+
"log_odds_ratio": -0.7851961851119995,
|
539 |
+
"logits/chosen": 413.9974670410156,
|
540 |
+
"logits/rejected": 395.56390380859375,
|
541 |
+
"logps/chosen": -1.102851390838623,
|
542 |
+
"logps/rejected": -1.1990187168121338,
|
543 |
+
"loss": 1.4701,
|
544 |
+
"nll_loss": 1.5602357387542725,
|
545 |
+
"rewards/accuracies": 0.574999988079071,
|
546 |
+
"rewards/chosen": -0.05514257028698921,
|
547 |
+
"rewards/margins": 0.004808364901691675,
|
548 |
+
"rewards/rejected": -0.05995092913508415,
|
549 |
+
"step": 150
|
550 |
+
},
|
551 |
+
{
|
552 |
+
"epoch": 0.9841269841269841,
|
553 |
+
"grad_norm": 46.408260345458984,
|
554 |
+
"learning_rate": 4.016096644512495e-06,
|
555 |
+
"log_odds_chosen": 0.1732434332370758,
|
556 |
+
"log_odds_ratio": -0.7032361626625061,
|
557 |
+
"logits/chosen": 387.8311767578125,
|
558 |
+
"logits/rejected": 406.3460693359375,
|
559 |
+
"logps/chosen": -1.0525623559951782,
|
560 |
+
"logps/rejected": -1.2012317180633545,
|
561 |
+
"loss": 1.4516,
|
562 |
+
"nll_loss": 1.3833885192871094,
|
563 |
+
"rewards/accuracies": 0.5249999761581421,
|
564 |
+
"rewards/chosen": -0.05262812227010727,
|
565 |
+
"rewards/margins": 0.007433467544615269,
|
566 |
+
"rewards/rejected": -0.06006159260869026,
|
567 |
+
"step": 155
|
568 |
+
},
|
569 |
+
{
|
570 |
+
"epoch": 0.9968253968253968,
|
571 |
+
"eval_log_odds_chosen": 0.19107408821582794,
|
572 |
+
"eval_log_odds_ratio": -0.6924421191215515,
|
573 |
+
"eval_logits/chosen": 321.94952392578125,
|
574 |
+
"eval_logits/rejected": 293.8872375488281,
|
575 |
+
"eval_logps/chosen": -1.0260426998138428,
|
576 |
+
"eval_logps/rejected": -1.1547330617904663,
|
577 |
+
"eval_loss": 1.4765217304229736,
|
578 |
+
"eval_nll_loss": 1.4281681776046753,
|
579 |
+
"eval_rewards/accuracies": 0.5467625856399536,
|
580 |
+
"eval_rewards/chosen": -0.051302142441272736,
|
581 |
+
"eval_rewards/margins": 0.006434514187276363,
|
582 |
+
"eval_rewards/rejected": -0.05773665010929108,
|
583 |
+
"eval_runtime": 112.3635,
|
584 |
+
"eval_samples_per_second": 4.922,
|
585 |
+
"eval_steps_per_second": 1.237,
|
586 |
+
"step": 157
|
587 |
+
},
|
588 |
+
{
|
589 |
+
"epoch": 1.0158730158730158,
|
590 |
+
"grad_norm": 25.751989364624023,
|
591 |
+
"learning_rate": 3.952847075210474e-06,
|
592 |
+
"log_odds_chosen": 0.12412871420383453,
|
593 |
+
"log_odds_ratio": -0.7145590782165527,
|
594 |
+
"logits/chosen": 387.1231384277344,
|
595 |
+
"logits/rejected": 377.3012390136719,
|
596 |
+
"logps/chosen": -0.9075422286987305,
|
597 |
+
"logps/rejected": -0.9930707812309265,
|
598 |
+
"loss": 1.2728,
|
599 |
+
"nll_loss": 1.2198539972305298,
|
600 |
+
"rewards/accuracies": 0.550000011920929,
|
601 |
+
"rewards/chosen": -0.04537711292505264,
|
602 |
+
"rewards/margins": 0.004276423715054989,
|
603 |
+
"rewards/rejected": -0.049653537571430206,
|
604 |
+
"step": 160
|
605 |
+
},
|
606 |
+
{
|
607 |
+
"epoch": 1.0476190476190477,
|
608 |
+
"grad_norm": 32.28386688232422,
|
609 |
+
"learning_rate": 3.892494720807615e-06,
|
610 |
+
"log_odds_chosen": 1.30362868309021,
|
611 |
+
"log_odds_ratio": -0.3633436858654022,
|
612 |
+
"logits/chosen": 391.3251953125,
|
613 |
+
"logits/rejected": 433.5608825683594,
|
614 |
+
"logps/chosen": -0.6450636982917786,
|
615 |
+
"logps/rejected": -1.4025341272354126,
|
616 |
+
"loss": 1.0741,
|
617 |
+
"nll_loss": 1.082463264465332,
|
618 |
+
"rewards/accuracies": 0.875,
|
619 |
+
"rewards/chosen": -0.03225318342447281,
|
620 |
+
"rewards/margins": 0.0378735177218914,
|
621 |
+
"rewards/rejected": -0.07012669742107391,
|
622 |
+
"step": 165
|
623 |
+
},
|
624 |
+
{
|
625 |
+
"epoch": 1.0793650793650793,
|
626 |
+
"grad_norm": 15.754105567932129,
|
627 |
+
"learning_rate": 3.834824944236852e-06,
|
628 |
+
"log_odds_chosen": 1.1329152584075928,
|
629 |
+
"log_odds_ratio": -0.408731073141098,
|
630 |
+
"logits/chosen": 365.712158203125,
|
631 |
+
"logits/rejected": 407.73284912109375,
|
632 |
+
"logps/chosen": -0.6770078539848328,
|
633 |
+
"logps/rejected": -1.3867766857147217,
|
634 |
+
"loss": 1.0344,
|
635 |
+
"nll_loss": 0.9729477167129517,
|
636 |
+
"rewards/accuracies": 0.7749999761581421,
|
637 |
+
"rewards/chosen": -0.03385039418935776,
|
638 |
+
"rewards/margins": 0.035488441586494446,
|
639 |
+
"rewards/rejected": -0.0693388432264328,
|
640 |
+
"step": 170
|
641 |
+
},
|
642 |
+
{
|
643 |
+
"epoch": 1.1111111111111112,
|
644 |
+
"grad_norm": 38.7343864440918,
|
645 |
+
"learning_rate": 3.7796447300922724e-06,
|
646 |
+
"log_odds_chosen": 1.06243896484375,
|
647 |
+
"log_odds_ratio": -0.3898783326148987,
|
648 |
+
"logits/chosen": 381.17822265625,
|
649 |
+
"logits/rejected": 380.6576232910156,
|
650 |
+
"logps/chosen": -0.6825309991836548,
|
651 |
+
"logps/rejected": -1.2429835796356201,
|
652 |
+
"loss": 1.1273,
|
653 |
+
"nll_loss": 1.1054210662841797,
|
654 |
+
"rewards/accuracies": 0.8999999761581421,
|
655 |
+
"rewards/chosen": -0.03412654995918274,
|
656 |
+
"rewards/margins": 0.028022626414895058,
|
657 |
+
"rewards/rejected": -0.062149178236722946,
|
658 |
+
"step": 175
|
659 |
+
},
|
660 |
+
{
|
661 |
+
"epoch": 1.1428571428571428,
|
662 |
+
"grad_norm": 41.509403228759766,
|
663 |
+
"learning_rate": 3.72677996249965e-06,
|
664 |
+
"log_odds_chosen": 0.7714899182319641,
|
665 |
+
"log_odds_ratio": -0.48040905594825745,
|
666 |
+
"logits/chosen": 385.80029296875,
|
667 |
+
"logits/rejected": 383.924560546875,
|
668 |
+
"logps/chosen": -0.8479583859443665,
|
669 |
+
"logps/rejected": -1.3133970499038696,
|
670 |
+
"loss": 1.1486,
|
671 |
+
"nll_loss": 1.1066267490386963,
|
672 |
+
"rewards/accuracies": 0.7749999761581421,
|
673 |
+
"rewards/chosen": -0.042397916316986084,
|
674 |
+
"rewards/margins": 0.023271935060620308,
|
675 |
+
"rewards/rejected": -0.06566984951496124,
|
676 |
+
"step": 180
|
677 |
+
},
|
678 |
+
{
|
679 |
+
"epoch": 1.1746031746031746,
|
680 |
+
"grad_norm": 33.1015510559082,
|
681 |
+
"learning_rate": 3.6760731104690393e-06,
|
682 |
+
"log_odds_chosen": 0.9662767648696899,
|
683 |
+
"log_odds_ratio": -0.42013198137283325,
|
684 |
+
"logits/chosen": 390.68731689453125,
|
685 |
+
"logits/rejected": 372.36456298828125,
|
686 |
+
"logps/chosen": -0.6732779741287231,
|
687 |
+
"logps/rejected": -1.1919136047363281,
|
688 |
+
"loss": 1.1507,
|
689 |
+
"nll_loss": 1.2062638998031616,
|
690 |
+
"rewards/accuracies": 0.824999988079071,
|
691 |
+
"rewards/chosen": -0.03366389125585556,
|
692 |
+
"rewards/margins": 0.025931786745786667,
|
693 |
+
"rewards/rejected": -0.059595685452222824,
|
694 |
+
"step": 185
|
695 |
+
},
|
696 |
+
{
|
697 |
+
"epoch": 1.2063492063492063,
|
698 |
+
"grad_norm": 14.4358491897583,
|
699 |
+
"learning_rate": 3.6273812505500587e-06,
|
700 |
+
"log_odds_chosen": 0.8507558703422546,
|
701 |
+
"log_odds_ratio": -0.5280704498291016,
|
702 |
+
"logits/chosen": 412.74951171875,
|
703 |
+
"logits/rejected": 407.2632751464844,
|
704 |
+
"logps/chosen": -0.7739854454994202,
|
705 |
+
"logps/rejected": -1.3370511531829834,
|
706 |
+
"loss": 1.0636,
|
707 |
+
"nll_loss": 1.1027393341064453,
|
708 |
+
"rewards/accuracies": 0.7250000238418579,
|
709 |
+
"rewards/chosen": -0.03869926929473877,
|
710 |
+
"rewards/margins": 0.028153279796242714,
|
711 |
+
"rewards/rejected": -0.06685255467891693,
|
712 |
+
"step": 190
|
713 |
+
},
|
714 |
+
{
|
715 |
+
"epoch": 1.2380952380952381,
|
716 |
+
"grad_norm": 20.473995208740234,
|
717 |
+
"learning_rate": 3.5805743701971648e-06,
|
718 |
+
"log_odds_chosen": 1.13797128200531,
|
719 |
+
"log_odds_ratio": -0.38396361470222473,
|
720 |
+
"logits/chosen": 414.3319396972656,
|
721 |
+
"logits/rejected": 398.20440673828125,
|
722 |
+
"logps/chosen": -0.6320561170578003,
|
723 |
+
"logps/rejected": -1.2892091274261475,
|
724 |
+
"loss": 1.074,
|
725 |
+
"nll_loss": 1.0247658491134644,
|
726 |
+
"rewards/accuracies": 0.824999988079071,
|
727 |
+
"rewards/chosen": -0.031602807343006134,
|
728 |
+
"rewards/margins": 0.032857660204172134,
|
729 |
+
"rewards/rejected": -0.06446046382188797,
|
730 |
+
"step": 195
|
731 |
+
},
|
732 |
+
{
|
733 |
+
"epoch": 1.2698412698412698,
|
734 |
+
"grad_norm": 17.97160530090332,
|
735 |
+
"learning_rate": 3.5355339059327378e-06,
|
736 |
+
"log_odds_chosen": 0.6695777773857117,
|
737 |
+
"log_odds_ratio": -0.5161210894584656,
|
738 |
+
"logits/chosen": 372.59912109375,
|
739 |
+
"logits/rejected": 398.4673156738281,
|
740 |
+
"logps/chosen": -0.7804095149040222,
|
741 |
+
"logps/rejected": -1.1648569107055664,
|
742 |
+
"loss": 1.0069,
|
743 |
+
"nll_loss": 1.056370735168457,
|
744 |
+
"rewards/accuracies": 0.7749999761581421,
|
745 |
+
"rewards/chosen": -0.03902047872543335,
|
746 |
+
"rewards/margins": 0.019222375005483627,
|
747 |
+
"rewards/rejected": -0.05824284628033638,
|
748 |
+
"step": 200
|
749 |
+
},
|
750 |
+
{
|
751 |
+
"epoch": 1.3015873015873016,
|
752 |
+
"grad_norm": 15.39059066772461,
|
753 |
+
"learning_rate": 3.4921514788478916e-06,
|
754 |
+
"log_odds_chosen": 0.9029768109321594,
|
755 |
+
"log_odds_ratio": -0.45210033655166626,
|
756 |
+
"logits/chosen": 383.56378173828125,
|
757 |
+
"logits/rejected": 386.5069274902344,
|
758 |
+
"logps/chosen": -0.7651145458221436,
|
759 |
+
"logps/rejected": -1.2839988470077515,
|
760 |
+
"loss": 1.1163,
|
761 |
+
"nll_loss": 1.1502236127853394,
|
762 |
+
"rewards/accuracies": 0.75,
|
763 |
+
"rewards/chosen": -0.0382557287812233,
|
764 |
+
"rewards/margins": 0.025944212451577187,
|
765 |
+
"rewards/rejected": -0.06419993937015533,
|
766 |
+
"step": 205
|
767 |
+
},
|
768 |
+
{
|
769 |
+
"epoch": 1.3333333333333333,
|
770 |
+
"grad_norm": 14.983006477355957,
|
771 |
+
"learning_rate": 3.450327796711771e-06,
|
772 |
+
"log_odds_chosen": 1.0162965059280396,
|
773 |
+
"log_odds_ratio": -0.42147356271743774,
|
774 |
+
"logits/chosen": 394.2404479980469,
|
775 |
+
"logits/rejected": 420.30450439453125,
|
776 |
+
"logps/chosen": -0.7215703725814819,
|
777 |
+
"logps/rejected": -1.2990248203277588,
|
778 |
+
"loss": 1.056,
|
779 |
+
"nll_loss": 0.9170693159103394,
|
780 |
+
"rewards/accuracies": 0.824999988079071,
|
781 |
+
"rewards/chosen": -0.036078520119190216,
|
782 |
+
"rewards/margins": 0.02887272834777832,
|
783 |
+
"rewards/rejected": -0.06495124846696854,
|
784 |
+
"step": 210
|
785 |
+
},
|
786 |
+
{
|
787 |
+
"epoch": 1.3650793650793651,
|
788 |
+
"grad_norm": 13.9975004196167,
|
789 |
+
"learning_rate": 3.409971697352368e-06,
|
790 |
+
"log_odds_chosen": 0.8547927141189575,
|
791 |
+
"log_odds_ratio": -0.4758794903755188,
|
792 |
+
"logits/chosen": 415.505859375,
|
793 |
+
"logits/rejected": 414.4971618652344,
|
794 |
+
"logps/chosen": -0.7432714700698853,
|
795 |
+
"logps/rejected": -1.159825086593628,
|
796 |
+
"loss": 1.0231,
|
797 |
+
"nll_loss": 0.9773567318916321,
|
798 |
+
"rewards/accuracies": 0.7749999761581421,
|
799 |
+
"rewards/chosen": -0.03716357797384262,
|
800 |
+
"rewards/margins": 0.020827675238251686,
|
801 |
+
"rewards/rejected": -0.05799124762415886,
|
802 |
+
"step": 215
|
803 |
+
},
|
804 |
+
{
|
805 |
+
"epoch": 1.3968253968253967,
|
806 |
+
"grad_norm": 16.370100021362305,
|
807 |
+
"learning_rate": 3.3709993123162106e-06,
|
808 |
+
"log_odds_chosen": 0.9633774757385254,
|
809 |
+
"log_odds_ratio": -0.4300917685031891,
|
810 |
+
"logits/chosen": 405.0268859863281,
|
811 |
+
"logits/rejected": 405.39898681640625,
|
812 |
+
"logps/chosen": -0.6296164393424988,
|
813 |
+
"logps/rejected": -1.0896862745285034,
|
814 |
+
"loss": 1.0243,
|
815 |
+
"nll_loss": 0.9427892565727234,
|
816 |
+
"rewards/accuracies": 0.8500000238418579,
|
817 |
+
"rewards/chosen": -0.0314808264374733,
|
818 |
+
"rewards/margins": 0.02300349436700344,
|
819 |
+
"rewards/rejected": -0.05448431894183159,
|
820 |
+
"step": 220
|
821 |
+
},
|
822 |
+
{
|
823 |
+
"epoch": 1.4285714285714286,
|
824 |
+
"grad_norm": 21.732885360717773,
|
825 |
+
"learning_rate": 3.3333333333333333e-06,
|
826 |
+
"log_odds_chosen": 0.8722143173217773,
|
827 |
+
"log_odds_ratio": -0.49761050939559937,
|
828 |
+
"logits/chosen": 373.18170166015625,
|
829 |
+
"logits/rejected": 408.52191162109375,
|
830 |
+
"logps/chosen": -0.7673987150192261,
|
831 |
+
"logps/rejected": -1.3063347339630127,
|
832 |
+
"loss": 1.0781,
|
833 |
+
"nll_loss": 1.0205880403518677,
|
834 |
+
"rewards/accuracies": 0.699999988079071,
|
835 |
+
"rewards/chosen": -0.03836994245648384,
|
836 |
+
"rewards/margins": 0.026946794241666794,
|
837 |
+
"rewards/rejected": -0.06531673669815063,
|
838 |
+
"step": 225
|
839 |
+
},
|
840 |
+
{
|
841 |
+
"epoch": 1.4603174603174602,
|
842 |
+
"grad_norm": 26.658767700195312,
|
843 |
+
"learning_rate": 3.296902366978936e-06,
|
844 |
+
"log_odds_chosen": 1.3574587106704712,
|
845 |
+
"log_odds_ratio": -0.3206990659236908,
|
846 |
+
"logits/chosen": 397.1745910644531,
|
847 |
+
"logits/rejected": 435.58380126953125,
|
848 |
+
"logps/chosen": -0.5968003869056702,
|
849 |
+
"logps/rejected": -1.3607866764068604,
|
850 |
+
"loss": 1.0374,
|
851 |
+
"nll_loss": 1.0697901248931885,
|
852 |
+
"rewards/accuracies": 0.875,
|
853 |
+
"rewards/chosen": -0.02984001860022545,
|
854 |
+
"rewards/margins": 0.03819932043552399,
|
855 |
+
"rewards/rejected": -0.06803934276103973,
|
856 |
+
"step": 230
|
857 |
+
},
|
858 |
+
{
|
859 |
+
"epoch": 1.492063492063492,
|
860 |
+
"grad_norm": 13.732707977294922,
|
861 |
+
"learning_rate": 3.2616403652672114e-06,
|
862 |
+
"log_odds_chosen": 0.7236745357513428,
|
863 |
+
"log_odds_ratio": -0.5223321914672852,
|
864 |
+
"logits/chosen": 354.2861022949219,
|
865 |
+
"logits/rejected": 388.4797668457031,
|
866 |
+
"logps/chosen": -0.7881089448928833,
|
867 |
+
"logps/rejected": -1.2045094966888428,
|
868 |
+
"loss": 1.1557,
|
869 |
+
"nll_loss": 1.0742206573486328,
|
870 |
+
"rewards/accuracies": 0.75,
|
871 |
+
"rewards/chosen": -0.0394054539501667,
|
872 |
+
"rewards/margins": 0.020820021629333496,
|
873 |
+
"rewards/rejected": -0.0602254755795002,
|
874 |
+
"step": 235
|
875 |
+
},
|
876 |
+
{
|
877 |
+
"epoch": 1.5238095238095237,
|
878 |
+
"grad_norm": 22.118778228759766,
|
879 |
+
"learning_rate": 3.2274861218395142e-06,
|
880 |
+
"log_odds_chosen": 1.1263599395751953,
|
881 |
+
"log_odds_ratio": -0.43313202261924744,
|
882 |
+
"logits/chosen": 382.83905029296875,
|
883 |
+
"logits/rejected": 387.61065673828125,
|
884 |
+
"logps/chosen": -0.7860811948776245,
|
885 |
+
"logps/rejected": -1.4334101676940918,
|
886 |
+
"loss": 1.1383,
|
887 |
+
"nll_loss": 1.2601253986358643,
|
888 |
+
"rewards/accuracies": 0.75,
|
889 |
+
"rewards/chosen": -0.039304058998823166,
|
890 |
+
"rewards/margins": 0.032366447150707245,
|
891 |
+
"rewards/rejected": -0.07167050242424011,
|
892 |
+
"step": 240
|
893 |
+
},
|
894 |
+
{
|
895 |
+
"epoch": 1.5555555555555556,
|
896 |
+
"grad_norm": 22.131145477294922,
|
897 |
+
"learning_rate": 3.1943828249997e-06,
|
898 |
+
"log_odds_chosen": 0.7136174440383911,
|
899 |
+
"log_odds_ratio": -0.5149211883544922,
|
900 |
+
"logits/chosen": 355.15948486328125,
|
901 |
+
"logits/rejected": 387.1158447265625,
|
902 |
+
"logps/chosen": -0.8465463519096375,
|
903 |
+
"logps/rejected": -1.2772014141082764,
|
904 |
+
"loss": 1.0981,
|
905 |
+
"nll_loss": 1.2311415672302246,
|
906 |
+
"rewards/accuracies": 0.699999988079071,
|
907 |
+
"rewards/chosen": -0.042327314615249634,
|
908 |
+
"rewards/margins": 0.021532755345106125,
|
909 |
+
"rewards/rejected": -0.06386007368564606,
|
910 |
+
"step": 245
|
911 |
+
},
|
912 |
+
{
|
913 |
+
"epoch": 1.5873015873015874,
|
914 |
+
"grad_norm": 19.921703338623047,
|
915 |
+
"learning_rate": 3.1622776601683796e-06,
|
916 |
+
"log_odds_chosen": 1.1693449020385742,
|
917 |
+
"log_odds_ratio": -0.3576763868331909,
|
918 |
+
"logits/chosen": 408.05621337890625,
|
919 |
+
"logits/rejected": 368.35400390625,
|
920 |
+
"logps/chosen": -0.6127609014511108,
|
921 |
+
"logps/rejected": -1.2678160667419434,
|
922 |
+
"loss": 1.0755,
|
923 |
+
"nll_loss": 0.9913685917854309,
|
924 |
+
"rewards/accuracies": 0.875,
|
925 |
+
"rewards/chosen": -0.030638042837381363,
|
926 |
+
"rewards/margins": 0.032752759754657745,
|
927 |
+
"rewards/rejected": -0.0633908063173294,
|
928 |
+
"step": 250
|
929 |
+
},
|
930 |
+
{
|
931 |
+
"epoch": 1.619047619047619,
|
932 |
+
"grad_norm": 17.49940299987793,
|
933 |
+
"learning_rate": 3.131121455425748e-06,
|
934 |
+
"log_odds_chosen": 0.8926092982292175,
|
935 |
+
"log_odds_ratio": -0.4545460343360901,
|
936 |
+
"logits/chosen": 421.48529052734375,
|
937 |
+
"logits/rejected": 387.22784423828125,
|
938 |
+
"logps/chosen": -0.6890762448310852,
|
939 |
+
"logps/rejected": -1.1581443548202515,
|
940 |
+
"loss": 1.0777,
|
941 |
+
"nll_loss": 1.068838357925415,
|
942 |
+
"rewards/accuracies": 0.800000011920929,
|
943 |
+
"rewards/chosen": -0.03445381671190262,
|
944 |
+
"rewards/margins": 0.023453406989574432,
|
945 |
+
"rewards/rejected": -0.057907216250896454,
|
946 |
+
"step": 255
|
947 |
+
},
|
948 |
+
{
|
949 |
+
"epoch": 1.6507936507936507,
|
950 |
+
"grad_norm": 13.594038009643555,
|
951 |
+
"learning_rate": 3.1008683647302113e-06,
|
952 |
+
"log_odds_chosen": 1.0179307460784912,
|
953 |
+
"log_odds_ratio": -0.4250411093235016,
|
954 |
+
"logits/chosen": 393.05145263671875,
|
955 |
+
"logits/rejected": 409.2987365722656,
|
956 |
+
"logps/chosen": -0.7133018970489502,
|
957 |
+
"logps/rejected": -1.2928727865219116,
|
958 |
+
"loss": 1.0383,
|
959 |
+
"nll_loss": 1.0420652627944946,
|
960 |
+
"rewards/accuracies": 0.824999988079071,
|
961 |
+
"rewards/chosen": -0.03566509857773781,
|
962 |
+
"rewards/margins": 0.02897855080664158,
|
963 |
+
"rewards/rejected": -0.06464364379644394,
|
964 |
+
"step": 260
|
965 |
+
},
|
966 |
+
{
|
967 |
+
"epoch": 1.6825396825396826,
|
968 |
+
"grad_norm": 37.5067138671875,
|
969 |
+
"learning_rate": 3.0714755841697565e-06,
|
970 |
+
"log_odds_chosen": 0.7450331449508667,
|
971 |
+
"log_odds_ratio": -0.469769150018692,
|
972 |
+
"logits/chosen": 389.87579345703125,
|
973 |
+
"logits/rejected": 387.38641357421875,
|
974 |
+
"logps/chosen": -0.7393844723701477,
|
975 |
+
"logps/rejected": -1.1401046514511108,
|
976 |
+
"loss": 1.0411,
|
977 |
+
"nll_loss": 1.1084836721420288,
|
978 |
+
"rewards/accuracies": 0.800000011920929,
|
979 |
+
"rewards/chosen": -0.036969222128391266,
|
980 |
+
"rewards/margins": 0.020036008208990097,
|
981 |
+
"rewards/rejected": -0.05700523406267166,
|
982 |
+
"step": 265
|
983 |
+
},
|
984 |
+
{
|
985 |
+
"epoch": 1.7142857142857144,
|
986 |
+
"grad_norm": 22.182912826538086,
|
987 |
+
"learning_rate": 3.0429030972509227e-06,
|
988 |
+
"log_odds_chosen": 0.946363627910614,
|
989 |
+
"log_odds_ratio": -0.44000881910324097,
|
990 |
+
"logits/chosen": 394.14337158203125,
|
991 |
+
"logits/rejected": 425.6368103027344,
|
992 |
+
"logps/chosen": -0.7661867141723633,
|
993 |
+
"logps/rejected": -1.3347783088684082,
|
994 |
+
"loss": 1.0739,
|
995 |
+
"nll_loss": 1.0289955139160156,
|
996 |
+
"rewards/accuracies": 0.7749999761581421,
|
997 |
+
"rewards/chosen": -0.03830933943390846,
|
998 |
+
"rewards/margins": 0.028429577127099037,
|
999 |
+
"rewards/rejected": -0.06673891097307205,
|
1000 |
+
"step": 270
|
1001 |
+
},
|
1002 |
+
{
|
1003 |
+
"epoch": 1.746031746031746,
|
1004 |
+
"grad_norm": 23.890846252441406,
|
1005 |
+
"learning_rate": 3.0151134457776365e-06,
|
1006 |
+
"log_odds_chosen": 0.8923860788345337,
|
1007 |
+
"log_odds_ratio": -0.5077377557754517,
|
1008 |
+
"logits/chosen": 385.6656799316406,
|
1009 |
+
"logits/rejected": 381.90765380859375,
|
1010 |
+
"logps/chosen": -0.9714023470878601,
|
1011 |
+
"logps/rejected": -1.5488981008529663,
|
1012 |
+
"loss": 1.0839,
|
1013 |
+
"nll_loss": 1.2609059810638428,
|
1014 |
+
"rewards/accuracies": 0.7749999761581421,
|
1015 |
+
"rewards/chosen": -0.04857012256979942,
|
1016 |
+
"rewards/margins": 0.028874799609184265,
|
1017 |
+
"rewards/rejected": -0.07744492590427399,
|
1018 |
+
"step": 275
|
1019 |
+
},
|
1020 |
+
{
|
1021 |
+
"epoch": 1.7777777777777777,
|
1022 |
+
"grad_norm": 21.916139602661133,
|
1023 |
+
"learning_rate": 2.988071523335984e-06,
|
1024 |
+
"log_odds_chosen": 0.737246036529541,
|
1025 |
+
"log_odds_ratio": -0.4812200665473938,
|
1026 |
+
"logits/chosen": 386.85882568359375,
|
1027 |
+
"logits/rejected": 401.51556396484375,
|
1028 |
+
"logps/chosen": -0.7044110894203186,
|
1029 |
+
"logps/rejected": -1.1333626508712769,
|
1030 |
+
"loss": 1.1711,
|
1031 |
+
"nll_loss": 1.0592542886734009,
|
1032 |
+
"rewards/accuracies": 0.7250000238418579,
|
1033 |
+
"rewards/chosen": -0.03522055223584175,
|
1034 |
+
"rewards/margins": 0.02144758589565754,
|
1035 |
+
"rewards/rejected": -0.05666813999414444,
|
1036 |
+
"step": 280
|
1037 |
+
},
|
1038 |
+
{
|
1039 |
+
"epoch": 1.8095238095238095,
|
1040 |
+
"grad_norm": 52.16273498535156,
|
1041 |
+
"learning_rate": 2.961744388795462e-06,
|
1042 |
+
"log_odds_chosen": 0.9238273501396179,
|
1043 |
+
"log_odds_ratio": -0.533433735370636,
|
1044 |
+
"logits/chosen": 430.734375,
|
1045 |
+
"logits/rejected": 403.882568359375,
|
1046 |
+
"logps/chosen": -0.823104977607727,
|
1047 |
+
"logps/rejected": -1.353060007095337,
|
1048 |
+
"loss": 1.0421,
|
1049 |
+
"nll_loss": 1.0632542371749878,
|
1050 |
+
"rewards/accuracies": 0.6499999761581421,
|
1051 |
+
"rewards/chosen": -0.041155245155096054,
|
1052 |
+
"rewards/margins": 0.026497751474380493,
|
1053 |
+
"rewards/rejected": -0.06765300035476685,
|
1054 |
+
"step": 285
|
1055 |
+
},
|
1056 |
+
{
|
1057 |
+
"epoch": 1.8412698412698414,
|
1058 |
+
"grad_norm": 30.704998016357422,
|
1059 |
+
"learning_rate": 2.9361010975735177e-06,
|
1060 |
+
"log_odds_chosen": 0.9045122861862183,
|
1061 |
+
"log_odds_ratio": -0.44358786940574646,
|
1062 |
+
"logits/chosen": 413.3941955566406,
|
1063 |
+
"logits/rejected": 385.8519592285156,
|
1064 |
+
"logps/chosen": -0.7694595456123352,
|
1065 |
+
"logps/rejected": -1.312978982925415,
|
1066 |
+
"loss": 1.1363,
|
1067 |
+
"nll_loss": 1.063130497932434,
|
1068 |
+
"rewards/accuracies": 0.875,
|
1069 |
+
"rewards/chosen": -0.0384729765355587,
|
1070 |
+
"rewards/margins": 0.027175968512892723,
|
1071 |
+
"rewards/rejected": -0.06564894318580627,
|
1072 |
+
"step": 290
|
1073 |
+
},
|
1074 |
+
{
|
1075 |
+
"epoch": 1.873015873015873,
|
1076 |
+
"grad_norm": 14.101789474487305,
|
1077 |
+
"learning_rate": 2.9111125486979104e-06,
|
1078 |
+
"log_odds_chosen": 0.9633495211601257,
|
1079 |
+
"log_odds_ratio": -0.4407424032688141,
|
1080 |
+
"logits/chosen": 402.0762634277344,
|
1081 |
+
"logits/rejected": 433.57135009765625,
|
1082 |
+
"logps/chosen": -0.7939198613166809,
|
1083 |
+
"logps/rejected": -1.3206735849380493,
|
1084 |
+
"loss": 1.0649,
|
1085 |
+
"nll_loss": 1.0656565427780151,
|
1086 |
+
"rewards/accuracies": 0.800000011920929,
|
1087 |
+
"rewards/chosen": -0.039695993065834045,
|
1088 |
+
"rewards/margins": 0.026337692514061928,
|
1089 |
+
"rewards/rejected": -0.06603368371725082,
|
1090 |
+
"step": 295
|
1091 |
+
},
|
1092 |
+
{
|
1093 |
+
"epoch": 1.9047619047619047,
|
1094 |
+
"grad_norm": 14.737664222717285,
|
1095 |
+
"learning_rate": 2.8867513459481293e-06,
|
1096 |
+
"log_odds_chosen": 0.8915547132492065,
|
1097 |
+
"log_odds_ratio": -0.4351634085178375,
|
1098 |
+
"logits/chosen": 394.14019775390625,
|
1099 |
+
"logits/rejected": 398.04486083984375,
|
1100 |
+
"logps/chosen": -0.6770254373550415,
|
1101 |
+
"logps/rejected": -1.136715292930603,
|
1102 |
+
"loss": 1.0148,
|
1103 |
+
"nll_loss": 1.0366181135177612,
|
1104 |
+
"rewards/accuracies": 0.824999988079071,
|
1105 |
+
"rewards/chosen": -0.03385127708315849,
|
1106 |
+
"rewards/margins": 0.022984493523836136,
|
1107 |
+
"rewards/rejected": -0.05683577060699463,
|
1108 |
+
"step": 300
|
1109 |
+
},
|
1110 |
+
{
|
1111 |
+
"epoch": 1.9365079365079365,
|
1112 |
+
"grad_norm": 27.61500358581543,
|
1113 |
+
"learning_rate": 2.862991671569341e-06,
|
1114 |
+
"log_odds_chosen": 1.2255324125289917,
|
1115 |
+
"log_odds_ratio": -0.3640300929546356,
|
1116 |
+
"logits/chosen": 414.54364013671875,
|
1117 |
+
"logits/rejected": 399.3366394042969,
|
1118 |
+
"logps/chosen": -0.5758011937141418,
|
1119 |
+
"logps/rejected": -1.1583391427993774,
|
1120 |
+
"loss": 0.9944,
|
1121 |
+
"nll_loss": 0.8838338851928711,
|
1122 |
+
"rewards/accuracies": 0.875,
|
1123 |
+
"rewards/chosen": -0.028790060430765152,
|
1124 |
+
"rewards/margins": 0.029126901179552078,
|
1125 |
+
"rewards/rejected": -0.05791696161031723,
|
1126 |
+
"step": 305
|
1127 |
+
},
|
1128 |
+
{
|
1129 |
+
"epoch": 1.9682539682539684,
|
1130 |
+
"grad_norm": 22.889514923095703,
|
1131 |
+
"learning_rate": 2.839809171235324e-06,
|
1132 |
+
"log_odds_chosen": 0.8731952905654907,
|
1133 |
+
"log_odds_ratio": -0.4226910471916199,
|
1134 |
+
"logits/chosen": 386.8481140136719,
|
1135 |
+
"logits/rejected": 391.4911804199219,
|
1136 |
+
"logps/chosen": -0.7098206877708435,
|
1137 |
+
"logps/rejected": -1.2461992502212524,
|
1138 |
+
"loss": 1.068,
|
1139 |
+
"nll_loss": 1.1014317274093628,
|
1140 |
+
"rewards/accuracies": 0.8999999761581421,
|
1141 |
+
"rewards/chosen": -0.035491038113832474,
|
1142 |
+
"rewards/margins": 0.026818927377462387,
|
1143 |
+
"rewards/rejected": -0.06230996176600456,
|
1144 |
+
"step": 310
|
1145 |
+
},
|
1146 |
+
{
|
1147 |
+
"epoch": 2.0,
|
1148 |
+
"grad_norm": 14.210800170898438,
|
1149 |
+
"learning_rate": 2.817180849095055e-06,
|
1150 |
+
"log_odds_chosen": 1.0788153409957886,
|
1151 |
+
"log_odds_ratio": -0.42032232880592346,
|
1152 |
+
"logits/chosen": 411.44830322265625,
|
1153 |
+
"logits/rejected": 397.8619384765625,
|
1154 |
+
"logps/chosen": -0.7904238104820251,
|
1155 |
+
"logps/rejected": -1.4402620792388916,
|
1156 |
+
"loss": 1.0587,
|
1157 |
+
"nll_loss": 1.1283180713653564,
|
1158 |
+
"rewards/accuracies": 0.824999988079071,
|
1159 |
+
"rewards/chosen": -0.03952118754386902,
|
1160 |
+
"rewards/margins": 0.03249191492795944,
|
1161 |
+
"rewards/rejected": -0.07201310247182846,
|
1162 |
+
"step": 315
|
1163 |
+
},
|
1164 |
+
{
|
1165 |
+
"epoch": 2.0,
|
1166 |
+
"eval_log_odds_chosen": 0.27226492762565613,
|
1167 |
+
"eval_log_odds_ratio": -0.6900704503059387,
|
1168 |
+
"eval_logits/chosen": 323.6011962890625,
|
1169 |
+
"eval_logits/rejected": 296.0849914550781,
|
1170 |
+
"eval_logps/chosen": -1.003529667854309,
|
1171 |
+
"eval_logps/rejected": -1.1904489994049072,
|
1172 |
+
"eval_loss": 1.4250426292419434,
|
1173 |
+
"eval_nll_loss": 1.372891902923584,
|
1174 |
+
"eval_rewards/accuracies": 0.5467625856399536,
|
1175 |
+
"eval_rewards/chosen": -0.0501764751970768,
|
1176 |
+
"eval_rewards/margins": 0.009345975704491138,
|
1177 |
+
"eval_rewards/rejected": -0.05952245742082596,
|
1178 |
+
"eval_runtime": 112.3866,
|
1179 |
+
"eval_samples_per_second": 4.921,
|
1180 |
+
"eval_steps_per_second": 1.237,
|
1181 |
+
"step": 315
|
1182 |
+
},
|
1183 |
+
{
|
1184 |
+
"epoch": 2.0317460317460316,
|
1185 |
+
"grad_norm": 15.507851600646973,
|
1186 |
+
"learning_rate": 2.7950849718747376e-06,
|
1187 |
+
"log_odds_chosen": 2.808565616607666,
|
1188 |
+
"log_odds_ratio": -0.13379335403442383,
|
1189 |
+
"logits/chosen": 376.21978759765625,
|
1190 |
+
"logits/rejected": 409.29669189453125,
|
1191 |
+
"logps/chosen": -0.34599769115448,
|
1192 |
+
"logps/rejected": -1.854657530784607,
|
1193 |
+
"loss": 0.5967,
|
1194 |
+
"nll_loss": 0.5440115928649902,
|
1195 |
+
"rewards/accuracies": 1.0,
|
1196 |
+
"rewards/chosen": -0.01729988493025303,
|
1197 |
+
"rewards/margins": 0.07543299347162247,
|
1198 |
+
"rewards/rejected": -0.09273288398981094,
|
1199 |
+
"step": 320
|
1200 |
+
},
|
1201 |
+
{
|
1202 |
+
"epoch": 2.0634920634920633,
|
1203 |
+
"grad_norm": 14.76868724822998,
|
1204 |
+
"learning_rate": 2.773500981126146e-06,
|
1205 |
+
"log_odds_chosen": 2.1307175159454346,
|
1206 |
+
"log_odds_ratio": -0.16102290153503418,
|
1207 |
+
"logits/chosen": 384.79876708984375,
|
1208 |
+
"logits/rejected": 401.5932312011719,
|
1209 |
+
"logps/chosen": -0.3530585765838623,
|
1210 |
+
"logps/rejected": -1.3687174320220947,
|
1211 |
+
"loss": 0.6419,
|
1212 |
+
"nll_loss": 0.5659872889518738,
|
1213 |
+
"rewards/accuracies": 0.9750000238418579,
|
1214 |
+
"rewards/chosen": -0.017652926966547966,
|
1215 |
+
"rewards/margins": 0.0507829412817955,
|
1216 |
+
"rewards/rejected": -0.06843587011098862,
|
1217 |
+
"step": 325
|
1218 |
+
},
|
1219 |
+
{
|
1220 |
+
"epoch": 2.0952380952380953,
|
1221 |
+
"grad_norm": 16.631784439086914,
|
1222 |
+
"learning_rate": 2.752409412815902e-06,
|
1223 |
+
"log_odds_chosen": 2.310250759124756,
|
1224 |
+
"log_odds_ratio": -0.15331122279167175,
|
1225 |
+
"logits/chosen": 407.6214599609375,
|
1226 |
+
"logits/rejected": 376.6868896484375,
|
1227 |
+
"logps/chosen": -0.41628751158714294,
|
1228 |
+
"logps/rejected": -1.6159404516220093,
|
1229 |
+
"loss": 0.6052,
|
1230 |
+
"nll_loss": 0.6574462652206421,
|
1231 |
+
"rewards/accuracies": 0.9750000238418579,
|
1232 |
+
"rewards/chosen": -0.020814374089241028,
|
1233 |
+
"rewards/margins": 0.059982649981975555,
|
1234 |
+
"rewards/rejected": -0.08079702407121658,
|
1235 |
+
"step": 330
|
1236 |
+
},
|
1237 |
+
{
|
1238 |
+
"epoch": 2.126984126984127,
|
1239 |
+
"grad_norm": 13.462658882141113,
|
1240 |
+
"learning_rate": 2.7317918235407652e-06,
|
1241 |
+
"log_odds_chosen": 2.4282920360565186,
|
1242 |
+
"log_odds_ratio": -0.13053010404109955,
|
1243 |
+
"logits/chosen": 353.52349853515625,
|
1244 |
+
"logits/rejected": 368.1820983886719,
|
1245 |
+
"logps/chosen": -0.41353076696395874,
|
1246 |
+
"logps/rejected": -1.7517569065093994,
|
1247 |
+
"loss": 0.5669,
|
1248 |
+
"nll_loss": 0.6166477203369141,
|
1249 |
+
"rewards/accuracies": 0.9750000238418579,
|
1250 |
+
"rewards/chosen": -0.020676542073488235,
|
1251 |
+
"rewards/margins": 0.06691131740808487,
|
1252 |
+
"rewards/rejected": -0.08758784830570221,
|
1253 |
+
"step": 335
|
1254 |
+
},
|
1255 |
+
{
|
1256 |
+
"epoch": 2.1587301587301586,
|
1257 |
+
"grad_norm": 14.838258743286133,
|
1258 |
+
"learning_rate": 2.711630722733202e-06,
|
1259 |
+
"log_odds_chosen": 2.658790111541748,
|
1260 |
+
"log_odds_ratio": -0.11425229161977768,
|
1261 |
+
"logits/chosen": 371.021484375,
|
1262 |
+
"logits/rejected": 371.3381042480469,
|
1263 |
+
"logps/chosen": -0.36502301692962646,
|
1264 |
+
"logps/rejected": -1.8417142629623413,
|
1265 |
+
"loss": 0.5366,
|
1266 |
+
"nll_loss": 0.5441598296165466,
|
1267 |
+
"rewards/accuracies": 1.0,
|
1268 |
+
"rewards/chosen": -0.018251152709126472,
|
1269 |
+
"rewards/margins": 0.07383455336093903,
|
1270 |
+
"rewards/rejected": -0.09208571910858154,
|
1271 |
+
"step": 340
|
1272 |
+
},
|
1273 |
+
{
|
1274 |
+
"epoch": 2.1904761904761907,
|
1275 |
+
"grad_norm": 13.042370796203613,
|
1276 |
+
"learning_rate": 2.691909510290828e-06,
|
1277 |
+
"log_odds_chosen": 2.3122012615203857,
|
1278 |
+
"log_odds_ratio": -0.1497458517551422,
|
1279 |
+
"logits/chosen": 391.87457275390625,
|
1280 |
+
"logits/rejected": 377.2791442871094,
|
1281 |
+
"logps/chosen": -0.38829633593559265,
|
1282 |
+
"logps/rejected": -1.5641629695892334,
|
1283 |
+
"loss": 0.5496,
|
1284 |
+
"nll_loss": 0.6075534224510193,
|
1285 |
+
"rewards/accuracies": 1.0,
|
1286 |
+
"rewards/chosen": -0.019414816051721573,
|
1287 |
+
"rewards/margins": 0.058793336153030396,
|
1288 |
+
"rewards/rejected": -0.07820815593004227,
|
1289 |
+
"step": 345
|
1290 |
+
},
|
1291 |
+
{
|
1292 |
+
"epoch": 2.2222222222222223,
|
1293 |
+
"grad_norm": 13.684109687805176,
|
1294 |
+
"learning_rate": 2.6726124191242444e-06,
|
1295 |
+
"log_odds_chosen": 2.6677346229553223,
|
1296 |
+
"log_odds_ratio": -0.10940170288085938,
|
1297 |
+
"logits/chosen": 390.1587829589844,
|
1298 |
+
"logits/rejected": 384.64398193359375,
|
1299 |
+
"logps/chosen": -0.34501951932907104,
|
1300 |
+
"logps/rejected": -1.7201154232025146,
|
1301 |
+
"loss": 0.5817,
|
1302 |
+
"nll_loss": 0.5362960696220398,
|
1303 |
+
"rewards/accuracies": 1.0,
|
1304 |
+
"rewards/chosen": -0.017250975593924522,
|
1305 |
+
"rewards/margins": 0.06875480711460114,
|
1306 |
+
"rewards/rejected": -0.08600577712059021,
|
1307 |
+
"step": 350
|
1308 |
+
},
|
1309 |
+
{
|
1310 |
+
"epoch": 2.253968253968254,
|
1311 |
+
"grad_norm": 12.086108207702637,
|
1312 |
+
"learning_rate": 2.6537244621713765e-06,
|
1313 |
+
"log_odds_chosen": 2.147876262664795,
|
1314 |
+
"log_odds_ratio": -0.16266413033008575,
|
1315 |
+
"logits/chosen": 391.79998779296875,
|
1316 |
+
"logits/rejected": 370.3006286621094,
|
1317 |
+
"logps/chosen": -0.34506654739379883,
|
1318 |
+
"logps/rejected": -1.4374301433563232,
|
1319 |
+
"loss": 0.5589,
|
1320 |
+
"nll_loss": 0.5755339860916138,
|
1321 |
+
"rewards/accuracies": 1.0,
|
1322 |
+
"rewards/chosen": -0.017253328114748,
|
1323 |
+
"rewards/margins": 0.054618168622255325,
|
1324 |
+
"rewards/rejected": -0.07187150418758392,
|
1325 |
+
"step": 355
|
1326 |
+
},
|
1327 |
+
{
|
1328 |
+
"epoch": 2.2857142857142856,
|
1329 |
+
"grad_norm": 13.608001708984375,
|
1330 |
+
"learning_rate": 2.6352313834736496e-06,
|
1331 |
+
"log_odds_chosen": 2.511486768722534,
|
1332 |
+
"log_odds_ratio": -0.14479351043701172,
|
1333 |
+
"logits/chosen": 381.4727478027344,
|
1334 |
+
"logits/rejected": 367.880859375,
|
1335 |
+
"logps/chosen": -0.33382540941238403,
|
1336 |
+
"logps/rejected": -1.6793336868286133,
|
1337 |
+
"loss": 0.6084,
|
1338 |
+
"nll_loss": 0.6348077058792114,
|
1339 |
+
"rewards/accuracies": 1.0,
|
1340 |
+
"rewards/chosen": -0.01669127121567726,
|
1341 |
+
"rewards/margins": 0.06727541238069534,
|
1342 |
+
"rewards/rejected": -0.0839666873216629,
|
1343 |
+
"step": 360
|
1344 |
+
},
|
1345 |
+
{
|
1346 |
+
"epoch": 2.317460317460317,
|
1347 |
+
"grad_norm": 12.76559829711914,
|
1348 |
+
"learning_rate": 2.6171196129510684e-06,
|
1349 |
+
"log_odds_chosen": 2.4732162952423096,
|
1350 |
+
"log_odds_ratio": -0.13545522093772888,
|
1351 |
+
"logits/chosen": 364.4020080566406,
|
1352 |
+
"logits/rejected": 389.8330383300781,
|
1353 |
+
"logps/chosen": -0.32329806685447693,
|
1354 |
+
"logps/rejected": -1.6308807134628296,
|
1355 |
+
"loss": 0.5702,
|
1356 |
+
"nll_loss": 0.5807278752326965,
|
1357 |
+
"rewards/accuracies": 1.0,
|
1358 |
+
"rewards/chosen": -0.016164902597665787,
|
1359 |
+
"rewards/margins": 0.06537913531064987,
|
1360 |
+
"rewards/rejected": -0.08154404163360596,
|
1361 |
+
"step": 365
|
1362 |
+
},
|
1363 |
+
{
|
1364 |
+
"epoch": 2.3492063492063493,
|
1365 |
+
"grad_norm": 11.652225494384766,
|
1366 |
+
"learning_rate": 2.599376224550182e-06,
|
1367 |
+
"log_odds_chosen": 2.5551929473876953,
|
1368 |
+
"log_odds_ratio": -0.12840549647808075,
|
1369 |
+
"logits/chosen": 341.6715087890625,
|
1370 |
+
"logits/rejected": 366.8011169433594,
|
1371 |
+
"logps/chosen": -0.3068418800830841,
|
1372 |
+
"logps/rejected": -1.6018937826156616,
|
1373 |
+
"loss": 0.5513,
|
1374 |
+
"nll_loss": 0.5314286947250366,
|
1375 |
+
"rewards/accuracies": 1.0,
|
1376 |
+
"rewards/chosen": -0.015342095866799355,
|
1377 |
+
"rewards/margins": 0.06475259363651276,
|
1378 |
+
"rewards/rejected": -0.08009468764066696,
|
1379 |
+
"step": 370
|
1380 |
+
},
|
1381 |
+
{
|
1382 |
+
"epoch": 2.380952380952381,
|
1383 |
+
"grad_norm": 11.979321479797363,
|
1384 |
+
"learning_rate": 2.5819888974716113e-06,
|
1385 |
+
"log_odds_chosen": 2.403555393218994,
|
1386 |
+
"log_odds_ratio": -0.2127949744462967,
|
1387 |
+
"logits/chosen": 380.73468017578125,
|
1388 |
+
"logits/rejected": 381.8941650390625,
|
1389 |
+
"logps/chosen": -0.3469754755496979,
|
1390 |
+
"logps/rejected": -1.4457371234893799,
|
1391 |
+
"loss": 0.5535,
|
1392 |
+
"nll_loss": 0.6116215586662292,
|
1393 |
+
"rewards/accuracies": 0.949999988079071,
|
1394 |
+
"rewards/chosen": -0.017348771914839745,
|
1395 |
+
"rewards/margins": 0.05493808910250664,
|
1396 |
+
"rewards/rejected": -0.07228686660528183,
|
1397 |
+
"step": 375
|
1398 |
+
},
|
1399 |
+
{
|
1400 |
+
"epoch": 2.4126984126984126,
|
1401 |
+
"grad_norm": 14.060809135437012,
|
1402 |
+
"learning_rate": 2.564945880212886e-06,
|
1403 |
+
"log_odds_chosen": 2.586824893951416,
|
1404 |
+
"log_odds_ratio": -0.11490654945373535,
|
1405 |
+
"logits/chosen": 339.2264709472656,
|
1406 |
+
"logits/rejected": 333.75042724609375,
|
1407 |
+
"logps/chosen": -0.2667796015739441,
|
1408 |
+
"logps/rejected": -1.5076611042022705,
|
1409 |
+
"loss": 0.555,
|
1410 |
+
"nll_loss": 0.563228964805603,
|
1411 |
+
"rewards/accuracies": 1.0,
|
1412 |
+
"rewards/chosen": -0.01333898026496172,
|
1413 |
+
"rewards/margins": 0.06204408407211304,
|
1414 |
+
"rewards/rejected": -0.07538305968046188,
|
1415 |
+
"step": 380
|
1416 |
+
},
|
1417 |
+
{
|
1418 |
+
"epoch": 2.4444444444444446,
|
1419 |
+
"grad_norm": 13.985331535339355,
|
1420 |
+
"learning_rate": 2.5482359571881276e-06,
|
1421 |
+
"log_odds_chosen": 2.4178810119628906,
|
1422 |
+
"log_odds_ratio": -0.13190138339996338,
|
1423 |
+
"logits/chosen": 361.1400146484375,
|
1424 |
+
"logits/rejected": 399.1631164550781,
|
1425 |
+
"logps/chosen": -0.3266194462776184,
|
1426 |
+
"logps/rejected": -1.4809812307357788,
|
1427 |
+
"loss": 0.5341,
|
1428 |
+
"nll_loss": 0.5168324708938599,
|
1429 |
+
"rewards/accuracies": 1.0,
|
1430 |
+
"rewards/chosen": -0.01633097417652607,
|
1431 |
+
"rewards/margins": 0.05771808698773384,
|
1432 |
+
"rewards/rejected": -0.07404907047748566,
|
1433 |
+
"step": 385
|
1434 |
+
},
|
1435 |
+
{
|
1436 |
+
"epoch": 2.4761904761904763,
|
1437 |
+
"grad_norm": 18.084671020507812,
|
1438 |
+
"learning_rate": 2.5318484177091667e-06,
|
1439 |
+
"log_odds_chosen": 2.383100986480713,
|
1440 |
+
"log_odds_ratio": -0.14292851090431213,
|
1441 |
+
"logits/chosen": 384.72064208984375,
|
1442 |
+
"logits/rejected": 393.7874755859375,
|
1443 |
+
"logps/chosen": -0.32157301902770996,
|
1444 |
+
"logps/rejected": -1.5480461120605469,
|
1445 |
+
"loss": 0.6103,
|
1446 |
+
"nll_loss": 0.5748914480209351,
|
1447 |
+
"rewards/accuracies": 1.0,
|
1448 |
+
"rewards/chosen": -0.01607864908874035,
|
1449 |
+
"rewards/margins": 0.061323653906583786,
|
1450 |
+
"rewards/rejected": -0.07740230858325958,
|
1451 |
+
"step": 390
|
1452 |
+
},
|
1453 |
+
{
|
1454 |
+
"epoch": 2.507936507936508,
|
1455 |
+
"grad_norm": 14.601240158081055,
|
1456 |
+
"learning_rate": 2.515773027133138e-06,
|
1457 |
+
"log_odds_chosen": 2.7805123329162598,
|
1458 |
+
"log_odds_ratio": -0.09744001924991608,
|
1459 |
+
"logits/chosen": 387.84619140625,
|
1460 |
+
"logits/rejected": 379.5089111328125,
|
1461 |
+
"logps/chosen": -0.29952239990234375,
|
1462 |
+
"logps/rejected": -1.6519800424575806,
|
1463 |
+
"loss": 0.6068,
|
1464 |
+
"nll_loss": 0.5597054958343506,
|
1465 |
+
"rewards/accuracies": 1.0,
|
1466 |
+
"rewards/chosen": -0.014976121485233307,
|
1467 |
+
"rewards/margins": 0.06762289255857468,
|
1468 |
+
"rewards/rejected": -0.08259901404380798,
|
1469 |
+
"step": 395
|
1470 |
+
},
|
1471 |
+
{
|
1472 |
+
"epoch": 2.5396825396825395,
|
1473 |
+
"grad_norm": 12.65790843963623,
|
1474 |
+
"learning_rate": 2.5e-06,
|
1475 |
+
"log_odds_chosen": 2.2969400882720947,
|
1476 |
+
"log_odds_ratio": -0.15153826773166656,
|
1477 |
+
"logits/chosen": 380.21063232421875,
|
1478 |
+
"logits/rejected": 387.6795959472656,
|
1479 |
+
"logps/chosen": -0.4159126281738281,
|
1480 |
+
"logps/rejected": -1.649945855140686,
|
1481 |
+
"loss": 0.5659,
|
1482 |
+
"nll_loss": 0.6253801584243774,
|
1483 |
+
"rewards/accuracies": 0.9750000238418579,
|
1484 |
+
"rewards/chosen": -0.020795632153749466,
|
1485 |
+
"rewards/margins": 0.06170165538787842,
|
1486 |
+
"rewards/rejected": -0.08249729126691818,
|
1487 |
+
"step": 400
|
1488 |
+
},
|
1489 |
+
{
|
1490 |
+
"epoch": 2.571428571428571,
|
1491 |
+
"grad_norm": 14.444682121276855,
|
1492 |
+
"learning_rate": 2.484519974999767e-06,
|
1493 |
+
"log_odds_chosen": 2.648660182952881,
|
1494 |
+
"log_odds_ratio": -0.12657761573791504,
|
1495 |
+
"logits/chosen": 393.61175537109375,
|
1496 |
+
"logits/rejected": 366.6102600097656,
|
1497 |
+
"logps/chosen": -0.3193502426147461,
|
1498 |
+
"logps/rejected": -1.483344316482544,
|
1499 |
+
"loss": 0.5605,
|
1500 |
+
"nll_loss": 0.534288763999939,
|
1501 |
+
"rewards/accuracies": 1.0,
|
1502 |
+
"rewards/chosen": -0.015967514365911484,
|
1503 |
+
"rewards/margins": 0.058199692517519,
|
1504 |
+
"rewards/rejected": -0.07416721433401108,
|
1505 |
+
"step": 405
|
1506 |
+
},
|
1507 |
+
{
|
1508 |
+
"epoch": 2.6031746031746033,
|
1509 |
+
"grad_norm": 16.570829391479492,
|
1510 |
+
"learning_rate": 2.4693239916239746e-06,
|
1511 |
+
"log_odds_chosen": 2.558866500854492,
|
1512 |
+
"log_odds_ratio": -0.11873920261859894,
|
1513 |
+
"logits/chosen": 402.86456298828125,
|
1514 |
+
"logits/rejected": 378.17303466796875,
|
1515 |
+
"logps/chosen": -0.3188033699989319,
|
1516 |
+
"logps/rejected": -1.5745189189910889,
|
1517 |
+
"loss": 0.5904,
|
1518 |
+
"nll_loss": 0.5366710424423218,
|
1519 |
+
"rewards/accuracies": 1.0,
|
1520 |
+
"rewards/chosen": -0.015940170735120773,
|
1521 |
+
"rewards/margins": 0.06278578191995621,
|
1522 |
+
"rewards/rejected": -0.07872594892978668,
|
1523 |
+
"step": 410
|
1524 |
+
},
|
1525 |
+
{
|
1526 |
+
"epoch": 2.634920634920635,
|
1527 |
+
"grad_norm": 14.746502876281738,
|
1528 |
+
"learning_rate": 2.4544034683690802e-06,
|
1529 |
+
"log_odds_chosen": 2.3469109535217285,
|
1530 |
+
"log_odds_ratio": -0.18333213031291962,
|
1531 |
+
"logits/chosen": 368.54840087890625,
|
1532 |
+
"logits/rejected": 411.5265197753906,
|
1533 |
+
"logps/chosen": -0.3403250277042389,
|
1534 |
+
"logps/rejected": -1.5103747844696045,
|
1535 |
+
"loss": 0.5125,
|
1536 |
+
"nll_loss": 0.4973570704460144,
|
1537 |
+
"rewards/accuracies": 0.9750000238418579,
|
1538 |
+
"rewards/chosen": -0.017016252502799034,
|
1539 |
+
"rewards/margins": 0.05850248783826828,
|
1540 |
+
"rewards/rejected": -0.07551874220371246,
|
1541 |
+
"step": 415
|
1542 |
+
},
|
1543 |
+
{
|
1544 |
+
"epoch": 2.6666666666666665,
|
1545 |
+
"grad_norm": 13.026257514953613,
|
1546 |
+
"learning_rate": 2.4397501823713327e-06,
|
1547 |
+
"log_odds_chosen": 2.4256062507629395,
|
1548 |
+
"log_odds_ratio": -0.13080844283103943,
|
1549 |
+
"logits/chosen": 361.3508605957031,
|
1550 |
+
"logits/rejected": 377.66217041015625,
|
1551 |
+
"logps/chosen": -0.30100154876708984,
|
1552 |
+
"logps/rejected": -1.4869256019592285,
|
1553 |
+
"loss": 0.5528,
|
1554 |
+
"nll_loss": 0.5275372266769409,
|
1555 |
+
"rewards/accuracies": 0.9750000238418579,
|
1556 |
+
"rewards/chosen": -0.015050077810883522,
|
1557 |
+
"rewards/margins": 0.059296198189258575,
|
1558 |
+
"rewards/rejected": -0.07434628158807755,
|
1559 |
+
"step": 420
|
1560 |
+
},
|
1561 |
+
{
|
1562 |
+
"epoch": 2.6984126984126986,
|
1563 |
+
"grad_norm": 13.664224624633789,
|
1564 |
+
"learning_rate": 2.4253562503633297e-06,
|
1565 |
+
"log_odds_chosen": 2.5698065757751465,
|
1566 |
+
"log_odds_ratio": -0.11280637979507446,
|
1567 |
+
"logits/chosen": 358.09954833984375,
|
1568 |
+
"logits/rejected": 362.4793395996094,
|
1569 |
+
"logps/chosen": -0.35316431522369385,
|
1570 |
+
"logps/rejected": -1.7075494527816772,
|
1571 |
+
"loss": 0.5529,
|
1572 |
+
"nll_loss": 0.5961888432502747,
|
1573 |
+
"rewards/accuracies": 1.0,
|
1574 |
+
"rewards/chosen": -0.017658215016126633,
|
1575 |
+
"rewards/margins": 0.06771925836801529,
|
1576 |
+
"rewards/rejected": -0.08537746965885162,
|
1577 |
+
"step": 425
|
1578 |
+
},
|
1579 |
+
{
|
1580 |
+
"epoch": 2.7301587301587302,
|
1581 |
+
"grad_norm": 10.971692085266113,
|
1582 |
+
"learning_rate": 2.411214110852061e-06,
|
1583 |
+
"log_odds_chosen": 2.5541086196899414,
|
1584 |
+
"log_odds_ratio": -0.133527934551239,
|
1585 |
+
"logits/chosen": 403.82427978515625,
|
1586 |
+
"logits/rejected": 415.45831298828125,
|
1587 |
+
"logps/chosen": -0.3685951828956604,
|
1588 |
+
"logps/rejected": -1.7137283086776733,
|
1589 |
+
"loss": 0.6179,
|
1590 |
+
"nll_loss": 0.5651038885116577,
|
1591 |
+
"rewards/accuracies": 0.9750000238418579,
|
1592 |
+
"rewards/chosen": -0.01842975988984108,
|
1593 |
+
"rewards/margins": 0.06725665926933289,
|
1594 |
+
"rewards/rejected": -0.08568642288446426,
|
1595 |
+
"step": 430
|
1596 |
+
},
|
1597 |
+
{
|
1598 |
+
"epoch": 2.761904761904762,
|
1599 |
+
"grad_norm": 10.986706733703613,
|
1600 |
+
"learning_rate": 2.3973165074269213e-06,
|
1601 |
+
"log_odds_chosen": 2.3214781284332275,
|
1602 |
+
"log_odds_ratio": -0.13725291192531586,
|
1603 |
+
"logits/chosen": 365.35345458984375,
|
1604 |
+
"logits/rejected": 361.9271545410156,
|
1605 |
+
"logps/chosen": -0.30945223569869995,
|
1606 |
+
"logps/rejected": -1.4199343919754028,
|
1607 |
+
"loss": 0.5953,
|
1608 |
+
"nll_loss": 0.45835572481155396,
|
1609 |
+
"rewards/accuracies": 1.0,
|
1610 |
+
"rewards/chosen": -0.015472611412405968,
|
1611 |
+
"rewards/margins": 0.055524103343486786,
|
1612 |
+
"rewards/rejected": -0.0709967166185379,
|
1613 |
+
"step": 435
|
1614 |
+
},
|
1615 |
+
{
|
1616 |
+
"epoch": 2.7936507936507935,
|
1617 |
+
"grad_norm": 10.5348539352417,
|
1618 |
+
"learning_rate": 2.3836564731139807e-06,
|
1619 |
+
"log_odds_chosen": 2.33512020111084,
|
1620 |
+
"log_odds_ratio": -0.15043067932128906,
|
1621 |
+
"logits/chosen": 387.08831787109375,
|
1622 |
+
"logits/rejected": 410.5296936035156,
|
1623 |
+
"logps/chosen": -0.39346036314964294,
|
1624 |
+
"logps/rejected": -1.6411031484603882,
|
1625 |
+
"loss": 0.5485,
|
1626 |
+
"nll_loss": 0.6781072616577148,
|
1627 |
+
"rewards/accuracies": 0.9750000238418579,
|
1628 |
+
"rewards/chosen": -0.019673021510243416,
|
1629 |
+
"rewards/margins": 0.06238213926553726,
|
1630 |
+
"rewards/rejected": -0.08205515891313553,
|
1631 |
+
"step": 440
|
1632 |
+
},
|
1633 |
+
{
|
1634 |
+
"epoch": 2.825396825396825,
|
1635 |
+
"grad_norm": 16.081439971923828,
|
1636 |
+
"learning_rate": 2.3702273156998867e-06,
|
1637 |
+
"log_odds_chosen": 2.88311505317688,
|
1638 |
+
"log_odds_ratio": -0.0930914506316185,
|
1639 |
+
"logits/chosen": 375.8430480957031,
|
1640 |
+
"logits/rejected": 376.3010559082031,
|
1641 |
+
"logps/chosen": -0.26819559931755066,
|
1642 |
+
"logps/rejected": -1.6618140935897827,
|
1643 |
+
"loss": 0.5927,
|
1644 |
+
"nll_loss": 0.5575067400932312,
|
1645 |
+
"rewards/accuracies": 1.0,
|
1646 |
+
"rewards/chosen": -0.013409781269729137,
|
1647 |
+
"rewards/margins": 0.06968092918395996,
|
1648 |
+
"rewards/rejected": -0.08309070765972137,
|
1649 |
+
"step": 445
|
1650 |
+
},
|
1651 |
+
{
|
1652 |
+
"epoch": 2.857142857142857,
|
1653 |
+
"grad_norm": 12.238554954528809,
|
1654 |
+
"learning_rate": 2.357022603955159e-06,
|
1655 |
+
"log_odds_chosen": 2.187112808227539,
|
1656 |
+
"log_odds_ratio": -0.17866849899291992,
|
1657 |
+
"logits/chosen": 380.0975036621094,
|
1658 |
+
"logits/rejected": 422.848876953125,
|
1659 |
+
"logps/chosen": -0.3514227867126465,
|
1660 |
+
"logps/rejected": -1.5105705261230469,
|
1661 |
+
"loss": 0.5869,
|
1662 |
+
"nll_loss": 0.5251952409744263,
|
1663 |
+
"rewards/accuracies": 0.9750000238418579,
|
1664 |
+
"rewards/chosen": -0.017571140080690384,
|
1665 |
+
"rewards/margins": 0.05795738101005554,
|
1666 |
+
"rewards/rejected": -0.07552852481603622,
|
1667 |
+
"step": 450
|
1668 |
+
},
|
1669 |
+
{
|
1670 |
+
"epoch": 2.888888888888889,
|
1671 |
+
"grad_norm": 11.653218269348145,
|
1672 |
+
"learning_rate": 2.3440361546924774e-06,
|
1673 |
+
"log_odds_chosen": 2.4360597133636475,
|
1674 |
+
"log_odds_ratio": -0.13144996762275696,
|
1675 |
+
"logits/chosen": 347.91571044921875,
|
1676 |
+
"logits/rejected": 403.49285888671875,
|
1677 |
+
"logps/chosen": -0.3146543800830841,
|
1678 |
+
"logps/rejected": -1.518036127090454,
|
1679 |
+
"loss": 0.5735,
|
1680 |
+
"nll_loss": 0.6055470705032349,
|
1681 |
+
"rewards/accuracies": 0.9750000238418579,
|
1682 |
+
"rewards/chosen": -0.015732718631625175,
|
1683 |
+
"rewards/margins": 0.06016908213496208,
|
1684 |
+
"rewards/rejected": -0.07590179890394211,
|
1685 |
+
"step": 455
|
1686 |
+
},
|
1687 |
+
{
|
1688 |
+
"epoch": 2.9206349206349205,
|
1689 |
+
"grad_norm": 12.282635688781738,
|
1690 |
+
"learning_rate": 2.3312620206007847e-06,
|
1691 |
+
"log_odds_chosen": 2.144893169403076,
|
1692 |
+
"log_odds_ratio": -0.15759766101837158,
|
1693 |
+
"logits/chosen": 398.63311767578125,
|
1694 |
+
"logits/rejected": 387.18377685546875,
|
1695 |
+
"logps/chosen": -0.33198612928390503,
|
1696 |
+
"logps/rejected": -1.3607968091964722,
|
1697 |
+
"loss": 0.532,
|
1698 |
+
"nll_loss": 0.5102331638336182,
|
1699 |
+
"rewards/accuracies": 1.0,
|
1700 |
+
"rewards/chosen": -0.01659930869936943,
|
1701 |
+
"rewards/margins": 0.051440536975860596,
|
1702 |
+
"rewards/rejected": -0.06803984940052032,
|
1703 |
+
"step": 460
|
1704 |
+
},
|
1705 |
+
{
|
1706 |
+
"epoch": 2.9523809523809526,
|
1707 |
+
"grad_norm": 16.435625076293945,
|
1708 |
+
"learning_rate": 2.3186944788008413e-06,
|
1709 |
+
"log_odds_chosen": 2.769465446472168,
|
1710 |
+
"log_odds_ratio": -0.11041196435689926,
|
1711 |
+
"logits/chosen": 392.8448486328125,
|
1712 |
+
"logits/rejected": 378.9862365722656,
|
1713 |
+
"logps/chosen": -0.3426569700241089,
|
1714 |
+
"logps/rejected": -1.8384767770767212,
|
1715 |
+
"loss": 0.5493,
|
1716 |
+
"nll_loss": 0.5335457921028137,
|
1717 |
+
"rewards/accuracies": 1.0,
|
1718 |
+
"rewards/chosen": -0.017132848501205444,
|
1719 |
+
"rewards/margins": 0.07479099929332733,
|
1720 |
+
"rewards/rejected": -0.09192384779453278,
|
1721 |
+
"step": 465
|
1722 |
+
},
|
1723 |
+
{
|
1724 |
+
"epoch": 2.984126984126984,
|
1725 |
+
"grad_norm": 15.835468292236328,
|
1726 |
+
"learning_rate": 2.3063280200722128e-06,
|
1727 |
+
"log_odds_chosen": 2.4313647747039795,
|
1728 |
+
"log_odds_ratio": -0.13430938124656677,
|
1729 |
+
"logits/chosen": 396.10845947265625,
|
1730 |
+
"logits/rejected": 391.65936279296875,
|
1731 |
+
"logps/chosen": -0.3266219198703766,
|
1732 |
+
"logps/rejected": -1.3965808153152466,
|
1733 |
+
"loss": 0.5897,
|
1734 |
+
"nll_loss": 0.5770766139030457,
|
1735 |
+
"rewards/accuracies": 1.0,
|
1736 |
+
"rewards/chosen": -0.01633109711110592,
|
1737 |
+
"rewards/margins": 0.05349794775247574,
|
1738 |
+
"rewards/rejected": -0.06982903182506561,
|
1739 |
+
"step": 470
|
1740 |
+
},
|
1741 |
+
{
|
1742 |
+
"epoch": 2.9904761904761905,
|
1743 |
+
"eval_log_odds_chosen": 0.367882639169693,
|
1744 |
+
"eval_log_odds_ratio": -0.6760825514793396,
|
1745 |
+
"eval_logits/chosen": 299.86248779296875,
|
1746 |
+
"eval_logits/rejected": 270.96282958984375,
|
1747 |
+
"eval_logps/chosen": -1.1505995988845825,
|
1748 |
+
"eval_logps/rejected": -1.3976730108261108,
|
1749 |
+
"eval_loss": 1.59840989112854,
|
1750 |
+
"eval_nll_loss": 1.5312451124191284,
|
1751 |
+
"eval_rewards/accuracies": 0.5899280309677124,
|
1752 |
+
"eval_rewards/chosen": -0.057529982179403305,
|
1753 |
+
"eval_rewards/margins": 0.012353661470115185,
|
1754 |
+
"eval_rewards/rejected": -0.06988365203142166,
|
1755 |
+
"eval_runtime": 112.4929,
|
1756 |
+
"eval_samples_per_second": 4.916,
|
1757 |
+
"eval_steps_per_second": 1.236,
|
1758 |
+
"step": 471
|
1759 |
+
},
|
1760 |
+
{
|
1761 |
+
"epoch": 2.9904761904761905,
|
1762 |
+
"step": 471,
|
1763 |
+
"total_flos": 0.0,
|
1764 |
+
"train_loss": 1.4937715783493788,
|
1765 |
+
"train_runtime": 12592.9879,
|
1766 |
+
"train_samples_per_second": 1.199,
|
1767 |
+
"train_steps_per_second": 0.037
|
1768 |
+
}
|
1769 |
+
],
|
1770 |
+
"logging_steps": 5,
|
1771 |
+
"max_steps": 471,
|
1772 |
+
"num_input_tokens_seen": 0,
|
1773 |
+
"num_train_epochs": 3,
|
1774 |
+
"save_steps": 500,
|
1775 |
+
"stateful_callbacks": {
|
1776 |
+
"TrainerControl": {
|
1777 |
+
"args": {
|
1778 |
+
"should_epoch_stop": false,
|
1779 |
+
"should_evaluate": false,
|
1780 |
+
"should_log": false,
|
1781 |
+
"should_save": false,
|
1782 |
+
"should_training_stop": false
|
1783 |
+
},
|
1784 |
+
"attributes": {}
|
1785 |
+
}
|
1786 |
+
},
|
1787 |
+
"total_flos": 0.0,
|
1788 |
+
"train_batch_size": 2,
|
1789 |
+
"trial_name": null,
|
1790 |
+
"trial_params": null
|
1791 |
+
}
|
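
The `log_history` array above uses the flat per-record schema that `transformers.Trainer` writes: training records carry `loss`, `learning_rate`, and `grad_norm`, while evaluation records use `eval_`-prefixed keys. A minimal sketch for reading it back, assuming the repository has been cloned so `trainer_state.json` is available locally; the inverse_sqrt spot-check is inferred from the logged values, not taken from this commit:

```python
import json
import math

# Load the trainer state saved alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train_log = [(r["step"], r["loss"]) for r in state["log_history"] if "loss" in r]
eval_log = [(r["step"], r["eval_loss"]) for r in state["log_history"] if "eval_loss" in r]

print("last train step:", train_log[-1])
for step, loss in eval_log:
    print(f"eval @ step {step}: loss={loss:.4f}")

# Spot-check the inverse_sqrt schedule: after the 100 warmup steps the
# logged rate appears to follow 5e-06 * sqrt(100 / step).
for r in state["log_history"]:
    if "learning_rate" in r and r["step"] >= 100:
        expected = 5e-06 * math.sqrt(100 / r["step"])
        assert math.isclose(r["learning_rate"], expected, rel_tol=1e-6)
```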
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e92e0a70b4821a3be729706638722f3cde4a53ab2888e6b40d26c1da02d13a1d
+size 6712
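
`training_args.bin` is stored via Git LFS, so only the pointer appears in the diff. Assuming it is the pickled training-arguments object that `Trainer` saves by default (it should only be unpickled in a trusted environment with matching `transformers`/`trl` versions installed), it can be inspected with a sketch like this; `weights_only=False` is needed on recent torch versions because the file holds a full Python object rather than tensors:

```python
import torch

# Deserializes arbitrary Python objects -- load from trusted sources only.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```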