Upload folder using huggingface_hub

- checkpoint-30/adapter_config.json +2 -2
- checkpoint-30/adapter_model.safetensors +1 -1
- checkpoint-30/optimizer.pt +1 -1
- checkpoint-30/rng_state_0.pth +2 -2
- checkpoint-30/rng_state_1.pth +2 -2
- checkpoint-30/scheduler.pt +1 -1
- checkpoint-30/trainer_state.json +22 -22
- checkpoint-30/training_args.bin +1 -1
- checkpoint-40/adapter_config.json +2 -2
- checkpoint-40/adapter_model.safetensors +1 -1
- checkpoint-40/optimizer.pt +1 -1
- checkpoint-40/rng_state_0.pth +2 -2
- checkpoint-40/rng_state_1.pth +2 -2
- checkpoint-40/scheduler.pt +1 -1
- checkpoint-40/trainer_state.json +31 -31
- checkpoint-40/training_args.bin +1 -1
- checkpoint-50/adapter_config.json +2 -2
- checkpoint-50/adapter_model.safetensors +1 -1
- checkpoint-50/optimizer.pt +1 -1
- checkpoint-50/rng_state_0.pth +2 -2
- checkpoint-50/rng_state_1.pth +2 -2
- checkpoint-50/scheduler.pt +1 -1
- checkpoint-50/trainer_state.json +36 -36
- checkpoint-50/training_args.bin +1 -1
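
For reference, a commit like this is typically produced by calling `upload_folder` from the `huggingface_hub` library, as the commit message suggests. The sketch below is a minimal, hedged example; the `folder_path` and `repo_id` are placeholders, not values recorded in this commit.

```python
# Minimal sketch: push a local training output directory to the Hub.
# folder_path and repo_id are placeholders, not values taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./mistral/<run_name>",   # local dir containing checkpoint-30/40/50
    repo_id="<org>/<model-repo>",         # target model repository on the Hub
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```
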
checkpoint-30/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
     "revision": null,
     "target_modules": [
         "q_proj",
+        "k_proj",
         "v_proj",
-        "o_proj"
-        "k_proj"
+        "o_proj"
     ],
     "task_type": "CAUSAL_LM",
     "use_rslora": false

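The only change to the adapter config is the ordering of `target_modules` (`k_proj` now listed before `v_proj`, `o_proj` last). A hedged sketch of a PEFT `LoraConfig` that would produce this `target_modules` / `task_type` combination is shown below; the rank and alpha values are illustrative assumptions, since they are not visible in this hunk.

```python
# Sketch of a PEFT LoraConfig matching the target_modules / task_type shown above.
# r and lora_alpha are illustrative assumptions; they do not appear in this diff.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
    task_type="CAUSAL_LM",
    use_rslora=False,
)
```
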
checkpoint-30/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:24867322dfb4671521880b7952dfecf869c9ddf50ba8c8d0ab60442d46c3b6e7
 size 436242776

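Files stored through Git LFS are represented in the repository by small pointer files like the one above (spec version, `oid sha256:<hash>`, `size <bytes>`). If needed, a downloaded artifact can be checked against its pointer with a short script such as the sketch below; the file path is a placeholder for wherever the checkpoint was downloaded.

```python
# Sketch: verify a downloaded file against its Git LFS pointer (sha256 oid + size).
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex-encoded SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected_oid = "24867322dfb4671521880b7952dfecf869c9ddf50ba8c8d0ab60442d46c3b6e7"
expected_size = 436242776

path = "checkpoint-30/adapter_model.safetensors"  # placeholder local path
assert os.path.getsize(path) == expected_size
assert sha256_of(path) == expected_oid
```
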
checkpoint-30/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6a095acb983c64222b3980c352b38e58ba4bd6e2f0b8e559eddea78a65277790
 size 872568314

checkpoint-30/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2c2a616f0c7a81cc42f0138e0036f3bc925663524713430ec02128feb7cc22f7
+size 14512

checkpoint-30/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3547aee58cf2280e2d966f891fba83ef181a792754cf251e62b59ae644dd2253
+size 14512

checkpoint-30/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:35079c4c5c54e3e2a4600c729d1fa32b43362d22b0baf443894af8a786492df1
 size 1000

checkpoint-30/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "best_metric": 0.
+    "best_metric": 0.44581571221351624,
     "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-30",
-    "epoch": 0.
+    "epoch": 0.14869888475836432,
     "eval_steps": 10,
     "global_step": 30,
     "is_hyper_param_search": false,
@@ -9,34 +9,34 @@
     "is_world_process_zero": true,
     "log_history": [
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.05,
+            "eval_loss": 0.6367942690849304,
+            "eval_runtime": 180.8957,
+            "eval_samples_per_second": 15.849,
+            "eval_steps_per_second": 0.995,
             "step": 10
         },
         {
-            "epoch": 0.
-            "grad_norm": 0.
-            "learning_rate": 0.
-            "loss":
+            "epoch": 0.1,
+            "grad_norm": 0.34346863627433777,
+            "learning_rate": 0.00019510565162951537,
+            "loss": 0.9774,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.1,
+            "eval_loss": 0.48089343309402466,
+            "eval_runtime": 180.8882,
+            "eval_samples_per_second": 15.85,
+            "eval_steps_per_second": 0.995,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.15,
+            "eval_loss": 0.44581571221351624,
+            "eval_runtime": 180.8305,
+            "eval_samples_per_second": 15.855,
+            "eval_steps_per_second": 0.995,
             "step": 30
         }
     ],
@@ -45,7 +45,7 @@
     "num_input_tokens_seen": 0,
     "num_train_epochs": 1,
     "save_steps": 10,
-    "total_flos":
+    "total_flos": 1.3497349192024064e+17,
     "train_batch_size": 16,
     "trial_name": null,
     "trial_params": null

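The filled-in `trainer_state.json` records the eval loss falling from 0.6368 at step 10 to 0.4458 at step 30. A small sketch for pulling those numbers back out of a downloaded checkpoint (the path is a placeholder) could look like this:

```python
# Sketch: read a checkpoint's trainer_state.json and list eval losses per step.
import json

with open("checkpoint-30/trainer_state.json") as f:  # placeholder local path
    state = json.load(f)

print("best_metric:", state["best_metric"])  # 0.44581571221351624 in this commit
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]}')
```
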
checkpoint-30/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176

checkpoint-40/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
     "revision": null,
     "target_modules": [
         "q_proj",
+        "k_proj",
         "v_proj",
-        "o_proj"
-        "k_proj"
+        "o_proj"
     ],
     "task_type": "CAUSAL_LM",
     "use_rslora": false

checkpoint-40/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7b041c9a2283489464450c8f036707f48c1437ef863952d49130a0f6ffff827a
 size 436242776

checkpoint-40/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bbf571fc43c326ce5b7f8ab1133043927c1104c6b8d07f4365744906cc3954f2
 size 872568314

checkpoint-40/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:562c6644f18ed45bd23812c0c9184280a67775561c0f90429c64bbb0b4cfdd52
+size 14512

checkpoint-40/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6e449608c637ff3eb44facd6dbe70487e4bba04dec462d49ef0b0b376478b590
+size 14512

checkpoint-40/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9f3f71a5b5e00e23a44af22895931ea5920a9f7ce8cbf52213ba10a1597ee79b
 size 1000

checkpoint-40/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "best_metric": 0.
+    "best_metric": 0.4221758544445038,
     "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-40",
-    "epoch": 0.
+    "epoch": 0.1982651796778191,
     "eval_steps": 10,
     "global_step": 40,
     "is_hyper_param_search": false,
@@ -9,49 +9,49 @@
     "is_world_process_zero": true,
     "log_history": [
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.05,
+            "eval_loss": 0.6367942690849304,
+            "eval_runtime": 180.8957,
+            "eval_samples_per_second": 15.849,
+            "eval_steps_per_second": 0.995,
             "step": 10
         },
         {
-            "epoch": 0.
-            "grad_norm": 0.
-            "learning_rate": 0.
-            "loss":
+            "epoch": 0.1,
+            "grad_norm": 0.34346863627433777,
+            "learning_rate": 0.00019510565162951537,
+            "loss": 0.9774,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.1,
+            "eval_loss": 0.48089343309402466,
+            "eval_runtime": 180.8882,
+            "eval_samples_per_second": 15.85,
+            "eval_steps_per_second": 0.995,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.15,
+            "eval_loss": 0.44581571221351624,
+            "eval_runtime": 180.8305,
+            "eval_samples_per_second": 15.855,
+            "eval_steps_per_second": 0.995,
             "step": 30
         },
         {
-            "epoch": 0.
-            "grad_norm":
-            "learning_rate": 0.
-            "loss": 0.
+            "epoch": 0.2,
+            "grad_norm": 0.22729116678237915,
+            "learning_rate": 0.0001529919264233205,
+            "loss": 0.4459,
             "step": 40
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.2,
+            "eval_loss": 0.4221758544445038,
+            "eval_runtime": 180.8406,
+            "eval_samples_per_second": 15.854,
+            "eval_steps_per_second": 0.995,
             "step": 40
         }
     ],
@@ -60,7 +60,7 @@
     "num_input_tokens_seen": 0,
     "num_train_epochs": 1,
     "save_steps": 10,
-    "total_flos":
+    "total_flos": 1.8087006649607782e+17,
     "train_batch_size": 16,
     "trial_name": null,
     "trial_params": null

checkpoint-40/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176

checkpoint-50/adapter_config.json CHANGED
@@ -20,9 +20,9 @@
     "revision": null,
     "target_modules": [
         "q_proj",
+        "k_proj",
         "v_proj",
-        "o_proj"
-        "k_proj"
+        "o_proj"
     ],
     "task_type": "CAUSAL_LM",
     "use_rslora": false

checkpoint-50/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:792bf8f744bca5db30a5b2fe80e0610d871d2e0e9c0a4f386c15921efdc2cada
 size 436242776

checkpoint-50/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4bfe60b7f0163e31d4d317b854f7aa8b441fdda4591c40ed389e383d3a3f89cf
 size 872568314

checkpoint-50/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:155af0328dced3a7822123bfbbfc0a98082468058997d134358c43dd61128a48
+size 14512

checkpoint-50/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a4b7ab3a551c96c4ceca4cd3993bf33763b09c0ba81bc2a77fec0f7faa8f7890
+size 14512

checkpoint-50/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:17f23ea92413fa32eaeae1119269ac938b5f442880c1543708aed43058dfbadd
 size 1000

checkpoint-50/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "best_metric": 0.
+    "best_metric": 0.40922558307647705,
     "best_model_checkpoint": "./mistral/01-03-24-Weni-ZeroShot-3.3.18-Mistral-7b-Multilanguage-3.2.0_Zeroshot-2_max_steps-100_batch_16_2024-03-01_ppid_7/checkpoint-50",
-    "epoch": 0.
+    "epoch": 0.24783147459727387,
     "eval_steps": 10,
     "global_step": 50,
     "is_hyper_param_search": false,
@@ -9,57 +9,57 @@
     "is_world_process_zero": true,
     "log_history": [
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.05,
+            "eval_loss": 0.6367942690849304,
+            "eval_runtime": 180.8957,
+            "eval_samples_per_second": 15.849,
+            "eval_steps_per_second": 0.995,
             "step": 10
         },
         {
-            "epoch": 0.
-            "grad_norm": 0.
-            "learning_rate": 0.
-            "loss":
+            "epoch": 0.1,
+            "grad_norm": 0.34346863627433777,
+            "learning_rate": 0.00019510565162951537,
+            "loss": 0.9774,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.1,
+            "eval_loss": 0.48089343309402466,
+            "eval_runtime": 180.8882,
+            "eval_samples_per_second": 15.85,
+            "eval_steps_per_second": 0.995,
             "step": 20
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.15,
+            "eval_loss": 0.44581571221351624,
+            "eval_runtime": 180.8305,
+            "eval_samples_per_second": 15.855,
+            "eval_steps_per_second": 0.995,
             "step": 30
         },
         {
-            "epoch": 0.
-            "grad_norm":
-            "learning_rate": 0.
-            "loss": 0.
+            "epoch": 0.2,
+            "grad_norm": 0.22729116678237915,
+            "learning_rate": 0.0001529919264233205,
+            "loss": 0.4459,
             "step": 40
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.2,
+            "eval_loss": 0.4221758544445038,
+            "eval_runtime": 180.8406,
+            "eval_samples_per_second": 15.854,
+            "eval_steps_per_second": 0.995,
             "step": 40
         },
         {
-            "epoch": 0.
-            "eval_loss": 0.
-            "eval_runtime":
-            "eval_samples_per_second":
-            "eval_steps_per_second": 0.
+            "epoch": 0.25,
+            "eval_loss": 0.40922558307647705,
+            "eval_runtime": 181.1791,
+            "eval_samples_per_second": 15.824,
+            "eval_steps_per_second": 0.993,
             "step": 50
         }
     ],
@@ -68,7 +68,7 @@
     "num_input_tokens_seen": 0,
     "num_train_epochs": 1,
     "save_steps": 10,
-    "total_flos":
+    "total_flos": 2.2680060137832448e+17,
     "train_batch_size": 16,
     "trial_name": null,
     "trial_params": null

checkpoint-50/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:397ed785970a10324cddec1fc7a5b8a987e32e5f4a937dc38def4b1cb481ef1b
 size 5176

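Across the three checkpoints in this commit, the eval loss improves monotonically (0.4458 at step 30, 0.4222 at step 40, 0.4092 at step 50), so checkpoint-50 is the current best. A hedged sketch of loading such an adapter checkpoint with PEFT is shown below; the base model id is an assumption inferred from the run name, not something recorded in this diff.

```python
# Sketch: load the LoRA adapter from the best checkpoint on top of its base model.
# The base model id is an assumption; replace it with the value recorded in
# adapter_config.json ("base_model_name_or_path") of the actual checkpoint.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-v0.1"  # assumption based on the run name
base = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Path to the downloaded checkpoint folder from this upload.
model = PeftModel.from_pretrained(base, "checkpoint-50")
```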