Mel-Iza0 committed
Commit d430c64
1 Parent(s): 6575bd4

Upload folder using huggingface_hub

checkpoint-10/adapter_config.json CHANGED
@@ -19,10 +19,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-  "k_proj",
-  "v_proj",
+  "o_proj",
   "q_proj",
-  "o_proj"
+  "v_proj",
+  "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_rslora": false
checkpoint-10/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15ae5932d2117a9fe102743dce9227b614a7f7e67bd4612c850f63e7ec055e7c
+oid sha256:8eb55c00afe2daf0cc193b98ba17167db3a5221a88af9ca571557a77b25c40db
 size 27297032
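
This file, like the other binaries in the commit, is stored as a Git LFS pointer, so only its sha256 oid (and, if it differed, its size) changes here. A small sketch, assuming the actual safetensors file has already been downloaded locally (the path is illustrative), of checking a file against the oid in its pointer:

```python
# Sketch: verify a downloaded LFS object against the oid recorded in its pointer file.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file in 1 MiB chunks and return its hex sha256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("checkpoint-10/adapter_model.safetensors")
print(digest == "8eb55c00afe2daf0cc193b98ba17167db3a5221a88af9ca571557a77b25c40db")
```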
checkpoint-10/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:01dddfc0dd86a2b4573d485e442360222cc561b471bb2f610b5f44b1c390e9a1
+oid sha256:2bc294522b85647cee62094c86a5d17595ba897e21fae21411437e3cdc4c748d
 size 54678010
checkpoint-10/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:965202796c6aa89694fc280d55ec3a965bdea05297618ca5204beb258f29acbb
+oid sha256:8227d354da3cbca07acd78ccb1c34b1147cb1758e2d64f86fd35b5f7dd774835
 size 14512
checkpoint-10/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e64ab752dbb68c5e75ddb592f9ba807f28ad33b4959ba527eed1f91dd74d99d
+oid sha256:23cdbafd808fd15bd280a8c9a008f72f112af1045baa57257c1c247b6fb6b6b8
 size 14512
checkpoint-10/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 1.1801708936691284,
+  "best_metric": 1.158402442932129,
   "best_model_checkpoint": "./mistral/29-02-24-Weni-test-folder-upload_Zeroshot-2_max_steps-30_batch_8_2024-02-29_ppid_7/checkpoint-10",
   "epoch": 0.006199628022318661,
   "eval_steps": 10,
@@ -10,10 +10,10 @@
   "log_history": [
     {
       "epoch": 0.01,
-      "eval_loss": 1.1801708936691284,
-      "eval_runtime": 206.9482,
-      "eval_samples_per_second": 13.854,
-      "eval_steps_per_second": 3.465,
+      "eval_loss": 1.158402442932129,
+      "eval_runtime": 206.6551,
+      "eval_samples_per_second": 13.873,
+      "eval_steps_per_second": 3.47,
       "step": 10
     }
   ],
@@ -22,7 +22,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 10,
-  "total_flos": 4575910770507776.0,
+  "total_flos": 5364754528862208.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
checkpoint-10/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d4c9c5bf1f39262ed092c14f2781858ebaaf53fd5ab0433213c6b7ee0a70c7c
+oid sha256:377cebf6747b0036d17e46c08128d3e0536853da5124cfa8637015f03ae33506
 size 5112
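
training_args.bin holds the pickled transformers.TrainingArguments object rather than model weights. A hedged sketch of loading it locally (assumes torch and transformers are installed; recent torch versions require weights_only=False because this is a pickled Python object, not a tensor file):

```python
# Sketch: inspect the TrainingArguments stored alongside the checkpoint.
import torch

args = torch.load("checkpoint-10/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.eval_steps, args.save_steps)
```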