alexredna committed (verified)
Commit 78cbf16 · 1 Parent(s): 1470c8b

Model save

README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+license: apache-2.0
 library_name: peft
 tags:
 - trl
@@ -6,7 +7,7 @@ tags:
 - generated_from_trainer
 datasets:
 - generator
-base_model: data/Tukan-1.1B-Chat-reasoning-sft-COLA_merged_0
+base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 model-index:
 - name: Tukan-1.1B-Chat-reasoning-sft-COLA
   results: []
@@ -17,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Tukan-1.1B-Chat-reasoning-sft-COLA
 
-This model was trained from scratch on the generator dataset.
+This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.5347
+- Loss: 1.5464
 
 ## Model description
 
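With the card repointed at TinyLlama/TinyLlama-1.1B-Chat-v1.0, the adapter in this repo is meant to be loaded on top of that base model via peft. A minimal loading sketch; the adapter repo id below is an assumption (the diff only shows the model name Tukan-1.1B-Chat-reasoning-sft-COLA):

# Sketch only: adapter_id is an assumed Hub repo id, not confirmed by this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
adapter_id = "alexredna/Tukan-1.1B-Chat-reasoning-sft-COLA"  # assumed

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA adapter weights

prompt = "Explain why 3 * 17 is 51."
inputs = tokenizer(prompt, return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))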
 
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "data/Tukan-1.1B-Chat-reasoning-sft-COLA_merged_0",
+  "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -16,14 +16,14 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
+    "o_proj",
     "v_proj",
     "up_proj",
+    "gate_proj",
     "lm_head",
-    "o_proj",
-    "down_proj",
     "q_proj",
-    "gate_proj"
+    "k_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
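The config change only swaps base_model_name_or_path and reorders target_modules; the same eight modules are adapted before and after. A sketch of the equivalent peft LoraConfig, assuming the rank and alpha settings elided from this hunk are left at their existing (unshown) values:

# Sketch of the LoRA layout implied by adapter_config.json.
# r and lora_alpha are not visible in the diff, so library defaults are used here.
from peft import LoraConfig

lora_config = LoraConfig(
    base_model_name_or_path="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",   # attention projections
        "gate_proj", "up_proj", "down_proj",      # MLP projections
        "lm_head",                                # output head
    ],
    bias="none",
    task_type="CAUSAL_LM",
)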
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:faec55aa5e087bfcfcc1a48ca7befe73c653e838144f524d8570f3d354d9d2f8
+oid sha256:68b264ea9c5c8573c5bac38b0d63c8d49f62b4bc8f9b8b97dcb0cff3e141bf3a
 size 26361536
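The safetensors file itself lives in Git LFS, so only the pointer's content hash changes while the payload stays 26361536 bytes. A small sketch for checking a downloaded copy against the updated pointer:

# Verify a local download against the LFS pointer fields shown above.
import hashlib
from pathlib import Path

path = Path("adapter_model.safetensors")
expected_sha256 = "68b264ea9c5c8573c5bac38b0d63c8d49f62b4bc8f9b8b97dcb0cff3e141bf3a"
expected_size = 26361536

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_sha256, "hash mismatch"
print("adapter_model.safetensors matches the LFS pointer")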
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 0.95,
-    "eval_loss": 1.5346883535385132,
-    "eval_runtime": 1.3685,
+    "eval_loss": 1.5464149713516235,
+    "eval_runtime": 1.3787,
     "eval_samples": 36,
-    "eval_samples_per_second": 5.115,
-    "eval_steps_per_second": 2.192,
-    "train_loss": 1.555079698562622,
-    "train_runtime": 77.8333,
+    "eval_samples_per_second": 5.077,
+    "eval_steps_per_second": 2.176,
+    "train_loss": 1.5683434009552002,
+    "train_runtime": 86.2408,
     "train_samples": 682,
-    "train_samples_per_second": 1.567,
-    "train_steps_per_second": 0.013
+    "train_samples_per_second": 1.415,
+    "train_steps_per_second": 0.012
 }
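For scale, and assuming these losses are the usual mean token-level cross-entropy in nats, the corresponding perplexities move slightly upward with this commit:

import math

# eval perplexity: exp(loss)
print(math.exp(1.5346883535385132))  # ≈ 4.64, previous commit
print(math.exp(1.5464149713516235))  # ≈ 4.70, this commit

# train perplexity
print(math.exp(1.555079698562622))   # ≈ 4.74, previous commit
print(math.exp(1.5683434009552002))  # ≈ 4.80, this commit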
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 0.95,
-    "eval_loss": 1.5346883535385132,
-    "eval_runtime": 1.3685,
+    "eval_loss": 1.5464149713516235,
+    "eval_runtime": 1.3787,
     "eval_samples": 36,
-    "eval_samples_per_second": 5.115,
-    "eval_steps_per_second": 2.192
+    "eval_samples_per_second": 5.077,
+    "eval_steps_per_second": 2.176
 }
runs/Jan21_18-49-28_98f107f1aa39/events.out.tfevents.1705862986.98f107f1aa39.101023.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e32d6af1d9d9c738b51ec8e56a1b167019e7a8199d06254b97de0dec6e6c90fc
+size 5042
runs/Jan21_18-49-28_98f107f1aa39/events.out.tfevents.1705863074.98f107f1aa39.101023.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a15dfe8e10f5a0d0a7b43b116ec8129f944940a9cd2f3b2d652ba7a7c801b34e
+size 354
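The two files added under runs/ are TensorBoard event logs (also stored as LFS pointers). Once the real files are fetched, their scalars can be read back with TensorBoard's EventAccumulator; the tag name used below is a typical Trainer tag and is an assumption:

# Sketch: list and read scalar tags from the added event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Jan21_18-49-28_98f107f1aa39")
ea.Reload()
print(ea.Tags()["scalars"])             # see which scalar tags were actually logged
for event in ea.Scalars("train/loss"):  # assumed tag name; use one printed above
    print(event.step, event.value)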
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 0.95,
-    "train_loss": 1.555079698562622,
-    "train_runtime": 77.8333,
+    "train_loss": 1.5683434009552002,
+    "train_runtime": 86.2408,
     "train_samples": 682,
-    "train_samples_per_second": 1.567,
-    "train_steps_per_second": 0.013
+    "train_samples_per_second": 1.415,
+    "train_steps_per_second": 0.012
 }
trainer_state.json CHANGED
@@ -11,17 +11,17 @@
     {
       "epoch": 0.95,
       "learning_rate": 0.0,
-      "loss": 1.5551,
+      "loss": 1.5683,
       "step": 1
     },
     {
       "epoch": 0.95,
       "step": 1,
       "total_flos": 1535153516052480.0,
-      "train_loss": 1.555079698562622,
-      "train_runtime": 77.8333,
-      "train_samples_per_second": 1.567,
-      "train_steps_per_second": 0.013
+      "train_loss": 1.5683434009552002,
+      "train_runtime": 86.2408,
+      "train_samples_per_second": 1.415,
+      "train_steps_per_second": 0.012
     }
   ],
   "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aeb0ef68df7eba155819ecadf94529b54aabf6815f0cfce9a25ffa9c9040fe91
+oid sha256:54992a2c8472fa25aaf5bd77b4f99dc364639b438e0687926d0334ae0c178a23
 size 4792
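training_args.bin is likewise an LFS pointer; the underlying file is a pickled TrainingArguments object. A hedged sketch for inspecting it, assuming transformers is installed so the class can be unpickled:

# training_args.bin is a pickled Python object, so weights_only=False is required
# on newer torch versions; only load files from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)                       # expected: TrainingArguments
print(args.learning_rate, args.num_train_epochs)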