lucifertrj committed
Commit 28a08aa · verified · 1 Parent(s): 642f677

lucifertrj/multieuro-adapter

README.md CHANGED
@@ -14,12 +14,12 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/tarunwandb/huggingface/runs/p1uelhr3)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/tarunwandb/huggingface/runs/5mqa1xeb)
 # results
 
-This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on an unknown dataset.
+This model is a fine-tuned version of [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.3580
+- Loss: 1.2406
 
 ## Model description
 
@@ -53,18 +53,24 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch  | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 2.0752        | 0.0771 | 100  | 1.4142          |
-| 1.3931        | 0.1542 | 200  | 1.3826          |
-| 1.3882        | 0.2313 | 300  | 1.3745          |
-| 1.3717        | 0.3085 | 400  | 1.3701          |
-| 1.3711        | 0.3856 | 500  | 1.3673          |
-| 1.3659        | 0.4627 | 600  | 1.3649          |
-| 1.3691        | 0.5398 | 700  | 1.3632          |
-| 1.3585        | 0.6169 | 800  | 1.3614          |
-| 1.3634        | 0.6940 | 900  | 1.3603          |
-| 1.362         | 0.7712 | 1000 | 1.3593          |
-| 1.3584        | 0.8483 | 1100 | 1.3586          |
-| 1.3516        | 0.9254 | 1200 | 1.3580          |
+| 2.2518        | 0.0530 | 100  | 1.3453          |
+| 1.3137        | 0.1059 | 200  | 1.2820          |
+| 1.2681        | 0.1589 | 300  | 1.2684          |
+| 1.2611        | 0.2118 | 400  | 1.2625          |
+| 1.2599        | 0.2648 | 500  | 1.2587          |
+| 1.2709        | 0.3177 | 600  | 1.2561          |
+| 1.2607        | 0.3707 | 700  | 1.2537          |
+| 1.2502        | 0.4236 | 800  | 1.2515          |
+| 1.2475        | 0.4766 | 900  | 1.2494          |
+| 1.2479        | 0.5295 | 1000 | 1.2476          |
+| 1.2535        | 0.5825 | 1100 | 1.2469          |
+| 1.2546        | 0.6354 | 1200 | 1.2455          |
+| 1.2498        | 0.6884 | 1300 | 1.2440          |
+| 1.2445        | 0.7413 | 1400 | 1.2433          |
+| 1.247         | 0.7943 | 1500 | 1.2423          |
+| 1.2438        | 0.8472 | 1600 | 1.2418          |
+| 1.2434        | 0.9002 | 1700 | 1.2413          |
+| 1.2425        | 0.9531 | 1800 | 1.2406          |
 
 
 ### Framework versions
@@ -72,5 +78,5 @@ The following hyperparameters were used during training:
 - PEFT 0.12.0
 - Transformers 4.42.4
 - Pytorch 2.3.1+cu121
-- Datasets 2.20.0
+- Datasets 2.21.0
 - Tokenizers 0.19.1
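For readers of the card, a minimal usage sketch follows. It assumes the adapter repo id `lucifertrj/multieuro-adapter` from the commit header and the versions pinned under "Framework versions" above; the prompt is illustrative only, since the card does not name the dataset or task.

```python
# Minimal sketch: load the base model, then attach the LoRA adapter.
# Assumes peft==0.12.0 and transformers==4.42.4 as pinned in the card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/Phi-3-mini-4k-instruct"
adapter_id = "lucifertrj/multieuro-adapter"  # repo id from the commit header

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, adapter_id)  # loads adapter_model.safetensors

inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```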
adapter_config.json CHANGED
@@ -10,23 +10,23 @@
     "layers_pattern": null,
     "layers_to_transform": null,
     "loftq_config": {},
-    "lora_alpha": 16,
+    "lora_alpha": 32,
     "lora_dropout": 0.05,
     "megatron_config": null,
     "megatron_core": "megatron.core",
     "modules_to_save": null,
     "peft_type": "LORA",
-    "r": 16,
+    "r": 32,
     "rank_pattern": {},
     "revision": null,
     "target_modules": [
-        "gate_proj",
-        "k_proj",
         "v_proj",
-        "q_proj",
-        "o_proj",
+        "k_proj",
         "up_proj",
-        "down_proj"
+        "down_proj",
+        "o_proj",
+        "q_proj",
+        "gate_proj"
     ],
     "task_type": "CAUSAL_LM",
     "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:77c785345b869994552f0d6667d135fda3af82a808423d5aff25c52b4ded677c
-size 35668592
+oid sha256:acb3661213f64196afbc3636bd99f01159679bb1d2700ade7a358e3678ecc0ef
+size 71320216
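The near-exact doubling of the checkpoint (35,668,592 → 71,320,216 bytes) is expected: for each adapted projection LoRA stores A ∈ r×d_in and B ∈ d_out×r, so the parameter count scales linearly with r, and this commit doubles r from 16 to 32 over the same target modules. A sketch to verify against a local download of the file (filename as in the repo):

```python
# Sketch: count parameters in the adapter checkpoint to confirm that
# doubling r roughly doubles the file size (35.7 MB -> 71.3 MB).
from safetensors import safe_open

total = 0
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for key in f.keys():
        # each lora_A weight has shape (r, d_in), each lora_B (d_out, r)
        total += f.get_tensor(key).numel()
print(f"{total:,} LoRA parameters")
```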
tokenizer.json CHANGED
@@ -2,7 +2,7 @@
   "version": "1.0",
   "truncation": {
     "direction": "Right",
-    "max_length": 1024,
+    "max_length": 756,
     "strategy": "LongestFirst",
     "stride": 0
   },
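This tightens the tokenizer's baked-in truncation from 1024 to 756 tokens. A sketch of the equivalent runtime setting on a fast tokenizer (tokenizers==0.19.1 as pinned in the card), for anyone who wants the behavior without editing tokenizer.json:

```python
# Sketch: apply the same truncation the updated tokenizer.json encodes.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
tok.backend_tokenizer.enable_truncation(
    max_length=756,            # was 1024 before this commit
    strategy="longest_first",  # "LongestFirst" in tokenizer.json
    direction="right",
    stride=0,
)
# Or per call, without persisting anything:
ids = tok("some long text", truncation=True, max_length=756)
```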
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:933f8997dba5ad4ccb53fda5d90ef1c717bac19cb087848f59cfd3a247eeaa0d
+oid sha256:85ab47e6d0f37d6528b77677c6be8d531a1a536345d35fc2f45bd4c78afda5bf
 size 5304
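training_args.bin is the pickled TrainingArguments object the Trainer saves alongside a run; the hash changed here while the size stayed at 5304 bytes, consistent with only scalar hyperparameters differing between runs. A sketch for inspecting a local copy (pickle files execute code on load, so only do this with a trusted download):

```python
# Sketch: inspect the pickled TrainingArguments (torch 2.3.1 as pinned).
import torch

args = torch.load("training_args.bin")  # pickle: only load trusted files
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```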