xiaoxiaolin
committed on
Upload folder using huggingface_hub
- checkpoint-1400/config.json +44 -0
- checkpoint-1400/model.safetensors +3 -0
- checkpoint-1400/optimizer.pt +3 -0
- checkpoint-1400/scheduler.pt +3 -0
- checkpoint-1400/special_tokens_map.json +37 -0
- checkpoint-1400/tokenizer.json +0 -0
- checkpoint-1400/tokenizer_config.json +62 -0
- checkpoint-1400/trainer_state.json +20 -0
- checkpoint-1400/training_args.bin +3 -0
- checkpoint-1400/vocab.txt +0 -0
- checkpoint-2800/config.json +44 -0
- checkpoint-2800/model.safetensors +3 -0
- checkpoint-2800/optimizer.pt +3 -0
- checkpoint-2800/scheduler.pt +3 -0
- checkpoint-2800/special_tokens_map.json +37 -0
- checkpoint-2800/tokenizer.json +0 -0
- checkpoint-2800/tokenizer_config.json +62 -0
- checkpoint-2800/trainer_state.json +20 -0
- checkpoint-2800/training_args.bin +3 -0
- checkpoint-2800/vocab.txt +0 -0
- checkpoint-4200/config.json +44 -0
- checkpoint-4200/model.safetensors +3 -0
- checkpoint-4200/optimizer.pt +3 -0
- checkpoint-4200/scheduler.pt +3 -0
- checkpoint-4200/special_tokens_map.json +37 -0
- checkpoint-4200/tokenizer.json +0 -0
- checkpoint-4200/tokenizer_config.json +62 -0
- checkpoint-4200/trainer_state.json +20 -0
- checkpoint-4200/training_args.bin +3 -0
- checkpoint-4200/vocab.txt +0 -0
- config.json +44 -0
- custom_info +9 -0
- model.safetensors +3 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +62 -0
- train_results.txt +3 -0
- trainer_state.json +28 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
checkpoint-1400/config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_name_or_path": "Alibaba-NLP/gte-large-en-v1.5",
+  "architectures": [
+    "NewModelForCL"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pack_qkv": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 160000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 2,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30522
+}
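Note: the auto_map above resolves AutoModel and friends to Alibaba-NLP's remotely hosted new-impl code rather than to classes shipped with transformers, so loading these checkpoints needs trust_remote_code=True (the "NewModelForCL" architecture itself comes from the training code, not the library). A minimal loading sketch; the checkpoint path is a placeholder:

from transformers import AutoModel, AutoTokenizer

ckpt = "checkpoint-4200"  # placeholder: local path to one of the uploaded checkpoints

tokenizer = AutoTokenizer.from_pretrained(ckpt)
# trust_remote_code=True is required because auto_map points at
# Alibaba-NLP/new-impl, which lives outside the transformers package.
model = AutoModel.from_pretrained(ckpt, trust_remote_code=True)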
checkpoint-1400/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c608d9a36b7296e1602e9b0417c5e0dbb7ce722f6ecd2c5b0bafd4e4f94e61
+size 1736561104
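These three lines are a Git LFS pointer, not the weights themselves: the repo stores only the spec version, the sha256 oid, and the byte size (~1.74 GB), and the blob is resolved from LFS storage on download. A hedged verification sketch using huggingface_hub (the repo_id is a placeholder):

import hashlib

from huggingface_hub import hf_hub_download

# repo_id is a placeholder; substitute the actual Hub repository.
path = hf_hub_download(
    repo_id="user/repo",
    filename="checkpoint-1400/model.safetensors",
)

# Stream the file through sha256; the digest should equal the oid above.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest())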
checkpoint-1400/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b842dcc761015212ef93e7ad4f640cc53b44c7cf2d1758dcd1e003a27ae2ef75
+size 3473287493
checkpoint-1400/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcea1a200517ffc2cf77af3894149058705795c4078294ca6f50ab18ccde5527
+size 627
checkpoint-1400/special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-1400/tokenizer.json
ADDED
The diff for this file is too large to render.
checkpoint-1400/tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 8000,
+  "model_max_length": 32768,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
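One quirk worth flagging: max_length is 8000 and model_max_length is 32768, but the model config caps max_position_embeddings at 8192, so relying on the tokenizer's defaults will not necessarily truncate to what the model can attend over. A minimal sketch that pins truncation explicitly (placeholder path; 8192 chosen here to match max_position_embeddings):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-4200")  # placeholder path
enc = tokenizer(
    "a long input document ...",
    truncation=True,
    max_length=8192,  # match max_position_embeddings, not model_max_length=32768
    return_tensors="pt",
)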
checkpoint-1400/trainer_state.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 1400,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "logging_steps": 500,
+  "max_steps": 4200,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 0,
+  "train_batch_size": null,
+  "trial_name": null,
+  "trial_params": null
+}
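The numbers here are internally consistent: max_steps 4200 over num_train_epochs 3 gives 1400 optimizer steps per epoch, and the three uploaded checkpoints sit exactly at those epoch boundaries (1400, 2800, 4200) rather than at save_steps=500 multiples, so presumably only the per-epoch saves were kept.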
checkpoint-1400/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f1d1860c201152194fec8da63c1ddd15c9b31b41cc737ce034071ce733c789b
+size 4091
checkpoint-1400/vocab.txt
ADDED
The diff for this file is too large to render.
checkpoint-2800/config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_name_or_path": "Alibaba-NLP/gte-large-en-v1.5",
+  "architectures": [
+    "NewModelForCL"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pack_qkv": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 160000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 2,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30522
+}
checkpoint-2800/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9b8140c50cf003bcc482c5258fa29b05e337fa96394eff65acbb554f585c817
+size 1736561104
checkpoint-2800/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc2cb31c5e472ba8950613142a034a182c62018bf0c3ce8230768ff03d85524f
+size 3473287493
checkpoint-2800/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5353282c47ce7e1629b303c28d290db65d7df0877959e58338a438dcca8d310
+size 627
checkpoint-2800/special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-2800/tokenizer.json
ADDED
The diff for this file is too large to render.
checkpoint-2800/tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 8000,
+  "model_max_length": 32768,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
checkpoint-2800/trainer_state.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 2800,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "logging_steps": 500,
+  "max_steps": 4200,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 0,
+  "train_batch_size": null,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-2800/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f1d1860c201152194fec8da63c1ddd15c9b31b41cc737ce034071ce733c789b
+size 4091
checkpoint-2800/vocab.txt
ADDED
The diff for this file is too large to render.
checkpoint-4200/config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_name_or_path": "Alibaba-NLP/gte-large-en-v1.5",
+  "architectures": [
+    "NewModelForCL"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pack_qkv": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 160000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 2,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30522
+}
checkpoint-4200/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8363ad9e6f06e6bc75da9a6a5524d4c6ec5fb6816afa269b962815cad8b19236
+size 1736561104
checkpoint-4200/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80bf9025a46fa7a39222f1310cc0cf17aeba33a9fb735c417fcac4d55ac86055
+size 3473287493
checkpoint-4200/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe4c02de10aa198f8890d58eaff374045cd827a437d97f98892422dbc9358138
+size 627
checkpoint-4200/special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-4200/tokenizer.json
ADDED
The diff for this file is too large to render.
checkpoint-4200/tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 8000,
+  "model_max_length": 32768,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
checkpoint-4200/trainer_state.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 4200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [],
+  "logging_steps": 500,
+  "max_steps": 4200,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 0,
+  "train_batch_size": null,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-4200/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f1d1860c201152194fec8da63c1ddd15c9b31b41cc737ce034071ce733c789b
+size 4091
checkpoint-4200/vocab.txt
ADDED
The diff for this file is too large to render.
config.json
ADDED
@@ -0,0 +1,44 @@
+{
+  "_name_or_path": "Alibaba-NLP/gte-large-en-v1.5",
+  "architectures": [
+    "NewModelForCL"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pack_qkv": true,
+  "pad_token_id": 0,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 2.0,
+    "type": "ntk"
+  },
+  "rope_theta": 160000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.36.2",
+  "type_vocab_size": 2,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 30522
+}
custom_info
ADDED
@@ -0,0 +1,9 @@
+epoch=0 step=466 progress=0.111 eval info {'sparsity_loss': 0.46, 'l1l2_ratio_z12': 21.945, 'l1l2_ratio_z13': 24.316, 'l1l2_ratio_z13_all': 25.81} train info {'sparsity_loss': 1.884, 'l1l2_ratio_z12': 23.299, 'l1l2_ratio_z13': 25.038, 'l1l2_ratio_z13_all': 25.553}
+epoch=0 step=932 progress=0.222 eval info {'sparsity_loss': 0.409, 'l1l2_ratio_z12': 21.396, 'l1l2_ratio_z13': 24.043, 'l1l2_ratio_z13_all': 25.861} train info {'sparsity_loss': 0.277, 'l1l2_ratio_z12': 21.973, 'l1l2_ratio_z13': 24.671, 'l1l2_ratio_z13_all': 25.844}
+epoch=0 step=1398 progress=0.333 eval info {'sparsity_loss': 0.338, 'l1l2_ratio_z12': 21.167, 'l1l2_ratio_z13': 23.785, 'l1l2_ratio_z13_all': 25.845} train info {'sparsity_loss': 0.144, 'l1l2_ratio_z12': 21.444, 'l1l2_ratio_z13': 24.537, 'l1l2_ratio_z13_all': 25.87}
+epoch=1 step=466 progress=0.444 eval info {'sparsity_loss': 0.335, 'l1l2_ratio_z12': 19.772, 'l1l2_ratio_z13': 23.568, 'l1l2_ratio_z13_all': 25.787} train info {'sparsity_loss': 0.063, 'l1l2_ratio_z12': 20.466, 'l1l2_ratio_z13': 24.443, 'l1l2_ratio_z13_all': 25.822}
+epoch=1 step=932 progress=0.555 eval info {'sparsity_loss': 0.341, 'l1l2_ratio_z12': 18.72, 'l1l2_ratio_z13': 22.955, 'l1l2_ratio_z13_all': 25.723} train info {'sparsity_loss': 0.041, 'l1l2_ratio_z12': 19.578, 'l1l2_ratio_z13': 24.308, 'l1l2_ratio_z13_all': 25.795}
+epoch=1 step=1398 progress=0.666 eval info {'sparsity_loss': 0.414, 'l1l2_ratio_z12': 17.568, 'l1l2_ratio_z13': 22.579, 'l1l2_ratio_z13_all': 25.668} train info {'sparsity_loss': 0.025, 'l1l2_ratio_z12': 18.579, 'l1l2_ratio_z13': 24.139, 'l1l2_ratio_z13_all': 25.763}
+epoch=2 step=466 progress=0.778 eval info {'sparsity_loss': 0.399, 'l1l2_ratio_z12': 16.844, 'l1l2_ratio_z13': 22.645, 'l1l2_ratio_z13_all': 25.691} train info {'sparsity_loss': 0.015, 'l1l2_ratio_z12': 17.588, 'l1l2_ratio_z13': 24.193, 'l1l2_ratio_z13_all': 25.747}
+epoch=2 step=932 progress=0.889 eval info {'sparsity_loss': 0.421, 'l1l2_ratio_z12': 16.263, 'l1l2_ratio_z13': 22.549, 'l1l2_ratio_z13_all': 25.682} train info {'sparsity_loss': 0.013, 'l1l2_ratio_z12': 16.787, 'l1l2_ratio_z13': 24.081, 'l1l2_ratio_z13_all': 25.725}
+epoch=2 step=1398 progress=1.0 eval info {'sparsity_loss': 0.417, 'l1l2_ratio_z12': 16.159, 'l1l2_ratio_z13': 22.708, 'l1l2_ratio_z13_all': 25.705} train info {'sparsity_loss': 0.015, 'l1l2_ratio_z12': 16.438, 'l1l2_ratio_z13': 24.094, 'l1l2_ratio_z13_all': 25.717}
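custom_info is a plain-text training log: one line per evaluation point, each carrying an epoch, an in-epoch step, a progress fraction, and two Python-dict payloads (eval and train metrics: a sparsity loss plus L1/L2 ratios for the z12/z13 activations). A minimal parsing sketch, assuming exactly this line format (the dicts use single quotes, so ast.literal_eval rather than json):

import ast
import re

LINE = re.compile(
    r"epoch=(\d+) step=(\d+) progress=([\d.]+) "
    r"eval info (\{.*?\}) train info (\{.*?\})"
)

records = []
with open("custom_info") as f:
    for raw in f:
        m = LINE.match(raw.strip())
        if not m:
            continue
        records.append({
            "epoch": int(m[1]),
            "step": int(m[2]),
            "progress": float(m[3]),
            "eval": ast.literal_eval(m[4]),
            "train": ast.literal_eval(m[5]),
        })

# e.g. trace how the eval-side sparsity loss evolves over training
for r in records:
    print(f"progress={r['progress']:.3f} eval sparsity_loss={r['eval']['sparsity_loss']}")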
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8363ad9e6f06e6bc75da9a6a5524d4c6ec5fb6816afa269b962815cad8b19236
+size 1736561104
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,62 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 8000,
+  "model_max_length": 32768,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
train_results.txt
ADDED
@@ -0,0 +1,3 @@
+epoch = 3.0
+train_runtime = 24068.6912
+train_samples_per_second = 0.175
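For scale: 24068.6912 s is about 6.7 hours, and at 0.175 samples per second that works out to roughly 4212 samples across the 3 epochs (about 1404 per epoch), which lines up with the 1400 optimizer steps per epoch recorded in trainer_state.json and suggests an effective throughput of roughly one sample per optimizer step.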
trainer_state.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 4200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 3.0,
+      "step": 4200,
+      "total_flos": 0,
+      "train_runtime": 24068.6912,
+      "train_samples_per_second": 0.175
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 4200,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 500,
+  "total_flos": 0,
+  "train_batch_size": null,
+  "trial_name": null,
+  "trial_params": null
+}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f1d1860c201152194fec8da63c1ddd15c9b31b41cc737ce034071ce733c789b
+size 4091
vocab.txt
ADDED
The diff for this file is too large to render.