Initial commit
- logs.txt +107 -0
- mlc-chat-config.json +71 -0
- tokenizer.json +0 -0
- tokenizer_config.json +144 -0
logs.txt
ADDED
@@ -0,0 +1,107 @@
/home/floriadmin/miniforge3/envs/mlc/bin/python -m mlc_llm gen_config ../dist/models/gorilla-openfunctions-v2 --quantization q8f32_1 --conv-template gorilla --output /tmp/tmpfb6fqbz7
[2024-03-18 21:03:38] INFO auto_config.py:115: Found model configuration: ../dist/models/gorilla-openfunctions-v2/config.json
[2024-03-18 21:03:38] INFO auto_config.py:153: Found model type: llama. Use `--model-type` to override.
[2024-03-18 21:03:38] INFO llama_model.py:52: context_window_size not found in config.json. Falling back to max_position_embeddings (4096)
[2024-03-18 21:03:38] INFO llama_model.py:72: prefill_chunk_size defaults to context_window_size (4096)
[2024-03-18 21:03:38] INFO config.py:106: Overriding max_batch_size from 1 to 80
[2024-03-18 21:03:38] INFO gen_config.py:133: [generation_config.json] Setting bos_token_id: 100000
[2024-03-18 21:03:38] INFO gen_config.py:133: [generation_config.json] Setting eos_token_id: 100015
[2024-03-18 21:03:38] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/gorilla-openfunctions-v2/tokenizer.model
[2024-03-18 21:03:38] INFO gen_config.py:145: Found tokenizer config: ../dist/models/gorilla-openfunctions-v2/tokenizer.json. Copying to /tmp/tmpfb6fqbz7/tokenizer.json
[2024-03-18 21:03:38] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/gorilla-openfunctions-v2/vocab.json
[2024-03-18 21:03:38] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/gorilla-openfunctions-v2/merges.txt
[2024-03-18 21:03:38] INFO gen_config.py:147: Not found tokenizer config: ../dist/models/gorilla-openfunctions-v2/added_tokens.json
[2024-03-18 21:03:38] INFO gen_config.py:145: Found tokenizer config: ../dist/models/gorilla-openfunctions-v2/tokenizer_config.json. Copying to /tmp/tmpfb6fqbz7/tokenizer_config.json
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting pad_token_id: 0
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting temperature: 0.7
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting presence_penalty: 0.0
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting frequency_penalty: 0.0
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting repetition_penalty: 1.0
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting top_p: 0.95
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting mean_gen_len: 128
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting max_gen_len: 512
[2024-03-18 21:03:38] INFO gen_config.py:75: [System default] Setting shift_fill_factor: 0.3
[2024-03-18 21:03:38] INFO gen_config.py:198: Dumping configuration file to: /tmp/tmpfb6fqbz7/mlc-chat-config.json
/home/floriadmin/miniforge3/envs/mlc/bin/python -m mlc_llm convert_weight ../dist/models/gorilla-openfunctions-v2 --quantization q8f32_1 --source-format auto --output /tmp/tmpfb6fqbz7
[2024-03-18 21:03:39] INFO auto_config.py:115: Found model configuration: ../dist/models/gorilla-openfunctions-v2/config.json
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:0
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:1
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:2
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:3
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:4
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:5
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:6
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:7
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:8
[2024-03-18 21:03:39] INFO auto_device.py:76: Found device: cuda:9
[2024-03-18 21:03:40] INFO auto_device.py:85: Not found device: rocm:0
[2024-03-18 21:03:41] INFO auto_device.py:85: Not found device: metal:0
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:0
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:1
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:2
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:3
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:4
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:5
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:6
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:7
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:8
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:9
[2024-03-18 21:03:44] INFO auto_device.py:76: Found device: vulkan:10
[2024-03-18 21:03:45] INFO auto_device.py:85: Not found device: opencl:0
[2024-03-18 21:03:45] INFO auto_device.py:33: Using device: cuda:0
[2024-03-18 21:03:45] INFO auto_weight.py:70: Finding weights in: ../dist/models/gorilla-openfunctions-v2
[2024-03-18 21:03:45] INFO auto_weight.py:120: Found source weight format: huggingface-torch. Source configuration: ../dist/models/gorilla-openfunctions-v2/pytorch_model.bin.index.json
[2024-03-18 21:03:45] INFO auto_weight.py:143: Found source weight format: huggingface-safetensor. Source configuration: ../dist/models/gorilla-openfunctions-v2/model.safetensors.index.json
[2024-03-18 21:03:45] INFO auto_weight.py:106: Using source weight configuration: ../dist/models/gorilla-openfunctions-v2/pytorch_model.bin.index.json. Use `--source` to override.
[2024-03-18 21:03:45] INFO auto_weight.py:110: Using source weight format: huggingface-torch. Use `--source-format` to override.
[2024-03-18 21:03:45] INFO auto_config.py:153: Found model type: llama. Use `--model-type` to override.
[2024-03-18 21:03:45] INFO llama_model.py:52: context_window_size not found in config.json. Falling back to max_position_embeddings (4096)
[2024-03-18 21:03:45] INFO llama_model.py:72: prefill_chunk_size defaults to context_window_size (4096)
Weight conversion with arguments:
  --config          ../dist/models/gorilla-openfunctions-v2/config.json
  --quantization    GroupQuantize(name='q8f32_1', kind='group-quant', group_size=32, quantize_dtype='int8', storage_dtype='uint32', model_dtype='float32', linear_weight_layout='NK', quantize_embedding=True, quantize_final_fc=True, num_elem_per_storage=4, num_storage_per_group=8, max_int_value=127)
  --model-type      llama
  --device          cuda:0
  --source          ../dist/models/gorilla-openfunctions-v2/pytorch_model.bin.index.json
  --source-format   huggingface-torch
  --output          /tmp/tmpfb6fqbz7
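
The GroupQuantize(q8f32_1) parameters printed above already fix the packed layout: int8 values are stored four to a uint32 word, so a group of 32 elements spans eight storage words plus one float32 scale, and values are clamped to ±127. A minimal Python sketch of that arithmetic (illustration only, not MLC-LLM code):

    # Illustrative arithmetic mirroring the q8f32_1 parameters in the log above.
    storage_bits = 32            # storage_dtype='uint32'
    quantize_bits = 8            # quantize_dtype='int8'
    group_size = 32              # elements sharing one float32 scale

    num_elem_per_storage = storage_bits // quantize_bits          # 4 int8 values per uint32
    num_storage_per_group = group_size // num_elem_per_storage    # 8 uint32 words per group
    max_int_value = 2 ** (quantize_bits - 1) - 1                  # 127

    assert (num_elem_per_storage, num_storage_per_group, max_int_value) == (4, 8, 127)
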
Start storing to cache /tmp/tmpfb6fqbz7
  0%|          | 0/183 [00:00<?, ?it/s]
  0%|          | 0/183 [00:00<?, ?it/s]
  0%|          | 0/183 [00:00<?, ?it/s]
Traceback (most recent call last):
  File "<frozen runpy>", line 198, in _run_module_as_main
  File "<frozen runpy>", line 88, in _run_code
  File "/home/floriadmin/mlc-llm/python/mlc_llm/__main__.py", line 47, in <module>
    main()
  File "/home/floriadmin/mlc-llm/python/mlc_llm/__main__.py", line 28, in main
    cli.main(sys.argv[2:])
  File "/home/floriadmin/mlc-llm/python/mlc_llm/cli/convert_weight.py", line 87, in main
    convert_weight(
  File "/home/floriadmin/mlc-llm/python/mlc_llm/interface/convert_weight.py", line 182, in convert_weight
    _convert_args(args)
  File "/home/floriadmin/mlc-llm/python/mlc_llm/interface/convert_weight.py", line 146, in _convert_args
    tvmjs.dump_ndarray_cache(
  File "/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/tvm/contrib/tvmjs.py", line 210, in dump_ndarray_cache
    for k, origin_v in param_generator:
  File "/home/floriadmin/mlc-llm/python/mlc_llm/interface/convert_weight.py", line 130, in _param_generator
    for name, param in loader.load(device=args.device, preshard_funcs=preshard_funcs):
  File "/home/floriadmin/mlc-llm/python/mlc_llm/loader/huggingface_loader.py", line 117, in load
    param = self._load_mlc_param(mlc_name, device=device)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/floriadmin/mlc-llm/python/mlc_llm/loader/huggingface_loader.py", line 147, in _load_mlc_param
    self._load_file(path)
  File "/home/floriadmin/mlc-llm/python/mlc_llm/loader/huggingface_loader.py", line 186, in _load_file
    for name, param in load_func(path):
  File "/home/floriadmin/mlc-llm/python/mlc_llm/loader/utils.py", line 42, in load_torch_shard
    for name, param in torch.load(path, map_location=torch.device("cpu")).items():
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/torch/serialization.py", line 998, in load
    with _open_file_like(f, 'rb') as opened_file:
         ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/torch/serialization.py", line 445, in _open_file_like
    return _open_file(name_or_buffer, mode)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/floriadmin/miniforge3/envs/mlc/lib/python3.11/site-packages/torch/serialization.py", line 426, in __init__
    super().__init__(open(name, mode))
                     ^^^^^^^^^^^^^^^^
FileNotFoundError: [Errno 2] No such file or directory: '../dist/models/gorilla-openfunctions-v2/pytorch_model-00002-of-00002.bin'
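
The conversion fails because the auto-selected huggingface-torch index references pytorch_model-00002-of-00002.bin, which is not present in ../dist/models/gorilla-openfunctions-v2. Since the log also detected a safetensors index, the overrides it mentions (`--source` pointing at model.safetensors.index.json, or `--source-format huggingface-safetensor`) are one way forward; otherwise the missing shard has to be restored. A small pre-flight check of the kind that surfaces this, written as a hypothetical helper rather than anything in mlc_llm:

    import json
    from pathlib import Path

    def missing_shards(index_json: str) -> list[str]:
        """Return weight shards named in a HuggingFace index file but absent on disk."""
        index_path = Path(index_json)
        with open(index_path) as f:
            index = json.load(f)
        shards = set(index["weight_map"].values())
        return sorted(s for s in shards if not (index_path.parent / s).exists())

    # e.g. missing_shards("../dist/models/gorilla-openfunctions-v2/pytorch_model.bin.index.json")
    # would report pytorch_model-00002-of-00002.bin for the state seen in this log.
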
mlc-chat-config.json
ADDED
@@ -0,0 +1,71 @@
{
  "model_type": "llama",
  "quantization": "q8f32_1",
  "model_config": {
    "hidden_size": 4096,
    "intermediate_size": 11008,
    "num_attention_heads": 32,
    "num_hidden_layers": 30,
    "rms_norm_eps": 1e-06,
    "vocab_size": 102400,
    "position_embedding_base": 10000.0,
    "context_window_size": 4096,
    "prefill_chunk_size": 4096,
    "num_key_value_heads": 32,
    "head_dim": 128,
    "tensor_parallel_shards": 1,
    "max_batch_size": 80
  },
  "vocab_size": 102400,
  "context_window_size": 4096,
  "sliding_window_size": -1,
  "prefill_chunk_size": 4096,
  "attention_sink_size": -1,
  "tensor_parallel_shards": 1,
  "mean_gen_len": 128,
  "max_gen_len": 512,
  "shift_fill_factor": 0.3,
  "temperature": 0.7,
  "presence_penalty": 0.0,
  "frequency_penalty": 0.0,
  "repetition_penalty": 1.0,
  "top_p": 0.95,
  "conv_template": {
    "name": "gorilla",
    "system_template": "{system_message}",
    "system_message": "A chat between a curious user and an artificial intelligence assistant. The assistant provides helpful, detailed, and polite responses to the user's inquiries.",
    "roles": {
      "user": "USER",
      "assistant": "ASSISTANT",
      "tool": "USER"
    },
    "role_templates": {
      "user": "<<question>> {user_message} <<function>> {function_string}",
      "assistant": "{assistant_message}",
      "tool": "{tool_message}"
    },
    "messages": [],
    "seps": [
      "\n",
      "</s>"
    ],
    "role_content_sep": ": ",
    "role_empty_sep": ":",
    "stop_str": [
      "</s>"
    ],
    "stop_token_ids": [
      2
    ],
    "function_string": "",
    "use_function_calling": false
  },
  "pad_token_id": 0,
  "bos_token_id": 100000,
  "eos_token_id": 100015,
  "tokenizer_files": [
    "tokenizer.json",
    "tokenizer_config.json"
  ],
  "version": "0.1.0"
}
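
The conv_template block above is a complete description of how a gorilla prompt is assembled: role names are joined to their content with role_content_sep, the user turn is expanded through its role template, and turns are separated by the seps entries. A rough sketch of that assembly (illustration only, not MLC's template engine; the question and function string are made-up examples):

    # Assembling one user turn the way the conv_template fields above describe.
    system_message = ("A chat between a curious user and an artificial intelligence assistant. "
                      "The assistant provides helpful, detailed, and polite responses to the user's inquiries.")
    user_template = "<<question>> {user_message} <<function>> {function_string}"
    role_content_sep, role_empty_sep, sep = ": ", ":", "\n"

    user_turn = "USER" + role_content_sep + user_template.format(
        user_message="What is the weather in Boston?",  # hypothetical question
        function_string='[{"name": "get_weather", "parameters": {"city": "string"}}]',  # hypothetical function spec
    )
    prompt = system_message + sep + user_turn + sep + "ASSISTANT" + role_empty_sep
    print(prompt)
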
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,144 @@
{
  "added_tokens_decoder": {
    "100000": {
      "content": "<|begin▁of▁sentence|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100001": {
      "content": "<|end▁of▁sentence|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100002": {
      "content": "ø",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100003": {
      "content": "ö",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100004": {
      "content": "ú",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100005": {
      "content": "ÿ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100006": {
      "content": "õ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100007": {
      "content": "÷",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100008": {
      "content": "û",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100009": {
      "content": "ý",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100010": {
      "content": "À",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100011": {
      "content": "ù",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100012": {
      "content": "Á",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100013": {
      "content": "þ",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100014": {
      "content": "ü",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "100015": {
      "content": "<|EOT|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|begin▁of▁sentence|>",
  "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|EOT|>",
  "legacy": true,
  "model_max_length": 4096,
  "pad_token": "<|end▁of▁sentence|>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": null,
  "use_default_system_prompt": true
}
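
The chat_template above is a standard Jinja template, so it can be exercised directly with Hugging Face transformers. A short sketch, assuming the files from this commit live in a local directory (the path is an assumption for the example):

    from transformers import AutoTokenizer

    # Local copy of this repo; the directory name is illustrative.
    tok = AutoTokenizer.from_pretrained("./gorilla-openfunctions-v2-q8f32_1-MLC")

    messages = [{"role": "user", "content": "List the files in the current directory."}]
    prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(prompt)  # starts with bos_token and, absent a system message, the default Deepseek Coder system prompt

Note that this template is the model's native Deepseek-Coder-style format; the MLC runtime instead renders prompts from the gorilla conv_template in mlc-chat-config.json above.
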