danielhanchen committed
Add files using upload-large-folder tool

- config.json +9 -2
- generation_config.json +3 -1
- tokenizer.json +2 -2
- tokenizer_config.json +5 -2
config.json
CHANGED
@@ -22,6 +22,7 @@
   "num_attention_heads": 32,
   "num_hidden_layers": 16,
   "num_key_value_heads": 8,
+  "pad_token_id": 128004,
   "pretraining_tp": 1,
   "quantization_config": {
     "_load_in_4bit": true,
@@ -32,7 +33,12 @@
     "bnb_4bit_use_double_quant": true,
     "llm_int8_enable_fp32_cpu_offload": false,
     "llm_int8_has_fp16_weight": false,
-    "llm_int8_skip_modules":
+    "llm_int8_skip_modules": [
+      "lm_head",
+      "multi_modal_projector",
+      "merger",
+      "modality_projection"
+    ],
     "llm_int8_threshold": 6.0,
     "load_in_4bit": true,
     "load_in_8bit": false,
@@ -49,7 +55,8 @@
   "rope_theta": 500000.0,
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.48.1",
+  "unsloth_fixed": true,
   "use_cache": true,
   "vocab_size": 128256
 }
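The substantive change in config.json is the new "llm_int8_skip_modules" list, which tells bitsandbytes to leave the listed modules unquantized, plus a pinned "pad_token_id". Below is a minimal sketch of loading a checkpoint with these settings via transformers' BitsAndBytesConfig; the repo id, "bnb_4bit_quant_type", and "bnb_4bit_compute_dtype" are assumptions, since they sit outside the visible diff context.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirror the quantization_config block from the diff above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # "load_in_4bit": true
    bnb_4bit_use_double_quant=True,         # "bnb_4bit_use_double_quant": true
    bnb_4bit_quant_type="nf4",              # assumed: not shown in the diff context
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed: matches "torch_dtype": "bfloat16"
    llm_int8_threshold=6.0,                 # "llm_int8_threshold": 6.0
    llm_int8_skip_modules=[                 # newly added: keep these in full precision
        "lm_head",
        "multi_modal_projector",
        "merger",
        "modality_projection",
    ],
)

model = AutoModelForCausalLM.from_pretrained(
    "your-org/your-4bit-checkpoint",  # placeholder: the commit's repo id is not shown
    quantization_config=bnb_config,
    device_map="auto",
)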
generation_config.json
CHANGED
@@ -6,7 +6,9 @@
     128008,
     128009
   ],
+  "max_length": 131072,
+  "pad_token_id": 128004,
   "temperature": 0.6,
   "top_p": 0.9,
-  "transformers_version": "4.
+  "transformers_version": "4.48.1"
 }
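generation_config.json now pins a default "max_length" of 131072 (the 128K context window) and the pad token id 128004 alongside the existing sampling defaults. A minimal sketch of the equivalent GenerationConfig, the transformers class this file deserializes into; "do_sample" is an assumption, and the "eos_token_id" list is inferred from the unchanged context lines (128008, 128009), so it may be partial.

from transformers import GenerationConfig

gen_config = GenerationConfig(
    max_length=131072,              # added in this commit
    pad_token_id=128004,            # added in this commit: <|finetune_right_pad_id|>
    temperature=0.6,
    top_p=0.9,
    do_sample=True,                 # assumed: the sampling params above imply it
    eos_token_id=[128008, 128009],  # inferred from the unchanged context lines
)
# Typical use (model and inputs defined elsewhere):
# outputs = model.generate(**inputs, generation_config=gen_config)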
tokenizer.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
+size 17209920
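tokenizer.json is stored via Git LFS, so the diff touches only the pointer file: the "oid sha256:" line is the content hash and "size" is the byte count of the real file. A minimal sketch of verifying a downloaded copy against this pointer; the local path is a placeholder.

import hashlib
import os

path = "tokenizer.json"  # placeholder: a locally downloaded copy
expected_oid = "6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b"
expected_size = 17209920

# The LFS pointer records only the hash and size, so both can be checked locally.
assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected_oid, "sha256 does not match the LFS pointer"
print("tokenizer.json matches its LFS pointer")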
tokenizer_config.json
CHANGED
@@ -1,4 +1,5 @@
 {
+  "add_bos_token": true,
   "added_tokens_decoder": {
     "128000": {
       "content": "<|begin_of_text|>",
@@ -2050,9 +2051,10 @@
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template": "
+  "chat_template": "\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- else %}\n {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|eot_id|>",
+  "extra_special_tokens": {},
   "model_input_names": [
     "input_ids",
     "attention_mask"
@@ -2060,5 +2062,6 @@
   "model_max_length": 131072,
   "pad_token": "<|finetune_right_pad_id|>",
   "padding_side": "left",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "unk_token": null
 }
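The bulk of the tokenizer_config.json change is the new "chat_template", a Llama-3.1-style Jinja template that renders tool definitions as JSON and expects tool calls in the form {"name": ..., "parameters": ...}. A minimal sketch of exercising it through apply_chat_template, assuming a recent transformers (the config pins 4.48.1, which supports passing tools as Python functions); the repo id and the example function are placeholders.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-org/this-repo")  # placeholder repo id

def get_weather(city: str) -> str:
    """Get the current weather for a city.

    Args:
        city: Name of the city.
    """
    return "sunny"  # stub: only the signature and docstring feed the template

messages = [{"role": "user", "content": "What's the weather in Paris?"}]
prompt = tokenizer.apply_chat_template(
    messages,
    tools=[get_weather],         # serialized by the template's tojson calls
    add_generation_prompt=True,  # appends the assistant header per the template
    tokenize=False,
)
print(prompt)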