jth01 committed
Commit 7f97ed9 · verified · 1 Parent(s): 87d6538

Upload 7 files

added_tokens.json CHANGED
@@ -4,7 +4,6 @@
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
- "<|end|>": 151666,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
@@ -13,17 +12,11 @@
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
- "<|message|>": 151667,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
- "<|pad|>": 151671,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
- "<|start|>": 151665,
- "<|tool_end|>": 151670,
- "<|tool_excute|>": 151669,
- "<|tool_start|>": 151668,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
config.json CHANGED
@@ -1,39 +1,29 @@
  {
- "architectures": [
- "Qwen2ForCausalLM"
- ],
- "attention_dropout": 0.0,
- "bos_token_id": 151643,
- "eos_token_id": 151645,
- "hidden_act": "silu",
- "hidden_size": 5120,
- "initializer_range": 0.02,
- "intermediate_size": 27648,
- "max_position_embeddings": 32768,
- "max_window_layers": 70,
- "model_type": "qwen2",
- "num_attention_heads": 40,
- "num_hidden_layers": 64,
- "num_key_value_heads": 8,
- "rms_norm_eps": 1e-06,
- "rope_scaling": null,
- "rope_theta": 1000000.0,
- "sliding_window": null,
- "tie_word_embeddings": false,
- "torch_dtype": "bfloat16",
- "transformers_version": "4.47.0",
- "use_cache": true,
- "use_sliding_window": false,
- "vocab_size": 152064,
- "quantization_config": {
- "quant_method": "exl2",
- "version": "0.2.7",
- "bits": 4.0,
- "head_bits": 6,
- "calibration": {
- "rows": 115,
- "length": 2048,
- "dataset": "(default)"
- }
- }
- }
+ "_name_or_path": "Qwen/QwQ-32B-Preview",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 27648,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 64,
+ "model_type": "qwen2",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 64,
+ "num_key_value_heads": 8,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.46.3",
+ "use_cache": true,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
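The rewritten config.json mirrors the upstream Qwen/QwQ-32B-Preview configuration (max_window_layers 64, rms_norm_eps 1e-05) and drops the exl2 quantization_config block along with it. A small sanity-check sketch, assuming transformers is installed and "./model" stands in for a local clone:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./model")  # placeholder local path

print(cfg.num_hidden_layers)                      # 64
print(cfg.max_window_layers)                      # 64 after this commit (was 70)
print(cfg.rms_norm_eps)                           # 1e-05 after this commit (was 1e-06)
print(getattr(cfg, "quantization_config", None))  # None: exl2 metadata removed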
generation_config.json CHANGED
@@ -1,6 +1,13 @@
  {
- "_from_model_config": true,
  "bos_token_id": 151643,
- "eos_token_id": 151645,
- "transformers_version": "4.47.0"
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.46.3"
  }
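The new generation_config.json enables sampling by default (temperature 0.7, top_k 20, top_p 0.8) and accepts either <|im_end|> (151645) or <|endoftext|> (151643) as an end-of-sequence token. transformers picks these defaults up automatically at generation time; a sketch of inspecting them explicitly ("./model" is again a placeholder path):

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("./model")  # placeholder local path

print(gen.do_sample, gen.temperature, gen.top_k, gen.top_p)  # True 0.7 20 0.8
print(gen.eos_token_id)                                      # [151645, 151643]

# Passing generation_config=gen to model.generate() makes decoding stop on
# either terminator and pad with <|endoftext|> (id 151643).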
special_tokens_map.json CHANGED
@@ -15,14 +15,14 @@
  "<|video_pad|>"
  ],
  "eos_token": {
- "content": "<|endoftext|>",
+ "content": "<|im_end|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
  "single_word": false
  },
  "pad_token": {
- "content": "<|video_pad|>",
+ "content": "<|vision_pad|>",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:509590e461198995b477f22c06a2f50e3e8eb9160b36bcf9521b6aa385283d90
- size 11423210
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json CHANGED
@@ -177,62 +177,6 @@
  "rstrip": false,
  "single_word": false,
  "special": false
- },
- "151665": {
- "content": "<|start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151666": {
- "content": "<|end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151667": {
- "content": "<|message|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151668": {
- "content": "<|tool_start|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151669": {
- "content": "<|tool_excute|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151670": {
- "content": "<|tool_end|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
- },
- "151671": {
- "content": "<|pad|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false,
- "special": true
  }
  },
  "additional_special_tokens": [
@@ -251,14 +195,13 @@
  "<|video_pad|>"
  ],
  "bos_token": null,
- "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are an advanced AI language model specializing in solving math and programming problems step by step. Carefully analyze each part of the problem, verify the accuracy of your reasoning with relevant facts and data, and provide clear, logical solutions. Reflect on and review your approach throughout the problem-solving process to ensure precision and thoroughness. Always think through the problem step by step and provide your answers accordingly.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
- "eos_token": "<|endoftext|>",
+ "eos_token": "<|im_end|>",
  "errors": "replace",
- "extra_special_tokens": {},
- "model_max_length": 131072,
- "pad_token": "<|video_pad|>",
- "return_tensors": true,
+ "model_max_length": 32768,
+ "pad_token": "<|vision_pad|>",
+ "padding_side": "left",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null