flyingfishinwater
committed on
Commit
·
d7580f1
1
Parent(s):
9aeabfb
Update models.json
Browse files- models.json +55 -57
models.json
CHANGED
@@ -9,36 +9,36 @@
|
|
9 |
"model_description": "The standard Llama2 based 1.3B LLM.",
|
10 |
"developer": "Meta",
|
11 |
"developer_url": "https://ai.meta.com/llama/",
|
12 |
-
"context"
|
13 |
-
"temp"
|
14 |
-
"prompt_format"
|
15 |
-
"top_k"
|
16 |
-
"top_p"
|
17 |
-
"model_inference"
|
18 |
-
"n_batch"
|
19 |
-
"template_name"
|
20 |
"is_ready": true
|
21 |
},
|
22 |
{
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
}
|
42 |
{
|
43 |
"id": "mistral-7b-instruct-v0.2-Q5_K_M",
|
44 |
"model_title": "Mistral 7B Instruct v0.2 Q5_K_M",
|
@@ -49,14 +49,14 @@
|
|
49 |
"model_description": "The standard Llama2 based 1.3B LLM.",
|
50 |
"developer": "Mistral AI",
|
51 |
"developer_url": "https://mistral.ai/",
|
52 |
-
"context"
|
53 |
-
"temp"
|
54 |
-
"prompt_format"
|
55 |
-
"top_k"
|
56 |
-
"top_p"
|
57 |
-
"model_inference"
|
58 |
-
"n_batch"
|
59 |
-
"template_name"
|
60 |
"is_ready": true
|
61 |
},
|
62 |
{
|
@@ -69,17 +69,17 @@
|
|
69 |
"model_description": "The standard Llama2 based 1.3B LLM.",
|
70 |
"developer": "Mistral AI",
|
71 |
"developer_url": "https://mistral.ai/",
|
72 |
-
"context"
|
73 |
-
"temp"
|
74 |
-
"prompt_format"
|
75 |
-
"top_k"
|
76 |
-
"top_p"
|
77 |
-
"model_inference"
|
78 |
-
"n_batch"
|
79 |
-
"template_name"
|
80 |
"is_ready": true
|
81 |
-
|
82 |
-
|
83 |
"id": "openchat-3.5-1210-Q5_K_M",
|
84 |
"model_title": "OpenChat 3.5 Q5_K_M",
|
85 |
"model_file": "data/mistral-7b-instruct-v0.2.Q5_K_M.gguf",
|
@@ -89,16 +89,14 @@
|
|
89 |
"model_description": "The standard Llama2 based 1.3B LLM.",
|
90 |
"developer": "OpenChat Team",
|
91 |
"developer_url": "https://openchat.team/",
|
92 |
-
"context"
|
93 |
-
"temp"
|
94 |
-
"prompt_format"
|
95 |
-
"top_k"
|
96 |
-
"top_p"
|
97 |
-
"model_inference"
|
98 |
-
"n_batch"
|
99 |
-
"template_name"
|
100 |
"is_ready": true
|
101 |
-
|
102 |
-
|
103 |
-
]
|
104 |
-
|
|
|
9 |
"model_description": "The standard Llama2 based 1.3B LLM.",
|
10 |
"developer": "Meta",
|
11 |
"developer_url": "https://ai.meta.com/llama/",
|
12 |
+
"context": 2048,
|
13 |
+
"temp": 0.6,
|
14 |
+
"prompt_format": "<human>: {{prompt}}\n<bot>: ",
|
15 |
+
"top_k": 5,
|
16 |
+
"top_p": 0.9,
|
17 |
+
"model_inference": "llama",
|
18 |
+
"n_batch": 10,
|
19 |
+
"template_name": "HumanBot",
|
20 |
"is_ready": true
|
21 |
},
|
22 |
{
|
23 |
+
"id": "tinyllama-1.1B-32k-Q8",
|
24 |
+
"model_title": "TinyLlama-1.1B-32k",
|
25 |
+
"model_file": "data/mistral-7b-instruct-v0.2.Q5_K_M.gguf",
|
26 |
+
"model_url": "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q5_K_M.gguf?download=true",
|
27 |
+
"model_info_url": "https://huggingface.co/Doctor-Shotgun/TinyLlama-1.1B-32k",
|
28 |
+
"model_avatar": "logo_tinyllama",
|
29 |
+
"model_description": "The TinyLlama 1.1B model.",
|
30 |
+
"developer": "Zhang Peiyuan",
|
31 |
+
"developer_url": "https://github.com/jzhang38/TinyLlama",
|
32 |
+
"context": 4096,
|
33 |
+
"temp": 0.6,
|
34 |
+
"prompt_format": "<|system|>You are a friendly chatbot who always responds in the style of a pirate.</s><|user|>{{prompt}}</s><|assistant|>",
|
35 |
+
"top_k": 5,
|
36 |
+
"top_p": 0.9,
|
37 |
+
"model_inference": "llama",
|
38 |
+
"n_batch": 10,
|
39 |
+
"template_name": "TinyLlama",
|
40 |
+
"is_ready": true
|
41 |
+
},
|
42 |
{
|
43 |
"id": "mistral-7b-instruct-v0.2-Q5_K_M",
|
44 |
"model_title": "Mistral 7B Instruct v0.2 Q5_K_M",
|
|
|
49 |
"model_description": "The Mistral AI 7B instruction-tuned LLM (v0.2).",
|
50 |
"developer": "Mistral AI",
|
51 |
"developer_url": "https://mistral.ai/",
|
52 |
+
"context": 4096,
|
53 |
+
"temp": 0.6,
|
54 |
+
"prompt_format": "<s>[INST]{{prompt}}[/INST]</s>",
|
55 |
+
"top_k": 5,
|
56 |
+
"top_p": 0.9,
|
57 |
+
"model_inference": "llama",
|
58 |
+
"n_batch": 10,
|
59 |
+
"template_name": "Mistral",
|
60 |
"is_ready": true
|
61 |
},
|
62 |
{
|
|
|
69 |
"model_description": "The Mistral AI 7B instruction-tuned LLM.",
|
70 |
"developer": "Mistral AI",
|
71 |
"developer_url": "https://mistral.ai/",
|
72 |
+
"context": 4096,
|
73 |
+
"temp": 0.6,
|
74 |
+
"prompt_format": "<s>[INST]{{prompt}}[/INST]</s>",
|
75 |
+
"top_k": 5,
|
76 |
+
"top_p": 0.9,
|
77 |
+
"model_inference": "llama",
|
78 |
+
"n_batch": 10,
|
79 |
+
"template_name": "Mistral",
|
80 |
"is_ready": true
|
81 |
+
},
|
82 |
+
{
|
83 |
"id": "openchat-3.5-1210-Q5_K_M",
|
84 |
"model_title": "OpenChat 3.5 Q5_K_M",
|
85 |
"model_file": "data/mistral-7b-instruct-v0.2.Q5_K_M.gguf",
|
|
|
89 |
"model_description": "The OpenChat 3.5 7B LLM.",
|
90 |
"developer": "OpenChat Team",
|
91 |
"developer_url": "https://openchat.team/",
|
92 |
+
"context": 4096,
|
93 |
+
"temp": 0.6,
|
94 |
+
"prompt_format": "<s>[INST]{{prompt}}[/INST]</s>",
|
95 |
+
"top_k": 5,
|
96 |
+
"top_p": 0.9,
|
97 |
+
"model_inference": "llama",
|
98 |
+
"n_batch": 10,
|
99 |
+
"template_name": "Mistral",
|
100 |
"is_ready": true
|
101 |
+
}
|
102 |
+
]
|
|
|
|