Spaces:
Paused
Paused
Update endpoints.py
Browse files- endpoints.py +6 -5
endpoints.py
CHANGED
@@ -28,23 +28,24 @@ def LLM(llm_name, length):
|
|
28 |
return pipe
|
29 |
|
30 |
|
31 |
-
pipe = LLM("microsoft/phi-2",2000)
|
32 |
# tokenizer = AutoTokenizer.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
|
33 |
# base_model = AutoModelForCausalLM.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
|
34 |
# Mistral 7B
|
35 |
# mistral_llm = LLM("mistralai/Mistral-7B-v0.1",30000)
|
36 |
-
mistral_llm =
|
37 |
|
38 |
# WizardCoder 13B
|
39 |
# wizard_llm = LLM("WizardLM/WizardCoder-Python-13B-V1.0",8000)
|
40 |
-
wizard_llm =
|
41 |
# hf_llm = HuggingFacePipeline(pipeline=pipe)
|
42 |
|
43 |
def ask_model(model, prompt):
|
44 |
if(model == 'mistral'):
|
45 |
-
|
|
|
46 |
if(model == 'wizard'):
|
47 |
-
|
|
|
48 |
|
49 |
|
50 |
|
|
|
28 |
return pipe
|
29 |
|
30 |
|
|
|
# Model pipeline instances used by ask_model() below.
# Each is built via the LLM(...) factory defined above, which returns a `pipe`.
# NOTE(review): the names are historical — the *actual* checkpoints loaded are
# smaller stand-ins than the names suggest (presumably to fit the Space's
# paused/limited hardware — TODO confirm):

# tokenizer = AutoTokenizer.from_pretrained("WizardLM/WizardCoder-1B-V1.0")
# base_model = AutoModelForCausalLM.from_pretrained("WizardLM/WizardCoder-1B-V1.0")

# Mistral 7B
# mistral_llm = LLM("mistralai/Mistral-7B-v0.1",30000)
# "mistral" endpoint currently backed by microsoft/phi-2 (2000-token limit),
# not an actual Mistral checkpoint.
mistral_llm = LLM("microsoft/phi-2",2000)

# WizardCoder 13B
# wizard_llm = LLM("WizardLM/WizardCoder-Python-13B-V1.0",8000)
# "wizard" endpoint backed by the 3B WizardCoder variant (4000-token limit)
# instead of the commented-out 13B model.
wizard_llm = LLM("WizardLM/WizardCoder-3B-V1.0",4000)

# hf_llm = HuggingFacePipeline(pipeline=pipe)
def ask_model(model, prompt):
    """Dispatch *prompt* to the pipeline selected by *model*.

    Parameters:
        model: backend identifier; recognized values are 'mistral' and 'wizard'.
        prompt: text prompt forwarded unchanged to the selected pipeline.

    Returns:
        Whatever the selected pipeline callable returns for *prompt*.
        # NOTE(review): return shape depends on the LLM() factory defined
        # elsewhere in this file — presumably a transformers pipeline output.

    Raises:
        ValueError: if *model* is not a recognized identifier.  The previous
        implementation silently fell through and returned None, which hid
        caller typos; failing loudly is safer.
    """
    if model == 'mistral':
        return mistral_llm(prompt)
    if model == 'wizard':
        return wizard_llm(prompt)
    raise ValueError(f"unknown model: {model!r}")