Spaces:
Sleeping
Sleeping
kidsampson
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -6,17 +6,17 @@ login(token = huggingfacetoken)
|
|
6 |
import gradio as gr
|
7 |
from transformers import pipeline
|
8 |
|
9 |
-
|
10 |
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B") gave some preamble
|
11 |
#pipe = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1") exceeded free memory
|
12 |
#pipe = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1") exceeded free memory
|
13 |
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
|
14 |
-
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
|
15 |
#pipe = pipeline("text-generation", model="openai-community/gpt2")
|
16 |
|
17 |
|
18 |
def response(messages):
|
19 |
-
sequences = pipe(messages, max_length=
|
20 |
#sequences = pipe(messages)
|
21 |
#gen_text = sequences
|
22 |
gen_text = sequences[0]["generated_text"][len(messages):]
|
|
|
6 |
import gradio as gr
|
7 |
from transformers import pipeline
|
8 |
|
9 |
+
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
|
10 |
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B") gave some preamble
|
11 |
#pipe = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1") exceeded free memory
|
12 |
#pipe = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1") exceeded free memory
|
13 |
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
|
14 |
+
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
|
15 |
#pipe = pipeline("text-generation", model="openai-community/gpt2")
|
16 |
|
17 |
|
18 |
def response(messages):
|
19 |
+
sequences = pipe(messages, max_length=500)
|
20 |
#sequences = pipe(messages)
|
21 |
#gen_text = sequences
|
22 |
gen_text = sequences[0]["generated_text"][len(messages):]
|