# Hugging Face Space: simple text-generation Q&A demo (Gradio + transformers).
# Authenticate with the Hugging Face Hub (gated Llama models require a token)
# and build the text-generation pipeline used by the Gradio app below.
import os

import gradio as gr
from huggingface_hub import login
from transformers import pipeline

# Token is injected as a Space secret named 'hf_write'.
huggingfacetoken = os.getenv('hf_write')
login(token=huggingfacetoken)

# Model chosen after trials on the free tier:
# - meta-llama/Llama-3.2-3B (base) produced unwanted preamble before answers
# - mistralai/Mixtral-8x7B-Instruct-v0.1 and Mistral-7B-v0.1 exceeded free memory
# - smaller Llama-3.2-1B variants and gpt2 were also tried
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct")
def response(messages):
    """Generate a completion for the user's prompt and return only the new text.

    Args:
        messages: the prompt string taken from the Gradio input textbox.

    Returns:
        The generated continuation as a string, with the echoed prompt
        stripped from the front.
    """
    # max_length bounds prompt + completion tokens combined.
    sequences = pipe(messages, max_length=500)
    # The pipeline echoes the prompt at the start of generated_text;
    # slice it off so only the model's answer is shown.
    # Example raw output shape: [{"generated_text": "<prompt><completion>"}]
    return sequences[0]["generated_text"][len(messages):]
# Minimal one-textbox-in, one-textbox-out UI wired to response().
demo = gr.Interface(
    response,
    inputs=[gr.Textbox(label="Input your question: ", lines=3)],
    outputs=[gr.Textbox(label="Here's the answer: ", lines=3)],
    title="Ask me anything!",
)
demo.launch()
# --- Earlier experiment: plain gpt2 pipeline (returned raw pipeline output) ---
# import gradio as gr
# from transformers import pipeline
# pipe = pipeline("text-generation", model="openai-community/gpt2")
# def response(messages):
#     return pipe(messages)
# demo = gr.Interface(
#     response,
#     inputs=[gr.Textbox(label="Input your question: ", lines=3)],
#     outputs=[gr.Textbox(label="Here's the answer: ", lines=3)],
#     title="Ask me anything!",
# )
# demo.launch()
# --- Earlier experiment: image-classification demo ---
# import gradio as gr
# from transformers import pipeline
# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
# def predict(image):
#     predictions = pipeline(image)
#     return {p["label"]: p["score"] for p in predictions}
# demo = gr.Interface(
#     predict,
#     inputs=gr.Image(label="Upload hot dog candidate", type="filepath"),
#     outputs=gr.Label(num_top_classes=2),
#     title="Hot Dog? Or Not?",
# )
# demo.launch()