# Hugging Face Spaces app (page-header residue from copy-paste removed).
# --- Hub authentication and model setup ---
import os

import gradio as gr
from huggingface_hub import login
from transformers import pipeline

# Authenticate with the Hub using a write token from the environment.
# NOTE(review): assumes the 'hf_write' env var holds a valid HF token; we
# skip login when it is missing so the app can still start for public models
# (the original called login(token=None), which fails at startup).
huggingfacetoken = os.getenv("hf_write")
if huggingfacetoken:
    login(token=huggingfacetoken)

# Model choice. Record of earlier experiments:
#   meta-llama/Llama-3.2-3B-Instruct     -- tried
#   meta-llama/Llama-3.2-3B              -- gave some preamble
#   mistralai/Mixtral-8x7B-Instruct-v0.1 -- exceeded free memory
#   mistralai/Mistral-7B-v0.1            -- exceeded free memory
#   meta-llama/Llama-3.2-1B              -- tried
#   openai-community/gpt2                -- tried
#   HuggingFaceTB/SmolLM2-1.7B-Instruct  -- tried
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
def response(age, story):
    """Generate a ~300-word retelling of *story* for an *age*-year-old.

    Parameters
    ----------
    age : int
        Target listener age (the UI slider restricts this to 5-10).
    story : str
        Title of the fairy tale to retell.

    Returns
    -------
    str
        The generated story text with the prompt prefix stripped off.
    """
    prompt = f"You are a school teacher telling stories to children. Retell the story of {story} in 300 words using language a {age} year old can understand."
    # max_new_tokens bounds only the generated continuation; the original
    # max_length=500 counted the prompt tokens too, so the story budget
    # shrank as the prompt grew.
    sequences = pipe(prompt, max_new_tokens=500)
    # The text-generation pipeline echoes the prompt at the start of
    # generated_text; slice it off so only the story is returned.
    return sequences[0]["generated_text"][len(prompt):]
# --- Gradio UI: age slider + story dropdown -> generated story textbox ---
demo = gr.Interface(
    response,
    inputs=[
        gr.Slider(
            minimum=5,
            maximum=10,
            step=1,
            value=5,
            label="Enter your age: ",
            info="Select an age between 5 and 10.",
        ),
        gr.Dropdown(
            ["Cinderella", "Snow White", "Little Red Riding Hood"],
            label="Select the Story: ",
            info="Select the story you want to read.",
        ),
    ],
    outputs=[gr.Textbox(label="Here's the story: ", lines=3)],
    title="Tell me a story!",
)

demo.launch()
# Earlier version of this app: free-text Q&A with GPT-2.
# import gradio as gr
# from transformers import pipeline
# pipe = pipeline("text-generation", model="openai-community/gpt2")
# def response(messages):
#     return pipe(messages)
# demo = gr.Interface(
#     response,
#     inputs=[gr.Textbox(label="Input your question: ", lines=3)],
#     outputs=[gr.Textbox(label="Here's the answer: ", lines=3)],
#     title="Ask me anything!",
# )
# demo.launch()
# Earlier version of this app: hot-dog image classifier demo.
# import gradio as gr
# from transformers import pipeline
# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
# def predict(image):
#     predictions = pipeline(image)
#     return {p["label"]: p["score"] for p in predictions}
# demo = gr.Interface(
#     predict,
#     inputs=gr.Image(label="Upload hot dog candidate", type="filepath"),
#     outputs=gr.Label(num_top_classes=2),
#     title="Hot Dog? Or Not?",
# )
# demo.launch()