File size: 2,583 Bytes
095f1b0
0a76dfc
8de8a47
095f1b0
 
0019f15
 
 
4852347
837b7fa
b052e6d
d22b4dc
3bd5c93
4d13440
 
d10bb65
0019f15
847afc5
b36bab5
63080eb
d396e37
f338b85
8d2b04c
6754cc9
3e733bb
0019f15
 
f1de876
b36bab5
 
 
 
 
0019f15
 
 
 
e44a4b5
 
 
e13e066
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6134078
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
from huggingface_hub import login
import os

# Authenticate against the Hugging Face Hub so gated models
# (e.g. meta-llama/*) can be downloaded by the pipeline below.
# NOTE(review): env var name 'hf_write' suggests a write-scope token;
# read scope would suffice for downloads — confirm.
hf_token = os.getenv('hf_write')
if hf_token:
    login(token=hf_token)
else:
    # Original code called login(token=None) when the env var was missing,
    # which silently falls back to a cached credential or an interactive
    # prompt. Make the missing-token case explicit instead.
    print("Warning: 'hf_write' env var not set; skipping Hugging Face login.")

import gradio as gr
from transformers import pipeline

# Model selection history — alternatives tried and why they were rejected:
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B-Instruct") 
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-3B") gave some preamble
#pipe = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1") exceeded free memory
#pipe = pipeline("text-generation", model="mistralai/Mistral-7B-v0.1") exceeded free memory
#pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B")
# Current choice: smallest instruct-tuned Llama that fits in free-tier memory.
# Constructing the pipeline downloads the model weights on first run.
pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct")
#pipe = pipeline("text-generation", model="openai-community/gpt2")
#pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-1.7B-Instruct")


def response(age, story):
    """Generate an age-appropriate ~300-word retelling of a classic story.

    Parameters
    ----------
    age : int
        Target listener age (5-10, supplied by the UI slider).
    story : str
        Title of the story selected in the dropdown.

    Returns
    -------
    str
        The model's generated story text, without the prompt echoed back.
    """
    prompt = f"You are a school teacher telling stories to children. Retell the story of {story} in 300 words using language a {age} year old can understand."
    # max_length counts the prompt tokens too, so a long prompt silently
    # shrinks the story budget (and triggers a deprecation warning).
    # max_new_tokens reserves the budget for the generated text alone.
    # return_full_text=False makes the pipeline return only the
    # continuation, which is more robust than the previous character-count
    # slice generated_text[len(prompt):].
    sequences = pipe(prompt, max_new_tokens=400, return_full_text=False)
    return sequences[0]["generated_text"]

# Gradio UI wiring: positional inputs map onto response(age, story);
# launching starts the web server (blocking side effect).
demo = gr.Interface(
    response, 
    # Slider bounds match the ages the prompt is phrased for (5-10).
    inputs=[gr.Slider(minimum=5, maximum=10, step=1, value=5, label="Enter your age: ", info="Select an age between 5 and 10."),
           gr.Dropdown(
            ["Cinderella", "Snow White", "Little Red Riding Hood"], label="Select the Story: ", info="Select the story you want to read."),
           ],
    outputs=[gr.Textbox(label="Here's the story: ", lines=3)],
    title="Tell me a story!",
)

demo.launch()

# import gradio as gr
# from transformers import pipeline

# pipe = pipeline("text-generation", model="openai-community/gpt2")

# def response(messages):
#     return pipe(messages)

# demo = gr.Interface(
#     response, 
#     inputs=[gr.Textbox(label="Input your question: ", lines=3)],
#     outputs=[gr.Textbox(label="Here's the answer: ", lines=3)],
#     title="Ask me anything!",
# )

# demo.launch()

# import gradio as gr
# from transformers import pipeline

# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

# def predict(image):
#     predictions = pipeline(image)
#     return {p["label"]: p["score"] for p in predictions}

# demo = gr.Interface(
#     predict,
#     inputs=gr.Image(label="Upload hot dog candidate", type="filepath"),
#     outputs=gr.Label(num_top_classes=2),
#     title="Hot Dog? Or Not?",
# )

# demo.launch()