tatihden committed
Commit e4bdfa5 · verified · 1 Parent(s): feaa85c

Update app.py

Replace the local transformers Conversation pipeline with a streaming huggingface_hub InferenceClient chat UI built on gr.Blocks.

Files changed (1): app.py (+102 -16)
app.py CHANGED
@@ -1,23 +1,109 @@
-from transformers import pipeline
-from transformers import Conversation
-
 import gradio as gr

-chatbot = pipeline(model="tatihden/gemma_mental_health_2b_it_en")

-message_list = []
-response_list = []

-def mini_chatbot(message, history):
-    conversation = Conversation(text=message,
-                                past_user_inputs=message_list,
-                                generated_responses=response_list)
-    conversation = chatbot(conversation)
-    return conversation.generated_responses[-1]

-demo_chatbot = gr.ChatInterface(mini_chatbot,
-                                title="CalmChat",
-                                description="Enter text to start chatting.")

-demo_chatbot.launch()
+from huggingface_hub import InferenceClient
+import random
+
+# Models served through the Hugging Face Inference API (repo ids, not hub URLs).
+models = [
+    "tatihden/gemma_mental_health_2b_en",
+    "tatihden/gemma_mental_health_7b_it_en",
+    "tatihden/gemma_mental_health_2b_it_en",
+]
+
+clients = [InferenceClient(model) for model in models]
+
+
+def format_prompt(message, history):
+    # Render the conversation in Gemma's turn format.
+    prompt = ""
+    for user_prompt, bot_response in history or []:
+        prompt += f"<start_of_turn>user\n{user_prompt}<end_of_turn>\n"
+        prompt += f"<start_of_turn>model\n{bot_response}<end_of_turn>\n"
+    prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
+    return prompt
+
+
+def chat_inf(system_prompt, prompt, history, client_choice, seed, temp, tokens, top_p, rep_p):
+    # client_choice is a 0-based index (the dropdown uses type='index').
+    client = clients[int(client_choice)]
+    history = history or []
+
+    generate_kwargs = dict(
+        temperature=temp,
+        max_new_tokens=tokens,
+        top_p=top_p,
+        repetition_penalty=rep_p,
+        do_sample=True,
+        seed=seed,
+    )
+    # The optional system prompt is prepended to the user message.
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True,
+                                    details=True, return_full_text=False)
+    output = ""
+    for response in stream:
+        output += response.token.text
+        # Keep earlier turns visible while the new answer streams in.
+        yield history + [(prompt, output)]
+    history.append((prompt, output))
+    yield history
+
+
+def clear_fn():
+    return None
+
+
+rand_val = random.randint(1, 1111111111111111)
+
+
+def check_rand(inp, val):
+    # Re-roll the seed when "Random Seed" is checked; otherwise keep the typed value.
+    if inp:
+        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,
+                         value=random.randint(1, 1111111111111111))
+    return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
+
+
+with gr.Blocks() as app:
+    gr.HTML("""<center><h1 style='font-size:xx-large;'>Google Gemma Models</h1></center>""")
+    with gr.Group():
+        with gr.Row():
+            client_choice = gr.Dropdown(label="Models", type='index', choices=models,
+                                        value=models[0], interactive=True)
+    chat_b = gr.Chatbot(height=500)
+    with gr.Group():
+        with gr.Row():
+            with gr.Column(scale=1):
+                with gr.Group():
+                    rand = gr.Checkbox(label="Random Seed", value=True)
+                    seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111,
+                                     step=1, value=rand_val)
+                    tokens = gr.Slider(label="Max new tokens", value=6400, minimum=0, maximum=8000,
+                                       step=64, interactive=True, visible=True,
+                                       info="Maximum number of new tokens to generate")
+            with gr.Column(scale=1):
+                with gr.Group():
+                    temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                    top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.9)
+                    rep_p = gr.Slider(label="Repetition Penalty", step=0.1, minimum=0.1, maximum=2.0, value=1.0)
+
+    with gr.Group():
+        with gr.Row():
+            with gr.Column(scale=3):
+                sys_inp = gr.Textbox(label="System Prompt (optional)")
+                inp = gr.Textbox(label="Prompt")
+                with gr.Row():
+                    btn = gr.Button("Chat")
+                    stop_btn = gr.Button("Stop")
+                    clear_btn = gr.Button("Clear")
+
+    # Resolve the seed first, then stream the chat response into the Chatbot.
+    chat_sub = inp.submit(check_rand, [rand, seed], seed).then(
+        chat_inf,
+        [sys_inp, inp, chat_b, client_choice, seed, temp, tokens, top_p, rep_p],
+        chat_b)
+    go = btn.click(check_rand, [rand, seed], seed).then(
+        chat_inf,
+        [sys_inp, inp, chat_b, client_choice, seed, temp, tokens, top_p, rep_p],
+        chat_b)
+    stop_btn.click(None, None, None, cancels=[go, chat_sub])
+    clear_btn.click(clear_fn, None, [chat_b])
+
+app.queue(default_concurrency_limit=10).launch()
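
For reference, the Gemma turn template produced by format_prompt can be sanity-checked offline; the following minimal sketch inlines the function from the diff above and makes no Inference API call:

def format_prompt(message, history):
    # Inlined copy of format_prompt from app.py above.
    prompt = ""
    for user_prompt, bot_response in history or []:
        prompt += f"<start_of_turn>user\n{user_prompt}<end_of_turn>\n"
        prompt += f"<start_of_turn>model\n{bot_response}<end_of_turn>\n"
    prompt += f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
    return prompt

history = [("How are you?", "I'm doing well, thank you for asking.")]
print(format_prompt("I have trouble sleeping.", history))
# Expected output:
# <start_of_turn>user
# How are you?<end_of_turn>
# <start_of_turn>model
# I'm doing well, thank you for asking.<end_of_turn>
# <start_of_turn>user
# I have trouble sleeping.<end_of_turn>
# <start_of_turn>model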