Update app.py
app.py CHANGED
@@ -24,8 +24,8 @@ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloa
 @torch.inference_mode()
 @spaces.GPU
 def predict_math_bot(user_message, system_message="", max_new_tokens=125, temperature=0.1, top_p=0.9, repetition_penalty=1.9, do_sample=False):
-    prompt = f"
-    inputs = tokenizer(prompt, return_tensors='pt', add_special_tokens=
+    prompt = f"<|user|>{user_message}\n<|system|>{system_message}\n<|assistant|>\n" if system_message else user_message
+    inputs = tokenizer(prompt, return_tensors='pt', add_special_tokens=True)
     input_ids = inputs["input_ids"].to(model.device)
 
     output_ids = model.generate(
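The rewritten prompt line packs the whole template choice into one conditional f-string. A minimal sketch of that logic in isolation (the helper name `build_prompt` is ours, not the app's; the role-tag order, `<|user|>` before `<|system|>`, mirrors the committed line exactly):

```python
# Sketch of the committed prompt logic, pulled out of predict_math_bot.
def build_prompt(user_message: str, system_message: str = "") -> str:
    if system_message:
        # Tagged chat template, ending after <|assistant|> so generation
        # continues as the assistant turn.
        return f"<|user|>{user_message}\n<|system|>{system_message}\n<|assistant|>\n"
    # No system message: the raw user text goes straight to the tokenizer.
    return user_message

print(build_prompt("Integrate x^2 from 0 to 1."))
print(build_prompt("Integrate x^2 from 0 to 1.", "Show each step."))
```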
@@ -44,7 +44,9 @@ def predict_math_bot(user_message, system_message="", max_new_tokens=125, temper
 def main():
     with gr.Blocks() as demo:
         gr.Markdown(title)
-
+        with gr.Row():
+            user_message = gr.Code(label="🫡Enter your math query here...", language="r", lines=3, value="""F(x) &= \int^a_b \frac{1}{3}x^3""")
+            system_message = gr.Textbox(label="📉System Prompt", lines=2, placeholder="Optional: give precise instructions to resolve the problem provided above:")
 
         with gr.Accordion("Advanced Settings"):
             with gr.Row():
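One caveat with the new default value: inside a plain triple-quoted string, the `\f` in `\frac` is parsed as a form-feed escape, so the widget will not show the snippet verbatim. A raw string sidesteps that; a self-contained sketch of the same row (labels shortened by us, gradio assumed installed):

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # Raw string keeps \int and \frac literal; in the committed
        # """...""" version, \f becomes a form-feed character.
        user_message = gr.Code(label="Math query", language="r", lines=3,
                               value=r"F(x) &= \int^a_b \frac{1}{3}x^3")
        system_message = gr.Textbox(label="System Prompt", lines=2,
                                    placeholder="Optional instructions...")

demo.launch()
```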
@@ -53,14 +55,13 @@ def main():
             top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.01, maximum=0.99)
             repetition_penalty = gr.Slider(label="Repetition penalty", value=1.9, minimum=1.0, maximum=2.0)
             do_sample = gr.Checkbox(label="Uncheck for faster inference", value=False)
-        with gr.Row():
-            user_message = gr.Textbox(label="🫡Your Message", lines=3, placeholder="Enter your math query here...")
-            system_message = gr.Textbox(label="📉System Prompt", lines=2, placeholder="Optional: Set a scene or introduce a character...")
         gr.Button("Try🫡📉MetaMath").click(
             predict_math_bot,
             inputs=[user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample],
             outputs=output_text
-        )
+        )
+        output_text = RichTextbox(label="🫡📉MetaMath", interactive=True)
+
     demo.launch()
 
 if __name__ == "__main__":
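Note the wiring order in the new version: `outputs=output_text` is evaluated when `.click()` runs, two lines before `output_text = RichTextbox(...)` is assigned, so `main()` raises an UnboundLocalError as committed. Defining the output component before wiring the button avoids this; a self-contained sketch of that pattern with stock components (our `echo` handler and plain `gr.Textbox` stand in for the app's `predict_math_bot` and the custom `RichTextbox`):

```python
import gradio as gr

def echo(text: str) -> str:
    return text

with gr.Blocks() as demo:
    box = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")  # define the output component first...
    gr.Button("Run").click(echo, inputs=box, outputs=out)  # ...then wire it

demo.launch()
```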