# smolLM-arena / app.py
# Hugging Face Space file header (scraped from the web UI), converted to
# comments so the module parses:
#   author: as-cle-bert — "Update app.py", commit 6bf3e9d (verified), 2.5 kB
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from pathlib import Path
import pandas as pd
# Model/tokenizer setup: SmolLM-135M, a small causal LM from HuggingFaceTB.
model_checkpoint = "HuggingFaceTB/SmolLM-135M"
model = AutoModelForCausalLM.from_pretrained(model_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# NOTE(review): temperature=0 only matters when sampling is enabled; with the
# pipeline's default do_sample=False it is ignored (and some transformers
# versions reject temperature <= 0) — confirm the intended decoding strategy.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, repetition_penalty=1.5, temperature=0)
# Leaderboard data: models.csv is expected to sit next to this file. Render a
# snapshot to tab.html, which the "Demo" tab reads once at UI build time.
abs_path = Path(__file__).parent
df = pd.read_csv(str(abs_path / "models.csv"))
df.to_html("tab.html")
def refreshfn() -> gr.HTML:
    """Reload models.csv and return a fresh HTML table for the leaderboard.

    Re-reads the CSV on every click so edits to models.csv show up without
    restarting the app. Renders the table directly to a string instead of
    round-tripping through tab.html on disk (the original wrote the file and
    immediately re-read it; the on-disk snapshot is only needed once, at UI
    build time).
    """
    table = pd.read_csv(str(abs_path / "models.csv"))
    # DataFrame.to_html() with no path returns the HTML as a string.
    return gr.HTML(table.to_html())
def chatfn(text):
    """Echo helper: return the input twice, one copy per chatbot column."""
    duplicated = (text, text)
    return duplicated
# UI definition: a "Demo" tab showing the leaderboard table with a refresh
# button, and a "Chats" tab with two chatbots fed by the same textbox.
with gr.Blocks() as demo:
    gr.Markdown("""
    # 🥇 Leaderboard Component
    """)
    with gr.Tabs():
        with gr.Tab("Demo"):
            # Initial leaderboard snapshot, written to tab.html at import time.
            with open("tab.html") as f:
                content = f.read()
            t = gr.HTML(content)
            btn = gr.Button("Refresh")
            btn.click(fn=refreshfn, inputs=None, outputs=t)
        with gr.Tab("Chats"):
            with gr.Column():
                chatbot = gr.Chatbot()
            with gr.Column():
                chatbot1 = gr.Chatbot()
            msg = gr.Textbox()
            clear = gr.ClearButton([msg, chatbot])

            def respond(message, chat_history):
                """Generate a reply with the SmolLM pipeline and append the
                (user, bot) pair to the chat history.

                Returns ("", history) so the textbox is cleared after submit.
                """
                response = pipe(message)
                bot_message = response[0]["generated_text"]
                chat_history.append((message, bot_message))
                return "", chat_history

            # msg.submit() only *registers* an event handler with Gradio — it
            # does not execute it — so the original's ThreadPoolExecutor
            # wrapper (and its random/time imports) did nothing useful.
            # Register both handlers directly; Gradio invokes each one when
            # the textbox is submitted.
            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            msg.submit(respond, [msg, chatbot1], [msg, chatbot1])
# Start the Gradio server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()