import gradio as gr
import os
from utils import generate_response, emb2info, pre_prompt, get_embedding
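# Note: the helpers above come from this project's own utils module; their
# implementations are not shown in this file. The signatures sketched below are
# assumptions inferred only from how they are used here:
#   get_embedding(text)       -> embedding vector for the user question
#   emb2info(embedding)       -> (info_to_add, retrieval_text): retrieved team info
#   generate_response(prompt) -> str: completion from the language model
#   pre_prompt                -> str: instruction prefix prepended to every prompt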
os.environ['NO_PROXY'] = '127.0.0.1'
print(os.getcwd())
def predict(user_input, history=None):
    # Use None as the default to avoid sharing a mutable list between calls;
    # Gradio always passes the conversation state explicitly anyway.
    if history is None:
        history = []
    # Embed the user question and retrieve the matching team information.
    emb_user = get_embedding(user_input)
    info_to_add, retrieval_text = emb2info(emb_user)  # retrieval_text is unused here
    # Build the prompt from the retrieved context and generate the answer.
    response = generate_response(pre_prompt + info_to_add +
                                 "\n \n User : " + user_input + "\n Chat bot :")
    history.append((user_input, response))
    return history, history
with gr.Blocks() as app:
    gr.Markdown(
        "## Bienvenue sur l'interface demo de SARA "
    )

    # Team logo served from the static/ folder.
    logo_URL = "file/static/logo_sara.png"
    image = '<center> <img src="{}" width="150px"></center>'.format(logo_URL)
    gr.HTML(image)

    chatbot = gr.Chatbot()
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Entrez votre question").style(container=False)
    gr.Examples(
        examples=[
            "Who should I call if I struggle with the GPU ? ",
            "Who can I call if I need help on diffusion models ? ",
            "Qui peut m'aider en NLP ?",
            "Qui est un specialiste de la segmentation d'image dans l'equipe ?",
        ],
        inputs=txt,
    )
    txt.submit(predict, [txt, state], [chatbot, state])

    gr.HTML(
        "<center> Created with ❤️ by @louis_ulmer & @aurelien_lac"
    )

app.launch(auth=("cellule_ia", "pass"))
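# Minimal usage sketch (assumption: the utils helpers are configured and reachable):
# predict() can be exercised directly, outside the Gradio UI, e.g. as a quick smoke test.
#
#   history, _ = predict("Qui peut m'aider en NLP ?", [])
#   print(history[-1][1])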