# Lomba-TKDN / app.py
# Hugging Face Space file (author: Belva1, commit dbb7617, ~1.05 kB).
# NOTE: the lines above were non-code page chrome from an HTML scrape of the
# Space's file viewer; converted to comments so the module parses.
import gradio as gr
import requests
import os
# Hugging Face Inference API endpoint for the Bio-Medical multimodal model.
# NOTE(review): the original URL literal was mangled by an HTML scrape
# (static-proxy prefix + percent-escaped entities + a trailing HTML comment);
# reconstructed from the visible model id — confirm against the Space repo.
API_URL = "https://api-inference.huggingface.co/models/ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"
# API token read from the environment; os.getenv returns None when unset,
# in which case the request below sends "Bearer None" and the API rejects it.
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
# Define a function to send user input to the model
def get_bot_response(user_input):
    """Send *user_input* to the Hugging Face Inference API and return the reply.

    Parameters
    ----------
    user_input : str
        The user's question, forwarded verbatim as the model prompt.

    Returns
    -------
    str
        The model's generated text, or a fallback message if the request
        fails, times out, or the response payload has an unexpected shape.
    """
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
    try:
        # timeout= keeps a stalled API call from hanging the UI forever.
        response = requests.post(
            API_URL,
            headers=headers,
            json={"inputs": user_input},
            timeout=60,
        )
        response.raise_for_status()
        result = response.json()
        # Expected payload shape: [{"generated_text": "..."}]. The index/key
        # access is guarded below against error payloads (e.g. model loading).
        return result[0]["generated_text"]
    except (requests.RequestException, KeyError, IndexError, TypeError, ValueError):
        # Same user-facing fallback as the original non-200 branch.
        return "Sorry, the model is currently unavailable."
# Assemble the Gradio UI: one question box wired straight to the model call.
with gr.Blocks() as demo:
    gr.Markdown("# Medical Consultation Chatbot")
    question_box = gr.Textbox(label="Enter your question:")
    answer_box = gr.Textbox(label="Bot Response")
    # Pressing Enter in the question box sends its text to get_bot_response
    # and routes the returned string into the answer box.
    question_box.submit(get_bot_response, question_box, answer_box)

demo.launch()