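"""Medical Consultation Chatbot (Gradio demo).

Sends user questions to the ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1
model via the Hugging Face Inference API and displays the generated answer.
Requires a HUGGINGFACE_API_KEY environment variable containing a valid
Hugging Face access token.
"""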
import gradio as gr
import requests
import os

# Hugging Face API URL and token for the model
API_URL = "/static-proxy?url=https%3A%2F%2Fapi-inference.huggingface.co%2Fmodels%2FContactDoctor%2FBio-Medical-MultiModal-Llama-3-8B-V1%26quot%3B%3C%2Fspan%3E
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Define a function to send user input to the model
def get_bot_response(user_input):
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API_KEY}"}
    response = requests.post(API_URL, headers=headers, json={"inputs": user_input}, timeout=60)
    if response.status_code == 200:
        # The text-generation endpoint returns a list of {"generated_text": ...} objects
        result = response.json()
        bot_response = result[0]["generated_text"]
    else:
        bot_response = f"Sorry, the model is currently unavailable (HTTP {response.status_code})."
    return bot_response

# Set up Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Medical Consultation Chatbot")
    user_input = gr.Textbox(label="Enter your question:")
    output = gr.Textbox(label="Bot Response")

    # On submit, call the get_bot_response function
    user_input.submit(get_bot_response, user_input, output)

demo.launch()