|
import gradio as gr
import spaces
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.pydantic_v1 import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
|
|
|
|
|
# Hugging Face Hub id of the fine-tuned hotel-booking Llama-2 model.
model_name = "KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"

# Load the tokenizer and causal-LM weights once at import time so every
# chat request reuses the same in-memory model.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
# Llama-2 style system prompt (wrapped in <<SYS>> markers) instructing the
# model to walk the user through the four questions of a hotel booking.
system_message = """<<SYS>>
You are an AI having conversation with a human. Below is an instruction that describes a task.
Write a response that appropriately completes the request.
Reply with the most helpful and logical answer. During the conversation you need to ask the user
the following questions to complete the hotel booking task.
1) Where would you like to stay and when?
2) How many people are staying in the room?
3) Do you prefer any amenities like breakfast included or gym?
4) What is your name, your email address and phone number?
Make sure you receive a logical answer from the user from every question to complete the hotel
booking process.
<</SYS>>
"""

# Running conversation, seeded with the system prompt.  The chatbot()
# handler appends one human message per turn.
# NOTE(review): this module-level list is shared by all users/sessions and
# grows without bound — confirm whether per-session state is intended.
messages = [
    SystemMessagePromptTemplate.from_template(system_message)
]
|
|
|
|
|
@spaces.GPU
def chatbot(message, history=None):
    """Generate one assistant reply for *message* and return it as text.

    Args:
        message: The user's latest utterance.
        history: Chat history supplied by Gradio's chat components; unused
            here because the conversation is accumulated in the module-level
            ``messages`` list. Defaults to ``None`` so the function also
            works when called with a single argument.

    Returns:
        The decoded model completion (newly generated tokens only).
    """
    # NOTE(review): from_template() treats "{...}" in user text as template
    # variables — a user typing literal braces would break formatting.
    messages.append(HumanMessagePromptTemplate.from_template(message))
    chat_prompt = ChatPromptTemplate.from_messages(messages)

    # generate() needs token ids, not a LangChain prompt object: render the
    # prompt to a plain string, then tokenize it onto the model's device.
    prompt_text = chat_prompt.format()
    inputs = tokenizer(prompt_text, return_tensors="pt").to(model.device)

    # TextStreamer's first argument is the tokenizer (it decodes the stream
    # to stdout as tokens are produced); skip_prompt hides the echoed input.
    streamer = TextStreamer(tokenizer, skip_prompt=True)

    # max_new_tokens=20 is the originally configured budget; it is very
    # short for a booking dialogue — consider raising it.
    output_ids = model.generate(**inputs, streamer=streamer, max_new_tokens=20)

    # Strip the prompt tokens so only the new completion is returned.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(output_ids[0][prompt_len:], skip_special_tokens=True)
    return response
|
|
|
|
|
# Build and launch the UI.  gr.ChatInterface calls its fn with
# (message, history), which matches chatbot's signature — the previous
# single-textbox gr.Interface passed only one argument and raised a
# TypeError on every submit.
with gr.Blocks() as demo:
    gr.ChatInterface(
        fn=chatbot,
        title="Langchain Chatbot",
        description="A simple chatbot using Langchain and Hugging Face",
    )

demo.launch()