KvrParaskevi
committed
Update app.py
Browse files
app.py
CHANGED
# Hugging Face model id for the fine-tuned hotel-booking Llama-2 checkpoint.
model_name = "KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"

# Load the causal-LM weights and matching tokenizer once, at import time,
# so every chat request reuses the same in-memory model.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Define the Langchain chatbot function
@spaces.GPU
def chatbot(message, history):
    """Generate one model reply for the user *message*.

    Args:
        message: Raw user text for this turn.
        history: Prior (user, bot) turns supplied by the chat UI; currently
            unused by this implementation.

    Returns:
        The output of ``model.generate`` for this turn.
    """
    # Create a Langchain prompt template from the incoming user message.
    prompt_template = HumanMessagePromptTemplate.from_template(message)
    # BUGFIX: chat_prompt_template was referenced below but never assigned,
    # which raised NameError on every call. Build it from the user prompt.
    chat_prompt_template = ChatPromptTemplate.from_messages([prompt_template])
    # Use the Langchain TextIteratorStreamer to generate responses
    streamer = TextStreamer(model, tokenizer)
    # NOTE(review): passing a ChatPromptTemplate object to generate() is
    # unusual — generate() normally expects tokenized input ids; verify
    # against the transformers API before relying on this path.
    response = model.generate(chat_prompt_template, streamer=streamer, max_new_tokens=20)
    # BUGFIX: the function previously returned None, so the chat UI would
    # display no reply. Return the generation result to the caller.
    return response
|
|
|
# Load the fine-tuned hotel-booking model and its tokenizer once at import
# time; every request served by this process reuses them.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Llama-2 style system prompt (<<SYS>> ... <</SYS>>) that steers the model
# through the hotel-booking question flow. Text is intentionally verbatim.
system_message = """<<SYS>>
You are an AI having conversation with a human. Below is an instruction that describes a task.
Write a response that appropriately completes the request.
Reply with the most helpful and logic answer. During the conversation you need to ask the user
the following questions to complete the hotel booking task.
1) Where would you like to stay and when?
2) How many people are staying in the room?
3) Do you prefer any ammenities like breakfast included or gym?
4) What is your name, your email address and phone number?
Make sure you receive a logical answer from the user from every question to complete the hotel
booking process.
<</SYS>>
"""

# Seed the conversation with the system prompt; user turns are added later.
messages = [SystemMessagePromptTemplate.from_template(system_message)]
# Define the Langchain chatbot function
@spaces.GPU
def chatbot(message, history):
    """Generate one model reply for the user *message*.

    Args:
        message: Raw user text for this turn.
        history: Prior (user, bot) turns supplied by the chat UI; currently
            unused by this implementation.

    Returns:
        The output of ``model.generate`` for this turn.
    """
    # Create a Langchain prompt template from the incoming user message.
    prompt_template = HumanMessagePromptTemplate.from_template(message)
    # BUGFIX: the original appended to the module-level ``messages`` list,
    # so every call permanently grew the shared prompt with stale user
    # turns. Build a per-call list instead; the shared seed is untouched.
    chat_messages = messages + [prompt_template]
    chat_prompt_template = ChatPromptTemplate.from_messages(chat_messages)
    # Use the Langchain TextIteratorStreamer to generate responses
    streamer = TextStreamer(model, tokenizer)
    # NOTE(review): passing a ChatPromptTemplate object to generate() is
    # unusual — generate() normally expects tokenized input ids; verify
    # against the transformers API before relying on this path.
    response = model.generate(chat_prompt_template, streamer=streamer, max_new_tokens=20)
    # BUGFIX: the function previously returned None, so the chat UI would
    # display no reply. Return the generation result to the caller.
    return response