KvrParaskevi
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -1,59 +1,80 @@
|
|
1 |
-
import
|
2 |
-
import
|
3 |
-
from
|
4 |
-
from langchain.
|
5 |
-
from
|
|
|
|
|
|
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
model = AutoModelForCausalLM.from_pretrained(model_name)
|
10 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
11 |
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
"""
|
25 |
-
messages = [
|
26 |
-
SystemMessagePromptTemplate.from_template(system_message)
|
27 |
-
]
|
28 |
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
-
# Create a Gradio chatbot interface
|
43 |
with gr.Blocks() as demo:
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
|
48 |
-
# Define the chatbot function as a Gradio interface
|
49 |
demo.chatbot_interface = gr.Interface(
|
50 |
-
fn=
|
51 |
inputs=[
|
52 |
-
|
53 |
],
|
54 |
-
outputs=
|
55 |
-
title="
|
56 |
-
description="
|
57 |
)
|
58 |
-
|
59 |
demo.launch()
|
|
|
import gradio as gr
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

from langchain import HuggingFaceHub
from langchain.chains import LLMChain, ConversationChain
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
|
12 |
+
your_endpoint_url = "https://kp4xdy196cw81uf3.us-east-1.aws.endpoints.huggingface.cloud"
|
13 |
+
token = st.secrets["HUGGINGFACEHUB_API_TOKEN"]
|
|
|
|
|
14 |
|
15 |
+
llm = HuggingFaceEndpoint(
|
16 |
+
endpoint_url=f"{your_endpoint_url}",
|
17 |
+
huggingfacehub_api_token = f"{token}",
|
18 |
+
task = "text-generation",
|
19 |
+
max_new_tokens=128,
|
20 |
+
top_k=10,
|
21 |
+
top_p=0.95,
|
22 |
+
typical_p=0.95,
|
23 |
+
temperature=0.01,
|
24 |
+
repetition_penalty=1.03
|
25 |
+
)
|
26 |
+
#print(llm)
|
|
|
|
|
|
|
|
|
27 |
|
28 |
+
def chat_template_prompt():
|
29 |
+
template = """
|
30 |
+
Do not repeat questions and do not generate answer for user/human.
|
31 |
+
|
32 |
+
You are a helpful hotel booking asssitant.
|
33 |
+
Below is an instruction that describes a task.
|
34 |
+
Write a response that appropriately completes the request.
|
35 |
+
Reply with the most helpful and logic answer. During the conversation you need to ask the user
|
36 |
+
the following questions to complete the hotel booking task.
|
37 |
+
1) Where would you like to stay and when?
|
38 |
+
2) How many people are staying in the room?
|
39 |
+
3) Do you prefer any ammenities like breakfast included or gym?
|
40 |
+
4) What is your name, your email address and phone number?
|
41 |
+
|
42 |
+
When the booking task is completed, respond with "Thank you for choosing us.".
|
43 |
+
|
44 |
+
{history}
|
45 |
+
|
46 |
+
"""
|
47 |
+
|
48 |
+
system_prompt = SystemMessagePromptTemplate.from_template(template)
|
49 |
+
human_prompt = HumanMessagePromptTemplate.from_template("{input}")
|
50 |
+
chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
|
51 |
+
return chat_prompt
|
52 |
+
|
53 |
+
def chain():
|
54 |
+
#memory = ConversationBufferMemory(memory_key="history")
|
55 |
+
chat_prompt = chat_template_prompt()
|
56 |
+
memory = ConversationBufferWindowMemory(k=3) #memory_key="history"
|
57 |
+
llm_chain = LLMChain(llm=llm, memory = memory, prompt = chat_prompt)
|
58 |
+
memory.load_memory_variables({}) #Initialize memory
|
59 |
+
return llm_chain
|
60 |
+
|
61 |
+
def chat_output(inputs):
|
62 |
+
llm_chaim = chain()
|
63 |
+
result = llm_chaim.predict(input = inputs)
|
64 |
+
return result
|
65 |
|
|
|
66 |
with gr.Blocks() as demo:
|
67 |
+
|
68 |
+
chatbot_component = gr.Chatbot(height=300, label = "history")
|
69 |
+
textbox_component = gr.Textbox(placeholder="Can I help you to book a hotel?", container=False, label = "input", scale=7)
|
70 |
|
|
|
71 |
demo.chatbot_interface = gr.Interface(
|
72 |
+
fn=chat_output,
|
73 |
inputs=[
|
74 |
+
textbox_component
|
75 |
],
|
76 |
+
outputs=chatbot_component,
|
77 |
+
title = "Hotel Booking Assistant Chat 🤗",
|
78 |
+
description = "I am your hotel booking assistant. Feel free to start chatting with me."
|
79 |
)
|
|
|
80 |
demo.launch()
|