KvrParaskevi committed on
Commit
2e9166f
·
verified ·
1 Parent(s): 44b361e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -48
app.py CHANGED
@@ -1,59 +1,80 @@
1
- import gradio as gr
2
- import spaces
3
- from langchain_core.pydantic_v1 import BaseModel, Field
4
- from langchain.prompts import HumanMessagePromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate
5
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
 
 
 
 
 
6
 
7
- # Load the Hugging Face model and tokenizer
8
- model_name = "KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
10
- tokenizer = AutoTokenizer.from_pretrained(model_name)
11
 
12
- system_message = """<<SYS>>
13
- You are an AI having conversation with a human. Below is an instruction that describes a task.
14
- Write a response that appropriately completes the request.
15
- Reply with the most helpful and logic answer. During the conversation you need to ask the user
16
- the following questions to complete the hotel booking task.
17
- 1) Where would you like to stay and when?
18
- 2) How many people are staying in the room?
19
- 3) Do you prefer any ammenities like breakfast included or gym?
20
- 4) What is your name, your email address and phone number?
21
- Make sure you receive a logical answer from the user from every question to complete the hotel
22
- booking process.
23
- <</SYS>>
24
- """
25
- messages = [
26
- SystemMessagePromptTemplate.from_template(system_message)
27
- ]
28
 
29
- # Define the Langchain chatbot function
30
- @spaces.GPU
31
- def chatbot(message, history):
32
- # Create a Langchain prompt template
33
- prompt_template = HumanMessagePromptTemplate.from_template(message)
34
- # Create a Langchain chat prompt template
35
- messages.append(prompt_template)
36
- chat_prompt_template = ChatPromptTemplate.from_messages(messages)
37
- # Use the Langchain TextIteratorStreamer to generate responses
38
- streamer = TextStreamer(model, tokenizer)
39
- response = model.generate(chat_prompt_template, streamer=streamer, max_new_tokens=20)
40
- return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
- # Create a Gradio chatbot interface
43
  with gr.Blocks() as demo:
44
- #chatbot_interface = gr.Chatbot()
45
- #msg = gr.Textbox()
46
- #clear = gr.Button("Clear")
47
 
48
- # Define the chatbot function as a Gradio interface
49
  demo.chatbot_interface = gr.Interface(
50
- fn=chatbot,
51
  inputs=[
52
- gr.Textbox(lines=1, label="Input"),
53
  ],
54
- outputs="text",
55
- title="Langchain Chatbot",
56
- description="A simple chatbot using Langchain and Hugging Face"
57
  )
58
-
59
  demo.launch()
 
1
import gradio as gr
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain import HuggingFaceHub
from langchain.llms.base import LLM
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory
from langchain.chains import LLMChain, ConversationChain
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.prompts.chat import SystemMessagePromptTemplate, HumanMessagePromptTemplate
11
 
12
+ your_endpoint_url = "https://kp4xdy196cw81uf3.us-east-1.aws.endpoints.huggingface.cloud"
13
+ token = st.secrets["HUGGINGFACEHUB_API_TOKEN"]
 
 
14
 
15
+ llm = HuggingFaceEndpoint(
16
+ endpoint_url=f"{your_endpoint_url}",
17
+ huggingfacehub_api_token = f"{token}",
18
+ task = "text-generation",
19
+ max_new_tokens=128,
20
+ top_k=10,
21
+ top_p=0.95,
22
+ typical_p=0.95,
23
+ temperature=0.01,
24
+ repetition_penalty=1.03
25
+ )
26
+ #print(llm)
 
 
 
 
27
 
28
+ def chat_template_prompt():
29
+ template = """
30
+ Do not repeat questions and do not generate answer for user/human.
31
+
32
+ You are a helpful hotel booking asssitant.
33
+ Below is an instruction that describes a task.
34
+ Write a response that appropriately completes the request.
35
+ Reply with the most helpful and logic answer. During the conversation you need to ask the user
36
+ the following questions to complete the hotel booking task.
37
+ 1) Where would you like to stay and when?
38
+ 2) How many people are staying in the room?
39
+ 3) Do you prefer any ammenities like breakfast included or gym?
40
+ 4) What is your name, your email address and phone number?
41
+
42
+ When the booking task is completed, respond with "Thank you for choosing us.".
43
+
44
+ {history}
45
+
46
+ """
47
+
48
+ system_prompt = SystemMessagePromptTemplate.from_template(template)
49
+ human_prompt = HumanMessagePromptTemplate.from_template("{input}")
50
+ chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
51
+ return chat_prompt
52
+
53
+ def chain():
54
+ #memory = ConversationBufferMemory(memory_key="history")
55
+ chat_prompt = chat_template_prompt()
56
+ memory = ConversationBufferWindowMemory(k=3) #memory_key="history"
57
+ llm_chain = LLMChain(llm=llm, memory = memory, prompt = chat_prompt)
58
+ memory.load_memory_variables({}) #Initialize memory
59
+ return llm_chain
60
+
61
+ def chat_output(inputs):
62
+ llm_chaim = chain()
63
+ result = llm_chaim.predict(input = inputs)
64
+ return result
65
 
 
66
  with gr.Blocks() as demo:
67
+
68
+ chatbot_component = gr.Chatbot(height=300, label = "history")
69
+ textbox_component = gr.Textbox(placeholder="Can I help you to book a hotel?", container=False, label = "input", scale=7)
70
 
 
71
  demo.chatbot_interface = gr.Interface(
72
+ fn=chat_output,
73
  inputs=[
74
+ textbox_component
75
  ],
76
+ outputs=chatbot_component,
77
+ title = "Hotel Booking Assistant Chat 🤗",
78
+ description = "I am your hotel booking assistant. Feel free to start chatting with me."
79
  )
 
80
  demo.launch()