Jawad138 committed on
Commit
ddec797
·
1 Parent(s): fa50910

update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -36
app.py CHANGED
@@ -2,7 +2,6 @@ import streamlit as st
2
  from streamlit_chat import message
3
  from langchain.chains import ConversationalRetrievalChain
4
  from langchain.embeddings import HuggingFaceEmbeddings
5
- from langchain.llms import CTransformers
6
  from langchain.llms import Replicate
7
  from langchain.text_splitter import CharacterTextSplitter
8
  from langchain.vectorstores import FAISS
@@ -15,10 +14,8 @@ import os
15
  from dotenv import load_dotenv
16
  import tempfile
17
 
18
-
19
  load_dotenv()
20
 
21
-
22
  def initialize_session_state():
23
  if 'history' not in st.session_state:
24
  st.session_state['history'] = []
@@ -39,35 +36,31 @@ def display_chat_history(chain):
39
  container = st.container()
40
 
41
  with container:
42
- with st.form(key='my_form', clear_on_submit=True):
43
- user_input = st.text_input("Question:", placeholder="Ask about your Documents", key='input')
44
- submit_button = st.form_submit_button(label='Send')
45
-
46
- if submit_button and user_input:
47
- with st.spinner('Generating response...'):
48
- output = conversation_chat(user_input, chain, st.session_state['history'])
49
 
50
- st.session_state['past'].append(user_input)
51
- st.session_state['generated'].append(output)
 
 
52
 
53
- if st.session_state['generated']:
54
- with reply_container:
55
- for i in range(len(st.session_state['generated'])):
56
- message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
57
- message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")
58
 
59
  def create_conversational_chain(vector_store):
60
  load_dotenv()
61
- # Create llm
62
- #llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q4_0.bin",
63
- #streaming=True,
64
- #callbacks=[StreamingStdOutCallbackHandler()],
65
- #model_type="llama", config={'max_new_tokens': 500, 'temperature': 0.01})
66
  llm = Replicate(
67
- streaming = True,
68
- model = "replicate/llama-2-70b-chat:r8_[REDACTED]",  <!-- NOTE(review): an API token was pasted where the model *version hash* belongs; token redacted — revoke it -->
69
  callbacks=[StreamingStdOutCallbackHandler()],
70
- input = {"temperature": 0.01, "max_length" :500,"top_p":1})
 
 
71
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
72
 
73
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
@@ -78,12 +71,10 @@ def create_conversational_chain(vector_store):
78
  def main():
79
  load_dotenv()
80
  initialize_session_state()
81
- st.title("ChatBot ")
82
- # Initialize Streamlit
83
  st.sidebar.title("Document Processing")
84
  uploaded_files = st.sidebar.file_uploader("Upload files", accept_multiple_files=True)
85
 
86
-
87
  if uploaded_files:
88
  text = []
89
  for file in uploaded_files:
@@ -107,18 +98,11 @@ def main():
107
  text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=100, length_function=len)
108
  text_chunks = text_splitter.split_documents(text)
109
 
110
- # Create embeddings
111
  embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
112
  model_kwargs={'device': 'cpu'})
113
-
114
- # Create vector store
115
  vector_store = FAISS.from_documents(text_chunks, embedding=embeddings)
116
-
117
- # Create the chain object
118
  chain = create_conversational_chain(vector_store)
119
-
120
-
121
  display_chat_history(chain)
122
 
123
  if __name__ == "__main__":
124
- main()
 
2
  from streamlit_chat import message
3
  from langchain.chains import ConversationalRetrievalChain
4
  from langchain.embeddings import HuggingFaceEmbeddings
 
5
  from langchain.llms import Replicate
6
  from langchain.text_splitter import CharacterTextSplitter
7
  from langchain.vectorstores import FAISS
 
14
  from dotenv import load_dotenv
15
  import tempfile
16
 
 
17
  load_dotenv()
18
 
 
19
  def initialize_session_state():
20
  if 'history' not in st.session_state:
21
  st.session_state['history'] = []
 
36
  container = st.container()
37
 
38
  with container:
39
+ col1, col2 = st.columns(2)
 
 
 
 
 
 
40
 
41
+ with col1:
42
+ with st.form(key='my_form', clear_on_submit=True):
43
+ user_input = st.text_input("Question:", placeholder="Ask about your Documents", key='input')
44
+ submit_button = st.form_submit_button(label='Send')
45
 
46
+ with col2:
47
+ if st.session_state['generated']:
48
+ for i in range(len(st.session_state['generated'])):
49
+ message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="thumbs")
50
+ message(st.session_state["generated"][i], key=str(i), avatar_style="fun-emoji")
51
 
52
  def create_conversational_chain(vector_store):
53
  load_dotenv()
54
+ replicate_api_token = "r8_[REDACTED]" # NOTE(review): a real Replicate API token was committed here in plaintext — revoke it immediately and load it via os.environ["REPLICATE_API_TOKEN"] from the .env file (load_dotenv is already called) instead of hard-coding it
55
+ os.environ["REPLICATE_API_TOKEN"] = replicate_api_token
56
+
 
 
57
  llm = Replicate(
58
+ streaming=True,
59
+ model="replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
60
  callbacks=[StreamingStdOutCallbackHandler()],
61
+ input={"temperature": 0.01, "max_length": 500, "top_p": 1},
62
+ replicate_api_token=replicate_api_token
63
+ )
64
  memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
65
 
66
  chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
 
71
  def main():
72
  load_dotenv()
73
  initialize_session_state()
74
+ st.title("Chat With Your Doc")
 
75
  st.sidebar.title("Document Processing")
76
  uploaded_files = st.sidebar.file_uploader("Upload files", accept_multiple_files=True)
77
 
 
78
  if uploaded_files:
79
  text = []
80
  for file in uploaded_files:
 
98
  text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=100, length_function=len)
99
  text_chunks = text_splitter.split_documents(text)
100
 
 
101
  embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
102
  model_kwargs={'device': 'cpu'})
 
 
103
  vector_store = FAISS.from_documents(text_chunks, embedding=embeddings)
 
 
104
  chain = create_conversational_chain(vector_store)
 
 
105
  display_chat_history(chain)
106
 
107
  if __name__ == "__main__":
108
+ main()