Update app.py
Try inference instead of generate_response

app.py (CHANGED)

@@ -9,7 +9,6 @@ from langchain_community.document_loaders import PyPDFLoader
 
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 
-#from langchain_ollama import OllamaEmbeddings
 from langchain.embeddings import HuggingFaceEmbeddings
 
 import faiss
@@ -83,6 +82,11 @@ def get_pdf(uploaded_file):
     docs = loader.load()
     return docs
 
+def inference(chain, input_query):
+    """Invoke the processing chain with the input query."""
+    result = chain.invoke(input_query)
+    return result
+
 
 def main() -> None:
 
@@ -155,7 +159,12 @@ def main() -> None:
 
     if submit and text:
         with st.spinner("Generating response..."):
-            response = generate_response(rag_chain, text)
+            # Ken (12/11/2024): modify start
+            #response = generate_response(rag_chain, text)
+            ##retriever = configure_retriever(pdf_loader)
+            ##chain = create_chain(retriever, prompt, model)
+            response = inference(rag_chain, text)
+            # Ken (12/11/2024): modify end
             st.session_state['chat_history'].append({"user": text, "ollama": response})
             st.write(response)
 
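For context, here is a minimal sketch of how the new inference() helper behaves. The chain below is a stand-in built from RunnableLambda so the helper can run without a model, retriever, or index; in app.py the real rag_chain is assumed to come from the create_chain(retriever, prompt, model) path referenced in the commented-out lines.

from langchain_core.runnables import RunnableLambda

def inference(chain, input_query):
    """Invoke the processing chain with the input query."""
    result = chain.invoke(input_query)
    return result

# Stand-in chain: a runnable that echoes its input. In app.py the chain
# is the full RAG pipeline; this toy version only shows the call shape.
toy_chain = RunnableLambda(lambda q: f"answer for: {q}")

print(inference(toy_chain, "What does the uploaded PDF say?"))
# prints: answer for: What does the uploaded PDF say?

Any LangChain runnable exposes invoke(), so the helper works unchanged whether the chain is this toy lambda or the retriever-plus-model pipeline built in main().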
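The import hunk also settles on HuggingFaceEmbeddings (leaving the OllamaEmbeddings import commented out) alongside faiss. The diff does not show how the index is built, so the following is only a hedged sketch of one common HuggingFaceEmbeddings + FAISS pairing; the model name and toy corpus are assumptions, not code from app.py.

from langchain.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Assumed model; app.py may configure a different one.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Toy corpus standing in for the PDF chunks produced by get_pdf() and the splitter.
index = FAISS.from_texts(
    ["FAISS performs similarity search over dense vectors.",
     "HuggingFaceEmbeddings wraps sentence-transformers models."],
    embeddings,
)

retriever = index.as_retriever()
print(retriever.invoke("What is FAISS?"))  # returns the most similar Documents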