Upload app.py
Browse files
app.py
CHANGED
@@ -31,7 +31,7 @@ def load_documents():
|
|
31 |
|
32 |
|
33 |
def split_documents(documents):
    """Split *documents* into semantically coherent chunks.

    Chunk boundaries are chosen by LangChain's ``SemanticChunker``,
    which embeds sentences with OpenAI embeddings and cuts where the
    embedding similarity drops.

    Args:
        documents: A list of LangChain ``Document`` objects to split.

    Returns:
        The list of chunked ``Document`` objects.
    """
    chunker = SemanticChunker(OpenAIEmbeddings())
    return chunker.split_documents(documents)
|
37 |
|
@@ -39,7 +39,7 @@ def split_documents(documents):
|
|
39 |
def embeddings_on_local_vectordb(texts):
|
40 |
vectordb = Chroma.from_documents(
|
41 |
texts,
|
42 |
-
embedding=OpenAIEmbeddings(),
|
43 |
persist_directory=LOCAL_VECTOR_STORE_DIR.as_posix(),
|
44 |
)
|
45 |
vectordb.persist()
|
@@ -52,7 +52,7 @@ def embeddings_on_local_vectordb(texts):
|
|
52 |
|
53 |
def query_llm(retriever, query):
|
54 |
qa_chain = ConversationalRetrievalChain.from_llm(
|
55 |
-
llm=OpenAIChat(),
|
56 |
retriever=retriever,
|
57 |
return_source_documents=True,
|
58 |
chain_type="refine",
|
|
|
31 |
|
32 |
|
33 |
def split_documents(documents):
    """Split *documents* into semantically coherent chunks.

    Chunk boundaries are chosen by LangChain's ``SemanticChunker``,
    which embeds text with OpenAI embeddings and cuts where the
    embedding similarity drops.

    Args:
        documents: A list of LangChain ``Document`` objects to split.

    Returns:
        The list of chunked ``Document`` objects.
    """
    # FIX: dropped temperature=0 from OpenAIEmbeddings — the OpenAI
    # embeddings endpoint has no temperature parameter, and LangChain's
    # pydantic-validated OpenAIEmbeddings rejects the unknown kwarg.
    # temperature=0 belongs on the chat LLM (see query_llm), not here.
    text_splitter = SemanticChunker(OpenAIEmbeddings())
    texts = text_splitter.split_documents(documents)
    return texts
|
37 |
|
|
|
39 |
def embeddings_on_local_vectordb(texts):
|
40 |
vectordb = Chroma.from_documents(
|
41 |
texts,
|
42 |
+
embedding=OpenAIEmbeddings(temperature=0),
|
43 |
persist_directory=LOCAL_VECTOR_STORE_DIR.as_posix(),
|
44 |
)
|
45 |
vectordb.persist()
|
|
|
52 |
|
53 |
def query_llm(retriever, query):
|
54 |
qa_chain = ConversationalRetrievalChain.from_llm(
|
55 |
+
llm=OpenAIChat(temperature=0),
|
56 |
retriever=retriever,
|
57 |
return_source_documents=True,
|
58 |
chain_type="refine",
|