import streamlit as st
import ollama
import os
import logging
from langchain_ollama import ChatOllama
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
# from langchain_ollama import OllamaEmbeddings  # alternative: Ollama-served embeddings
from langchain_community.embeddings import HuggingFaceEmbeddings
import faiss
from langchain_community.vectorstores import FAISS
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import ChatPromptTemplate
from typing import List, Tuple, Dict, Any, Optional
# pip install -qU langchain-ollama
# pip install langchain

##### Logging (configured once at module level)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)


def format_docs(docs):
    """Join the page contents of retrieved documents into a single context string."""
    return "\n\n".join(doc.page_content for doc in docs)


@st.cache_resource(show_spinner=True)
def extract_model_names(
    models_info: Dict[str, List[Dict[str, Any]]],
) -> Tuple[str, ...]:
    """
    Extract model names from the provided models information.

    Args:
        models_info (Dict[str, List[Dict[str, Any]]]): Dictionary containing
            information about available models.

    Returns:
        Tuple[str, ...]: A tuple of model names.
    """
    logger.info("Extracting model names from models_info")
    model_names = tuple(model["name"] for model in models_info["models"])
    logger.info(f"Extracted model names: {model_names}")
    return model_names
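

# extract_model_names is defined but not yet wired into the UI below. A minimal
# sketch of how it could drive a model picker, assuming the Ollama daemon is
# running locally and that ollama.list() returns the older dict shape
# {'models': [{'name': ...}, ...]} that the subscripting above expects:
#
#   models_info = ollama.list()
#   available_models = extract_model_names(models_info)
#   selected_model = st.selectbox("Pick a model", available_models)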


def generate_response(rag_chain, input_text):
    """Run the RAG chain on the user's input and return the generated answer."""
    response = rag_chain.invoke(input_text)
    return response


def get_pdf(uploaded_file):
    """Persist the uploaded PDF to a temporary file and load it as LangChain documents."""
    temp_file = "./temp.pdf"
    if uploaded_file:
        # Delete the existing temp.pdf file if it exists
        if os.path.exists(temp_file):
            os.remove(temp_file)
        with open(temp_file, "wb") as file:
            file.write(uploaded_file.getvalue())
        loader = PyPDFLoader(temp_file)
        docs = loader.load()
        return docs
    # Nothing uploaded: return None explicitly so the caller can guard against it
    return None
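
# Note: PyPDFLoader yields one Document per PDF page, each carrying page_content
# plus metadata such as the source path and page number, so the splitter below
# operates on page-sized documents rather than one monolithic string.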


def main() -> None:
    st.title("🧠 This is a RAG Chatbot with Ollama and LangChain!")
    st.write("The LLM model Llama-3.2 is used.")
    st.write("You can upload a PDF to chat with!")

    with st.sidebar:
        st.title("PDF FILE UPLOAD:")
        docs = st.file_uploader(
            "Upload your PDF file and click on the Submit & Process button",
            accept_multiple_files=False,
            key="pdf_uploader",
        )

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    raw_text = get_pdf(docs)
    if raw_text is None:
        # No PDF yet: stop this script run instead of crashing in split_documents
        st.info("Please upload a PDF file to start chatting.")
        st.stop()
    chunks = text_splitter.split_documents(raw_text)
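
    # RecursiveCharacterTextSplitter falls back from paragraph to sentence to word
    # boundaries when it needs a split point; the 100-character overlap keeps some
    # shared context across neighbouring chunks so mid-passage facts are not cut off.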
    # embeddings = OllamaEmbeddings(model='nomic-embed-text', base_url="http://localhost:11434")
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    single_vector = embeddings.embed_query("this is some text data")
    index = faiss.IndexFlatL2(len(single_vector))
    vector_store = FAISS(
        embedding_function=embeddings,
        index=index,
        docstore=InMemoryDocstore(),
        index_to_docstore_id={},
    )
    ids = vector_store.add_documents(documents=chunks)
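
    # The throwaway embed_query call above is a dimension probe: IndexFlatL2 must be
    # constructed with the embedding width (384 for all-MiniLM-L6-v2) and performs
    # exact brute-force L2 search, which is perfectly adequate at single-PDF scale.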

    ## Retrieval
    retriever = vector_store.as_retriever(
        search_type="mmr",
        search_kwargs={"k": 3, "fetch_k": 100, "lambda_mult": 1},
    )
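
    # MMR first fetches the fetch_k=100 nearest chunks, then re-ranks them down to
    # k=3. lambda_mult trades relevance against diversity: at 1 it is pure relevance,
    # so lowering it (e.g. to 0.5) would return less redundant, more varied chunks.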

    prompt = """
        You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question.
        If you don't know the answer, just say that you don't know.
        Answer in bullet points. Make sure your answer is relevant to the question and is answered from the context only.
        Question: {question}
        Context: {context}
        Answer:
    """
    prompt = ChatPromptTemplate.from_template(prompt)
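
    # from_template infers {question} and {context} as the prompt's input variables;
    # their names must match the keys of the dict that feeds the chain below.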

    ## Model switched from llama3.2:latest to unsloth/Llama-3.2-3B
    model = ChatOllama(model="unsloth/Llama-3.2-3B")
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
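
    # The leading dict fans the incoming question out to two branches: the retriever
    # (whose documents are piped through format_docs into one context string) and a
    # passthrough of the raw question. The filled prompt then flows through the chat
    # model and StrOutputParser, so rag_chain.invoke("some question") returns plain text.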

    with st.form("llm-form"):
        text = st.text_area("Enter your question or statement:")
        submit = st.form_submit_button("Submit")

    if "chat_history" not in st.session_state:
        st.session_state["chat_history"] = []

    if submit and text:
        with st.spinner("Generating response..."):
            response = generate_response(rag_chain, text)
            st.session_state["chat_history"].append({"user": text, "ollama": response})
            st.write(response)

    st.write("## Chat History")
    for chat in reversed(st.session_state["chat_history"]):
        st.write(f"**🧑 User**: {chat['user']}")
        st.write(f"**🧠 Assistant**: {chat['ollama']}")
        st.write("---")

if __name__ == "__main__":
    main()
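
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py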