|
import hashlib |
|
import os |
|
from abc import ABC, abstractmethod |
|
from glob import glob |
|
from typing import Union |
|
from uuid import uuid4 |
|
import faiss |
|
import gradio as gr |
|
import numpy as np |
|
import openai |
|
import torch |
|
from langchain_community.docstore.in_memory import InMemoryDocstore |
|
from langchain_community.document_loaders import (BSHTMLLoader, CSVLoader, |
|
JSONLoader, PyPDFLoader, |
|
TextLoader) |
|
from langchain_community.vectorstores import FAISS |
|
from langchain_core.documents import Document |
|
from sentence_transformers import SentenceTransformer |
|
from transformers import AutoModel, AutoTokenizer |
|
from dotenv import load_dotenv
from groq import Groq
|
load_dotenv() |
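# Environment variables read below: OPEN_API_KEY, GROQ_KEY, and PORT.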
|
INDEX_PATH = os.path.join(os.getcwd(), "static")
print(INDEX_PATH)

|
pdf_prompt = """ |
|
You are a helpful Employee Handbook assistant, designed to provide concise, accurate, and relevant information from folio3's (our company's) internal employee handbook. Your role is to answer questions clearly, focusing on one topic at a time while remaining formal yet personable.
|
|
|
Tone: Maintain a formal tone suited for office communication, but ensure it’s friendly and approachable to foster engagement. |
|
|
|
Responses: |
|
|
|
Always greet the user warmly. |
|
|
|
Provide brief answers when possible, but if the user asks follow-up questions, offer more detailed explanations. |
|
|
|
If the user asks multiple questions, respond to each briefly, ensuring clarity without overwhelming the user. |
|
|
|
|
|
Numeric Data: Always bold numerical information such as expenses (e.g., **2000/-**) and time periods (e.g., **2 months**), keeping the values unchanged from the source text.
|
|
|
Summarization: Summarize information effectively, extracting key details from the handbook without lengthening responses unnecessarily. |
|
|
|
User Engagement: Avoid asking multiple questions at once. Instead, facilitate clear communication with a focus on being helpful and concise. |
|
|
|
Sensitive Information: Share all relevant handbook information openly, as it is accessible to all employees. |
|
|
|
|
|
At all times, remain professional, respectful, and supportive in your responses, guiding users to the information they need in the clearest way possible. |
|
""" |
|
|
|
|
|
html_prompt = """ |
|
You are an expert on the input text extracted from HTML pages and can provide relevant answers to questions based on this information. Your primary role is to ensure that the information you provide is accurate, relevant, and based solely on the content from the text. |
|
|
|
Tone: Maintain a friendly and helpful tone to engage the user effectively. |
|
|
|
Responses: |
|
|
|
Answer all user questions briefly, but if they ask multiple questions in one prompt, respond to each one concisely. |
|
|
|
After answering, invite the user to ask more specific questions if they need further details. |
|
|
|
|
|
Error Handling: If the input text does not contain relevant information, clearly state that no information is found. Do not create or fabricate answers. |
|
|
|
|
|
Always prioritize clarity and relevance, helping the user get the most accurate and direct information possible. |
|
""" |
|
|
|
|
|
chat_prompt = """ |
|
You are an expert on the input text, which contains JSON data representing a Google Chat dump. Your role is to provide accurate and relevant answers to user questions based on the content of the chats. |
|
|
|
Tone: Maintain a neutral, factual, and helpful tone in all responses. |
|
|
|
Responses: |
|
|
|
Focus on answering questions about the content of the chat. If a user asks a follow-up or more specific question, you may include the timestamp but avoid including the message ID. |
|
|
|
If the user asks about multiple messages, provide a brief response for each one and encourage the user to ask for more details if needed. |
|
|
|
If no relevant information is found, clearly state that no relevant information is available without making up any data. |
|
|
|
|
|
Context: Include who said what in the chat and the context of the conversation, if available. Ensure responses are concise and directly answer the user's query. |
|
|
|
Error Handling: If any data is missing or the query cannot be answered due to incomplete information, briefly specify the error (e.g., "No speaker information found"). |
|
""" |
|
|
|
api_key = os.getenv("OPEN_API_KEY") |
|
|
|
if api_key: |
|
print("OpenAI: API Key retrieved successfully.") |
|
openai.api_key = api_key |
|
else: |
|
print("OpenAI: API Key not found. Please set the environment variable.") |
|
|
|
|
|
|
|
groq_api_key = os.environ.get("GROQ_KEY")
if groq_api_key:
    print("GROQ: API Key retrieved successfully.")
else:
    print("GROQ: API Key not found. Please set the GROQ_KEY environment variable.")

PORT = os.environ.get("PORT")
print(f"PORT: {PORT}")
|
|
|
def find_key( |
|
nested_structure: Union[list, dict], key_to_find: str |
|
) -> Union[dict, None]: |
|
|
|
""" |
|
Recursively searches for a specified key within a nested structure that can be |
|
either a list or a dictionary. If the key is found, returns the value associated with the key. |
|
The search proceeds depth-first through dictionaries and iterates through lists. |
|
|
|
:param nested_structure: (Union[list, dict]) The nested structure to search through. |
|
It can be a complex structure containing nested lists and dictionaries. |
|
:param key_to_find: (str) The key to search for in the nested structure. |
|
Returns a unique id. |
|
|
|
Example of a nested structure and how to call this function: |
|
|
|
[ |
|
[[],{}], |
|
[[],{}], |
|
[[],{ |
|
'data': { |
|
'product':{'name':'imac'} |
|
}, |
|
'metadata':{} |
|
}], |
|
] |
|
|
|
Example output |
|
{'name':'imac'} |
|
|
|
:returns: Union[dict, None]: The value associated with the specified key if found; otherwise, None. |
|
:returns: str: A unique id. |
|
""" |
|
|
|
if isinstance(nested_structure, dict): |
|
|
|
if key_to_find in nested_structure: |
|
return nested_structure[key_to_find] |
|
|
|
else: |
|
            for value in nested_structure.values():
                result = find_key(value, key_to_find)
                if result is not None:
                    return result
|
|
|
elif isinstance(nested_structure, list): |
|
|
|
for item in nested_structure: |
|
            result = find_key(item, key_to_find)
            if result is not None:
                return result
|
|
|
class Metadata(ABC): |
|
def __init__(self) -> None: |
|
super().__init__() |
|
self.documents = [] |
|
self.ids = [] |
|
|
|
def generate_ids(self): |
|
self.ids = [str(uuid4()) for _ in self.documents] |
|
|
|
@abstractmethod |
|
    def load(self):
        pass
|
|
|
@abstractmethod |
|
def generate_metadata(self, *args, **kwargs): |
|
pass |
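

# Every Metadata subclass follows the same lifecycle: load() populates
# self.documents, then generate_ids() assigns one UUID per document, e.g.
# (hypothetical path):
#
#   loader = Pdf(["handbook.pdf"])
#   loader.load()
#   loader.documents, loader.ids  # parallel lists consumed by the vector store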
|
|
|
|
|
class Pdf(Metadata): |
|
def __init__(self, files_path: list) -> None: |
|
super().__init__() |
|
self.files_path = files_path |
|
|
|
def load(self): |
|
self.load_pdfs() |
|
self.generate_ids() |
|
|
|
    def load_pdfs(self) -> None:
|
for file_path in self.files_path: |
|
loader = PyPDFLoader(file_path) |
|
pages = loader.load_and_split() |
|
for page in pages: |
|
page.metadata = self.generate_metadata(page=page) |
|
self.documents.extend(pages) |
|
|
|
def generate_metadata(self, *args, **kwargs): |
|
page = kwargs.get("page") |
|
page.metadata["test"] = 1 |
|
return page.metadata |
|
|
|
|
|
class Json(Metadata): |
|
def __init__( |
|
self, |
|
file_path: str, |
|
jq_schema: str = ".", |
|
content_key: str = None, |
|
        metadata_keys: list = None,
|
) -> None: |
|
super().__init__() |
|
self.file_path = file_path |
|
self.jq_schema = jq_schema |
|
self.content_key = content_key |
|
        self.metadata_keys = metadata_keys or []
|
|
|
def load(self): |
|
self.load_json() |
|
self.generate_ids() |
|
|
|
def load_json(self): |
|
if self.metadata_keys: |
|
loader = JSONLoader( |
|
file_path=self.file_path, |
|
jq_schema=self.jq_schema, |
|
content_key=self.content_key, |
|
metadata_func=self.generate_metadata, |
|
) |
|
|
|
elif self.content_key: |
|
loader = JSONLoader( |
|
file_path=self.file_path, |
|
jq_schema=self.jq_schema, |
|
content_key=self.content_key, |
|
text_content=False, |
|
) |
|
else: |
|
loader = JSONLoader( |
|
file_path=self.file_path, jq_schema=self.jq_schema, text_content=False |
|
) |
|
pages = loader.load() |
|
self.documents.extend(pages) |
|
|
|
def generate_metadata(self, record: dict, metadata: dict) -> dict: |
|
for key in self.metadata_keys: |
|
value = find_key(record, key) |
|
if value: |
|
metadata[key] = value |
|
return metadata |
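
    # Example (hypothetical file and keys): load a Google Chat dump where each
    # record's "text" field becomes page_content and sender/timestamp become
    # metadata:
    #
    #   Json("chats.json", jq_schema=".messages[]", content_key="text",
    #        metadata_keys=["sender", "timestamp"]).load()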
|
|
|
|
|
class Csv(Metadata): |
|
def __init__( |
|
self, file_path: str, csv_args: dict = None, source_column: str = None |
|
) -> None: |
|
super().__init__() |
|
self.file_path = file_path |
|
self.csv_args = csv_args |
|
self.source_column = source_column |
|
|
|
def load(self): |
|
self.load_csv() |
|
self.generate_ids() |
|
|
|
def load_csv(self): |
|
        if self.csv_args:
            # Example csv_args:
            #   csv_args = {
            #       'delimiter': ',',
            #       'quotechar': '"',
            #       'fieldnames': ['MLB Team', 'Payroll in millions', 'Wins'],
            #   }
            loader = CSVLoader(file_path=self.file_path, csv_args=self.csv_args)
|
elif self.source_column: |
|
loader = CSVLoader( |
|
file_path=self.file_path, source_column=self.source_column |
|
) |
|
else: |
|
loader = CSVLoader(file_path=self.file_path) |
|
pages = loader.load() |
|
for page in pages: |
|
page.metadata = self.generate_metadata(page=page) |
|
self.documents.extend(pages) |
|
|
|
def generate_metadata(self, *args, **kwargs): |
|
page = kwargs.get("page") |
|
page.metadata["length"] = len(page.page_content) |
|
return page.metadata |
|
|
|
|
|
class Text(Metadata): |
|
def __init__(self, files_path: list) -> None: |
|
super().__init__() |
|
self.files_path = files_path |
|
|
|
def load(self): |
|
self.load_texts() |
|
self.generate_ids() |
|
|
|
def load_texts(self): |
|
for file_path in self.files_path: |
|
loader = TextLoader(file_path) |
|
pages = loader.load() |
|
|
|
for page in pages: |
|
page.metadata = self.generate_metadata(page=page) |
|
self.documents.extend(pages) |
|
|
|
|
def generate_metadata(self, *args, **kwargs): |
|
page = kwargs.get("page") |
|
page.metadata["length"] = len(page.page_content) |
|
return page.metadata |
|
|
|
|
|
class Html(Metadata): |
|
def __init__(self, files_path: list) -> None: |
|
super().__init__() |
|
self.files_path = files_path |
|
|
|
def load(self): |
|
self.load_html() |
|
self.generate_ids() |
|
|
|
def load_html(self): |
|
for file_path in self.files_path: |
|
loader = BSHTMLLoader(file_path, bs_kwargs={"features": "html.parser"}) |
|
pages = loader.load() |
|
for page in pages: |
|
page.metadata = self.generate_metadata(page=page) |
|
self.documents.extend(pages) |
|
|
|
def generate_metadata(self, *args, **kwargs): |
|
page = kwargs.get("page") |
|
page.metadata["length"] = len(page.page_content) |
|
return page.metadata |
|
|
|
|
|
class Image(Metadata): |
|
def __init__(self, directory_path: str, extension: str = None) -> None: |
|
        super().__init__()
        self.directory_path = directory_path
        self.extension = extension
|
|
|
def load(self): |
|
self.load_images() |
|
self.generate_ids() |
|
|
|
def load_images(self): |
|
if self.extension: |
|
pattern = os.path.join(self.directory_path, f"**/*{self.extension}") |
|
else: |
|
pattern = os.path.join(self.directory_path, "**/*") |
|
|
|
        image_paths = [p for p in glob(pattern, recursive=True) if os.path.isfile(p)]
        print(image_paths)
|
for image_path in image_paths: |
|
self.documents.append( |
|
Document(page_content=image_path, metadata={"image_path": image_path}) |
|
) |
|
|
|
    def generate_metadata(self, *args, **kwargs):
        pass

|
class Model(ABC): |
|
def __init__(self, model_name: str, system_prompt: str) -> None: |
|
super().__init__() |
|
self.model = None |
|
self.system_prompt = system_prompt |
|
self.model_name = model_name |
|
self.device = ( |
|
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") |
|
) |
|
|
|
@abstractmethod |
|
def get_embeddings(self, input_text: str): |
|
pass |
|
|
|
@abstractmethod |
|
def get_embedding_dimension(self, dummy_text: str = "Hello World!"): |
|
pass |
|
|
|
|
|
class MiniLM_L6_v2(Model): |
|
def __init__(self, model_name: str, system_prompt) -> None: |
|
super().__init__(model_name, system_prompt) |
|
self.model = SentenceTransformer("all-MiniLM-L6-v2") |
|
|
|
def get_embedding_dimension(self, dummy_text: str = "Hello World!"): |
|
return len(self.get_embeddings(dummy_text)) |
|
|
|
def get_embeddings(self, input_text: str): |
|
embeddings = self.model.encode(input_text) |
|
return embeddings |
|
|
|
|
|
class TextEmbedding3Large(Model): |
|
def __init__(self, model_name: str, system_prompt) -> None: |
|
super().__init__(model_name, system_prompt) |
|
|
|
def get_embedding_dimension(self, dummy_text: str = "Hello World!"): |
|
return len(self.get_embeddings(dummy_text)) |
|
|
|
def get_embeddings(self, input_text: str): |
|
if isinstance(input_text, str): |
|
input_text = [input_text] |
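
        # Note: this uses the legacy openai<1.0 SDK interface
        # (openai.Embedding.create); with openai>=1.0 the equivalent call is
        # client.embeddings.create(...).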
|
|
|
response = openai.Embedding.create(model=self.model_name, input=input_text) |
|
embeddings = [data["embedding"] for data in response["data"]] |
|
embeddings = np.array(embeddings).astype("float32") |
|
if embeddings.ndim == 2 and embeddings.shape[0] == 1: |
|
embeddings = embeddings.flatten() |
|
        return embeddings

|
class UAE_Large_V1(Model): |
|
def __init__( |
|
self, |
|
model_name: str, |
|
system_prompt, |
|
cache_dir: str = INDEX_PATH, |
|
) -> None: |
|
super().__init__(model_name, system_prompt) |
|
self.cache_dir = cache_dir |
|
self.model, self.tokenizer = self.load_or_download_model_and_tokenizer() |
|
|
|
def load_or_download_model_and_tokenizer(self): |
|
model_path = os.path.join(self.cache_dir, "_model.pt") |
|
tokenizer_path = os.path.join(self.cache_dir, "_tokenizer") |
|
print(model_path, tokenizer_path) |
|
|
|
if not os.path.exists(self.cache_dir): |
|
os.makedirs(self.cache_dir) |
|
|
|
if os.path.exists(model_path) and os.path.exists(tokenizer_path): |
|
print(f"Loading model and tokenizer from {self.cache_dir}") |
|
model = torch.load(model_path) |
|
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) |
|
else: |
|
print(f"Downloading and saving model and tokenizer to {self.cache_dir}") |
|
model = AutoModel.from_pretrained(self.model_name) |
|
tokenizer = AutoTokenizer.from_pretrained(self.model_name) |
|
|
|
torch.save(model, model_path) |
|
tokenizer.save_pretrained(tokenizer_path) |
|
|
|
        # Move the model to the same device that get_embeddings sends inputs to,
        # and disable dropout for inference.
        model.to(self.device)
        model.eval()
        return model, tokenizer
|
|
|
def get_embedding_dimension(self, dummy_text: str = "Hello World!"): |
|
embeddings = self.get_embeddings(dummy_text) |
|
return len(embeddings) |
|
|
|
def get_embeddings(self, input_text: str): |
|
if isinstance(input_text, str): |
|
input_text = [input_text] |
|
|
|
inputs = self.tokenizer( |
|
input_text, |
|
padding=True, |
|
truncation=True, |
|
return_tensors="pt", |
|
max_length=512, |
|
).to(self.device) |
|
with torch.no_grad(): |
|
last_hidden_state = self.model(**inputs, return_dict=True).last_hidden_state |
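
        # CLS pooling: use the hidden state of the first ([CLS]) token as the
        # sentence embedding.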
|
|
|
embeddings = last_hidden_state[:, 0] |
|
embeddings = embeddings.cpu().numpy() |
|
if embeddings.ndim == 2 and embeddings.shape[0] == 1: |
|
embeddings = embeddings.flatten() |
|
        return embeddings

|
class ClipForImages(Model):
    def __init__(self, model_name: str, system_prompt: str) -> None:
        super().__init__(model_name, system_prompt)
        self.model = SentenceTransformer("clip-ViT-B-32")

    def get_embedding_dimension(self, dummy_text: str = "Hello World!"):
        return len(self.get_embeddings(dummy_text))

    def get_embeddings(self, input_text: str):
        # CLIP encodes text (and PIL images) into a shared embedding space.
        return self.model.encode(input_text)
|
|
|
class VectorSpace: |
|
def __init__(self, model, file_path_to_save_or_load) -> None: |
|
self.model = model |
|
self.file_path = file_path_to_save_or_load |
|
self.vector_store = None |
|
self.build_vector_space() |
|
|
|
def build_vector_space(self): |
|
if self.vector_store is not None: |
|
print("Warning: Vector store is already created.") |
|
return |
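
        # IndexFlatL2 performs exact (brute-force) L2 search over vectors sized
        # to the model's embedding dimension.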
|
index = faiss.IndexFlatL2(self.model.get_embedding_dimension()) |
|
self.vector_store = FAISS( |
|
embedding_function=self.model.get_embeddings, |
|
index=index, |
|
docstore=InMemoryDocstore(), |
|
index_to_docstore_id={}, |
|
        )
|
def add_docs(self, documents, ids): |
|
if not self.vector_store: |
|
raise ValueError(f"Build vector Space First") |
|
self.vector_store.add_documents(documents=documents, ids=ids) |
|
|
|
|
|
def search_docs(self, query: str, k: int = 3, filter: dict = {}): |
|
if not self.vector_store: |
|
raise ValueError(f"Build vector Space First") |
|
results = self.vector_store.similarity_search(query, k=k, filter=filter) |
|
return results |
|
|
|
|
|
def search_with_score(self, query: str, k: int = 3, filter: dict = {}): |
|
if not self.vector_store: |
|
raise ValueError(f"Build vector Space First") |
|
results = self.vector_store.similarity_search_with_score( |
|
query, k=k, filter=filter |
|
) |
|
return results |
|
|
|
def save_local(self): |
|
if not self.vector_store: |
|
raise ValueError(f"Build vector Space First") |
|
self.vector_store.save_local(self.file_path) |
|
print("Index Saved") |
|
|
|
def load_local(self): |
|
self.vector_store = FAISS.load_local( |
|
self.file_path, |
|
self.model.get_embeddings, |
|
allow_dangerous_deserialization=True, |
|
) |
|
print("Index Loaded") |
|
|
|
|
|
class Controller: |
|
|
|
def __init__(self, input_json: dict) -> None: |
|
self.input_json = input_json |
|
self.document_loader = self.get_loader() |
|
self.model = self.get_model() |
|
self.index_path = self.get_index_path() |
|
|
|
|
|
if self.index_exists(): |
|
print(f"Index found, loading from {self.index_path}") |
|
self.vector_space = VectorSpace(self.model, self.index_path) |
|
self.vector_space.load_local() |
|
else: |
|
print("Index not found, building a new one") |
|
self.load_documents() |
|
self.vector_space = VectorSpace(self.model, self.index_path) |
|
self.vector_space.add_docs( |
|
self.document_loader.documents, self.document_loader.ids |
|
) |
|
self.vector_space.save_local() |
|
|
|
def get_index_path(self): |
|
files_path = self.input_json["files_path"] |
|
model_name = self.input_json["model_name"] |
|
if isinstance(files_path, list): |
|
files_path_str = "".join(files_path) |
|
elif isinstance(files_path, str): |
|
files_path_str = files_path |
|
else: |
|
raise ValueError("Invalid files_path: Expected str or list of str") |
|
|
|
unique_identifier = hashlib.md5( |
|
(files_path_str + model_name).encode() |
|
).hexdigest() |
|
|
|
index_dir = INDEX_PATH |
|
os.makedirs(index_dir, exist_ok=True) |
|
|
|
path = os.path.join(index_dir, f"index_{unique_identifier}.faiss") |
|
print(path) |
|
return path |
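
    # The path is deterministic: the same files_path + model_name combination
    # always hashes to the same static/index_<md5-hex>.faiss file, which is how
    # index_exists() finds a previously built index.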
|
|
|
def index_exists(self): |
|
return os.path.exists(self.index_path) |
|
|
|
|
|
def add_docs(self): |
|
if not self.vector_space: |
|
raise ValueError(f"Build vector Space First") |
|
self.vector_space.add_docs( |
|
self.document_loader.documents, self.document_loader.ids |
|
) |
|
print("Documents Added!") |
|
|
|
def search(self, query, k: int = 3, filter: dict = {}, with_score: bool = False): |
|
if with_score: |
|
results = self.vector_space.search_with_score(query, k, filter) |
|
else: |
|
results = self.vector_space.search_docs(query, k, filter) |
|
return results |
|
|
|
def get_loader(self): |
|
input_file_type = find_key(self.input_json, "type") |
|
files_path = find_key(self.input_json, "files_path") |
|
|
|
if input_file_type == "PDF": |
|
if not self.is_list(files_path): |
|
raise ValueError(f"PDF files path should be List") |
|
return Pdf(files_path) |
|
|
|
elif input_file_type == "JSON": |
|
if self.is_list(files_path): |
|
raise ValueError(f"JSON file path should be str") |
|
jq_schema = find_key(self.input_json, "jq_schema") or "." |
|
content_key = find_key(self.input_json, "content_key") |
|
metadata_keys = find_key(self.input_json, "metadata_keys") or [] |
|
return Json(files_path, jq_schema, content_key, metadata_keys) |
|
|
|
elif input_file_type == "CSV": |
|
if self.is_list(files_path): |
|
raise ValueError(f"CSV file path should be str") |
|
csv_args = find_key(self.input_json, "csv_args") or {} |
|
source_column = find_key(self.input_json, "source_column") |
|
return Csv(files_path, csv_args, source_column) |
|
|
|
elif input_file_type == "TEXT": |
|
if not self.is_list(files_path): |
|
raise ValueError(f"TEXT files path should be List") |
|
return Text(files_path) |
|
|
|
elif input_file_type == "HTML": |
|
if not self.is_list(files_path): |
|
raise ValueError(f"HTML files path should be List") |
|
return Html(files_path) |
|
|
|
elif input_file_type == "IMAGE": |
|
if self.is_list(files_path): |
|
raise ValueError(f"IMAGE files path should be str") |
|
            extension = find_key(self.input_json, "extension")
|
return Image(files_path, extension) |
|
else: |
|
raise ValueError(f"Unsupported file type: {input_file_type}") |
|
|
|
def get_model(self): |
|
model_name = find_key(self.input_json, "model_name") |
|
system_prompt = find_key(self.input_json, "system_prompt") |
|
if model_name == "all-MiniLM-L6-v2": |
|
return MiniLM_L6_v2(model_name, system_prompt) |
|
elif model_name == "text-embedding-3-large": |
|
return TextEmbedding3Large(model_name, system_prompt) |
|
elif model_name == "WhereIsAI/UAE-Large-V1": |
|
return UAE_Large_V1(model_name, system_prompt) |
|
else: |
|
raise ValueError(f"Unsupported model name: {model_name}") |
|
|
|
|
|
    def load_documents(self):
        if not self.document_loader:
            raise ValueError("No document loader configured")
        self.document_loader.load()
        print("Documents Loaded:", len(self.document_loader.documents))
|
|
|
def is_list(self, input_value): |
|
        return isinstance(input_value, list)

|
input_json = {
    "files_path": [os.path.join(INDEX_PATH, "Employee_handbook.pdf")],
    "type": "PDF",
    "system_prompt": pdf_prompt,
    "model_name": "WhereIsAI/UAE-Large-V1",
}
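
# Other loader types follow the same shape, e.g. (hypothetical file and keys):
#
#   input_json = {
#       "files_path": "chats.json",
#       "type": "JSON",
#       "jq_schema": ".messages[]",
#       "content_key": "text",
#       "metadata_keys": ["sender", "timestamp"],
#       "system_prompt": chat_prompt,
#       "model_name": "all-MiniLM-L6-v2",
#   }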
|
|
|
controller = Controller(input_json=input_json)

|
def respond(
    message: str,
    history: list,
    system_message: str = "",
    max_tokens: int = 512,  # defaults let gr.ChatInterface call fn(message, history)
    use_groq: bool = True,
    use_history: bool = True,
    max_history_length: int = 10,
):
|
""" |
|
Handles conversation with context, manages RAG flow, and streams responses. |
|
|
|
Args: |
|
message (str): User's query. |
|
history (list): Conversation history (user and assistant responses). |
|
        system_message (str): System prompt for the assistant (overridden below
            by the controller's configured system prompt).
|
max_tokens (int): Maximum tokens for the response. |
|
use_groq (bool): Whether to use Groq client or OpenAI API. |
|
use_history (bool): Whether to include history in the prompt. |
|
max_history_length (int): Maximum number of messages to keep in history. |
|
|
|
Yields: |
|
str: Streamed response from the model. |
|
""" |
|
|
|
system_message = controller.model.system_prompt |
|
print(controller.get_index_path()) |
|
messages = [{"role": "system", "content": system_message}] |
|
|
|
|
|
if use_history and history: |
|
trimmed_history = history[-max_history_length:] |
|
for user_msg, assistant_msg in trimmed_history: |
|
if user_msg: |
|
messages.append({"role": "user", "content": user_msg}) |
|
if assistant_msg: |
|
messages.append({"role": "assistant", "content": assistant_msg}) |
|
|
|
|
|
messages.append({"role": "user", "content": message}) |
|
|
|
|
|
print("\nUser Query:") |
|
print(message) |
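
    # RAG step: retrieve the k most similar chunks from the FAISS index and
    # inject them below as an extra system message for grounding.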
|
results = controller.search(message, with_score=True, k=3) |
|
|
|
relevant_pages = [] |
|
print("\nFetched Documents:") |
|
    for doc, score in results:
        # With IndexFlatL2, the score is an L2 distance: lower means more similar.
        print(f"* [score={score:.3f}] {doc.page_content} [{doc.metadata}]")
        relevant_pages.append(doc.page_content)
|
|
|
|
|
context = "\n".join(relevant_pages) |
|
if context.strip(): |
|
messages.append({"role": "system", "content": "Relevant documents: " + context}) |
|
|
|
|
|
|
|
if use_groq: |
|
|
|
client = Groq(api_key=groq_api_key) |
|
|
|
|
|
prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages) |
|
|
|
|
|
response = client.chat.completions.create( |
|
messages=[{"role": "user", "content": prompt}], |
|
model="llama-3.3-70b-versatile", |
|
stream=True, |
|
) |
|
cumulative_response = "" |
|
for chunk in response: |
|
if hasattr(chunk, "choices") and chunk.choices: |
|
delta = chunk.choices[0].delta |
|
token = getattr(delta, "content", "") |
|
if token: |
|
cumulative_response += token |
|
yield cumulative_response |
|
else: |
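        # Legacy openai<1.0 SDK call; with openai>=1.0 the equivalent is
        # client.chat.completions.create(...).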
|
|
|
completion = openai.ChatCompletion.create( |
|
model="gpt-4", |
|
messages=messages, |
|
max_tokens=max_tokens, |
|
temperature=0.1, |
|
top_p=0.1, |
|
stream=True, |
|
) |
|
response = "" |
|
for chunk in completion: |
|
token = chunk["choices"][0]["delta"].get("content", "") |
|
response += token |
|
yield response |
|
|
|
|
|
demo = gr.Blocks(fill_height=True) |
|
|
|
with demo: |
|
gr.Markdown("**Employee handbook assistant **") |
|
gr.Markdown("‼Disclaimer:‼️") |
|
|
|
chatbot = gr.ChatInterface( |
|
respond, |
|
examples=[ |
|
[ |
|
"what are the rules regarding staying in late and ordering food, on the company?" |
|
], |
|
], |
|
title="Employee handbook assistant 👩⚕️", |
|
) |
|
|
|
if __name__ == "__main__":
    demo.launch(debug=True)
|
|
|
|