Commit 716ce63
Parent(s): e08f8de
Ilyas KHIAT committed: app api files

Files changed:
- .dockerignore +11 -0
- .gitignore +2 -0
- Dockerfile +13 -0
- main.py +125 -0
- prompt.py +14 -0
- rag.py +84 -0
- requirements.txt +15 -0
.dockerignore
ADDED
@@ -0,0 +1,11 @@
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+.Python
+env/
+venv/
+.git
+.dockerignore
+Dockerfile
+*.md
.gitignore
ADDED
@@ -0,0 +1,2 @@
+__pycache__/
+.env
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.12
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py
ADDED
@@ -0,0 +1,125 @@
+from fastapi import FastAPI, HTTPException, UploadFile, File, Request, Depends, status
+from fastapi.security import OAuth2PasswordBearer
+from pydantic import BaseModel, Json
+from typing import Optional
+from pinecone import Pinecone, ServerlessSpec
+from uuid import uuid4
+import os
+from dotenv import load_dotenv
+from rag import *
+from fastapi.responses import StreamingResponse
+import json
+from prompt import *
+from typing import Literal
+import time
+from fastapi.middleware.cors import CORSMiddleware
+
+load_dotenv()
+
+## setup pinecone index
+pinecone_api_key = os.environ.get("PINECONE_API_KEY")
+
+pc = Pinecone(api_key=pinecone_api_key)
+
+index_name = os.environ.get("INDEX_NAME")  # change if desired
+
+existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
+
+if index_name not in existing_indexes:
+    pc.create_index(
+        name=index_name,
+        dimension=1536,
+        metric="cosine",
+        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
+    )
+    while not pc.describe_index(index_name).status["ready"]:
+        time.sleep(1)
+
+index = pc.Index(index_name)
+
+vector_store = PineconeVectorStore(index=index, embedding=embedding)
+
+## setup authorization
+api_keys = [os.environ.get("FASTAPI_API_KEY")]
+
+oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")  # use token authentication
+
+
+def api_key_auth(api_key: str = Depends(oauth2_scheme)):
+    if api_key not in api_keys:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Forbidden"
+        )
+
+dev_mode = os.environ.get("DEV")
+
+if dev_mode == "True":
+    app = FastAPI()
+else:
+    app = FastAPI(dependencies=[Depends(api_key_auth)])
+
+app.add_middleware(CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"])
+
+
+class UserInput(BaseModel):
+    query: str
+    stream: Optional[bool] = False
+    messages: Optional[list[dict]] = []
+
+class ChunkToDB(BaseModel):
+    message: str
+    title: str
+
+
+@app.post("/add_chunk_to_db")
+async def add_chunk_to_db(chunk: ChunkToDB):
+    try:
+        title = chunk.title
+        message = chunk.message
+        return get_vectorstore(text_chunk=message, index=index, title=title)
+    except Exception as e:
+        return {"message": str(e)}
+
+
+@app.get("/list_vectors")
+async def list_vectors():
+    try:
+        return index.list()
+    except Exception as e:
+        return {"message": str(e)}
+
+
+@app.post("/generate")
+async def generate(user_input: UserInput):
+    try:
+        print(user_input.stream, user_input.query)
+        if user_input.stream:
+            return StreamingResponse(generate_stream(user_input.query, user_input.messages, index_name=index, stream=True, vector_store=vector_store), media_type="application/json")
+        else:
+            return generate_stream(user_input.query, user_input.messages, index_name=index, stream=False, vector_store=vector_store)
+    except Exception as e:
+        return {"message": str(e)}
+
+@app.post("/retreive_context")
+async def retreive_context_response(query: str):
+    try:
+        return retreive_context(index=index, query=query, vector_store=vector_store)
+    except Exception as e:
+        return {"message": str(e)}
+
+
+@app.delete("/delete_vector")
+async def delete_vector(filename_id: str):
+    try:
+        return index.delete(ids=[filename_id])
+    except Exception as e:
+        return {"message": str(e)}
+
+@app.get("/check_server")
+async def check_server():
+    return {"message": "Server is running"}
+
+@app.get("/")
+async def read_root():
+    return {"message": "Welcome to the AI API"}
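For reference, a minimal client sketch (hypothetical, not part of the commit) for exercising the /generate endpoint. The host and port follow the Dockerfile's uvicorn command, and the bearer token is assumed to be the FASTAPI_API_KEY value checked by api_key_auth (the check is skipped entirely when DEV=True):

import requests  # hypothetical client script, not part of the commit

API_URL = "http://localhost:7860"  # port taken from the Dockerfile CMD
headers = {"Authorization": "Bearer <FASTAPI_API_KEY>"}  # placeholder token

# Non-streaming request; set "stream": True to consume a StreamingResponse instead.
resp = requests.post(
    f"{API_URL}/generate",
    json={"query": "What are Ilyas' main projects?", "stream": False, "messages": []},
    headers=headers,
)
print(resp.json())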
prompt.py
ADDED
@@ -0,0 +1,14 @@
+template = '''
+You are an AI assistant for Ilyas Khiat, a future engineer majoring in AI and software engineering. Your job is to respond to visitors in a persuasive, concise, and brilliant way, always making Ilyas' profile stand out. Your responses must highlight his technical expertise, his projects, how he adds value to potential employers, and his soft skills. Always provide the relevant links (e.g., LinkedIn: https://www.linkedin.com/in/ilyas-khiat-148a73254/, GitHub: https://github.com/Ilyas-Khiat, projects, hobbies) to substantiate the information. Ensure your tone is pleasant, engaging, and matches the language of the user's query. The goal is to convince recruiters that Ilyas is the best fit for their business needs.
+The context retrieved for the user's query is:
+{context}
+
+The history of the conversation is:
+{history}
+
+The user's query is:
+{query}
+
+Please respond to the user's query concisely, in well-formatted Markdown with paragraphs and emojis, highlighting Ilyas' technical expertise, his projects, how he adds value to potential employers, and his soft skills. Add life to your answer and emphasize keywords in bold; keep it short, at most 150 words or 200 tokens. Ensure your tone is pleasant, engaging, and matches the language of the user's query, and that your response is professional and straight to the point rather than bluffing or exaggerating. The goal is to convince recruiters that Ilyas is the best fit for their business needs.
+
+'''
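For clarity, a minimal sketch (not part of the commit) of how the template's three placeholders get rendered; generate_stream in rag.py does the equivalent through its prompt | llm chain:

from langchain_core.prompts import PromptTemplate
from prompt import template

prompt = PromptTemplate.from_template(template)
# Placeholder values below are illustrative only.
print(prompt.format(context="[retrieved chunks]", history=[], query="Tell me about Ilyas"))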
rag.py
ADDED
@@ -0,0 +1,84 @@
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_pinecone import PineconeVectorStore
+from langchain_core.documents import Document
+
+from langchain_openai import ChatOpenAI
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import PromptTemplate
+from uuid import uuid4
+from prompt import *
+
+from pydantic import BaseModel, Field
+from dotenv import load_dotenv
+import os
+
+from langchain_core.tools import tool
+
+import unicodedata
+
+load_dotenv()
+index_name = os.environ.get("INDEX_NAME")
+# Global initialization
+embedding_model = "text-embedding-3-small"
+
+embedding = OpenAIEmbeddings(model=embedding_model)
+vector_store = PineconeVectorStore(index_name=index_name, embedding=embedding)  # index_name=, not index=: index expects a pinecone Index object
+
+def get_vectorstore(text_chunk, index, title, model="text-embedding-3-small"):
+    try:
+        embedding = OpenAIEmbeddings(model=model)
+        print("loaded embedding")
+        vector_store = PineconeVectorStore(index=index, embedding=embedding)
+        print("loaded vector store")
+        document = Document(
+            page_content=text_chunk,
+            metadata={"title": title}
+        )
+        print("loaded document")
+        uuid = f"{title}_{uuid4()}"
+
+        vector_store.add_documents(documents=[document], ids=[uuid])
+        print("added document")
+        return {"filename_id": uuid}
+
+    except Exception as e:
+        print(e)
+        return False
+
+
+def retreive_context(query: str, index: str, model="text-embedding-3-small", vector_store=None):
+    try:
+        #vector_store = PineconeVectorStore(index=index, embedding=embedding)
+        retriever = vector_store.as_retriever(
+            search_type="similarity_score_threshold",
+            search_kwargs={"k": 3, "score_threshold": 0.5},
+        )
+        return retriever.invoke(query)
+
+    except Exception as e:
+        print(e)
+        return False
+
+llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.5)
+
+def generate_stream(query: str, messages=[], model="gpt-4o-mini", max_tokens=300, temperature=0.5, index_name="", stream=True, vector_store=None):
+    try:
+        print("init chat")
+        print("init template")
+        prompt = PromptTemplate.from_template(template)
+        print("retreiving context")
+        context = retreive_context(query=query, index=index_name, vector_store=vector_store)
+        print(f"Context: {context}")
+        llm_chain = prompt | llm | StrOutputParser()
+
+        print("streaming")
+        if stream:
+            return llm_chain.stream({"context": context, "history": messages, "query": query})
+        else:
+            return llm_chain.invoke({"context": context, "history": messages, "query": query})
+
+    except Exception as e:
+        print(e)
+        return False
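A minimal driver sketch (hypothetical, not part of the commit) showing how these helpers can be exercised directly, assuming OPENAI_API_KEY, PINECONE_API_KEY, and INDEX_NAME are set in .env:

from rag import generate_stream, vector_store

# Stream tokens straight from the RAG chain, bypassing the FastAPI layer.
for token in generate_stream(
    query="What projects has Ilyas worked on?",
    stream=True,
    vector_store=vector_store,
):
    print(token, end="", flush=True)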
requirements.txt
ADDED
@@ -0,0 +1,15 @@
+fastapi
+uvicorn
+python-multipart
+pydantic
+langchain-pinecone
+pinecone-notebooks
+pinecone-client[grpc]
+async-timeout
+pymupdf
+python-dotenv
+typing-extensions
+langchain
+langchain-openai
+langchain-community
+langchain-pinecone