Yash Sachdeva committed
Commit c12ddc3 · 1 Parent(s): dcd2d54

question_paper

Files changed (3)
  1. Dockerfile +13 -9
  2. Requirements.txt +7 -0
  3. question_paper.py +44 -31
Dockerfile CHANGED
@@ -1,9 +1,13 @@
- FROM python:3.9-slim
- WORKDIR /app
- COPY requirements.txt ./requirements.txt
- RUN apt-get update && apt-get -y install libpq-dev gcc && pip install psycopg2
- RUN pip install uvicorn
- RUN pip install -r requirements.txt
- COPY . /app
- ENTRYPOINT ["uvicorn", "question_paper:app"]
- CMD ["--host", "0.0.0.0", "--port", "7860"]
+ FROM python:3.10.9
+
+ # Copy the current directory contents into the container at .
+ COPY . .
+
+ # Set the working directory to /
+ WORKDIR /
+
+ # Install requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /requirements.txt
+
+ # Start the FastAPI app on port 7860, the default port expected by Spaces
+ CMD ["uvicorn", "question_paper:app", "--host", "0.0.0.0", "--port", "7860"]
Requirements.txt ADDED
@@ -0,0 +1,7 @@
+ fastapi==0.99.1
+ uvicorn
+ requests
+ pydantic==1.10.12
+ langchain
+ clarifai
+ Pillow
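With requests from this list, the endpoints added in question_paper.py below can be exercised end to end. A minimal client sketch, assuming the service is reachable on localhost:7860 and that input_prompt travels as a query parameter (FastAPI's default for a bare str argument on a POST route); the prompt text is only an illustration.

# client_example.py -- hypothetical, not part of this commit
import requests

BASE_URL = "http://localhost:7860"  # assumption: container/Space reachable here

# Home route declared in question_paper.py
print(requests.get(f"{BASE_URL}/").json())  # {'detail': 'Welcome to TextGen!'}

# /api/generate takes input_prompt as a query parameter
resp = requests.post(
    f"{BASE_URL}/api/generate",
    params={"input_prompt": "Class 10 physics syllabus: optics, electricity, magnetism"},
)
print(resp.json())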
question_paper.py CHANGED
@@ -1,34 +1,47 @@
- import time
- import copy
- import asyncio
- import requests
-
- from fastapi import FastAPI, Request
- from llama_cpp import Llama
- from sse_starlette import EventSourceResponse
- # Load the model
- print("Loading model...")
- llm = Llama(model_path="./llama-2-13b-chat.ggmlv3.q4_1.bin") # change based on the location of models
- print("Model loaded!")
-
- app = FastAPI()
-
- @app.get("/llama")
- async def llama(request: Request, question:str):
-     stream = llm(
-         f"""{question}""",
-         max_tokens=100,
-         stop=["\n", " Q:"],
-         stream=True,
-     )
-     async def async_generator():
-         for item in stream:
-             yield item
-     async def server_sent_events():
-         async for item in async_generator():
-             if await request.is_disconnected():
-                 break
-             result = copy.deepcopy(item)
-             text = result["choices"][0]["text"]
-             yield {"data": text}
-     return EventSourceResponse(server_sent_events())
+ from pydantic import BaseModel
+
+ from .ConfigEnv import config
+ from fastapi.middleware.cors import CORSMiddleware
+
+ from langchain.llms import Clarifai
+ from langchain.chains import LLMChain
+ from langchain.prompts import PromptTemplate
+ from TextGen import app
+
+ class Generate(BaseModel):
+     text:str
+
+ def generate_text(prompt: str):
+     if prompt == "":
+         return {"detail": "Please provide the syllabus!"}
+     else:
+         prompt = PromptTemplate(template=prompt, input_variables=['Prompt'])
+         llm = Clarifai(
+             pat = config.CLARIFAI_PAT,
+             user_id = config.USER_ID,
+             app_id = config.APP_ID,
+             model_id = config.MODEL_ID,
+             model_version_id=config.MODEL_VERSION_ID,
+         )
+         llmchain = LLMChain(
+             prompt=prompt,
+             llm=llm
+         )
+         llm_response = llmchain.run({"Prompt": prompt})
+         return Generate(text=llm_response)
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ @app.get("/", tags=["Home"])
+ def api_home():
+     return {'detail': 'Welcome to TextGen!'}
+
+ @app.post("/api/generate", summary="Generate text from prompt", tags=["Generate"], response_model=Generate)
+ def inference(input_prompt: str):
+     return generate_text(prompt=input_prompt)
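The new question_paper.py imports config from .ConfigEnv and app from TextGen, and neither file is part of this commit. A minimal sketch of what they might contain, assuming ConfigEnv reads the Clarifai credentials from environment variables via pydantic's BaseSettings (pydantic is pinned to 1.10.12 above, so the v1 API applies) and TextGen/__init__.py owns the shared FastAPI instance; the file paths, class names, and fields here are assumptions, not the author's actual code.

# TextGen/__init__.py -- assumed layout
from fastapi import FastAPI

app = FastAPI(title="TextGen")


# TextGen/ConfigEnv.py -- assumed layout, pydantic v1 BaseSettings
from pydantic import BaseSettings

class Settings(BaseSettings):
    # Clarifai credentials referenced by question_paper.py
    CLARIFAI_PAT: str
    USER_ID: str
    APP_ID: str
    MODEL_ID: str
    MODEL_VERSION_ID: str

    class Config:
        env_file = ".env"  # optional local override for the secrets

config = Settings()

The relative import from .ConfigEnv suggests question_paper.py sits inside the same package as ConfigEnv.py, so the sketch places both under TextGen/.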