Update app.py
app.py
CHANGED
@@ -2,26 +2,22 @@ from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-from fastapi import FastAPI, HTTPException
-from fastapi.responses import StreamingResponse, JSONResponse
-import pandas as pd
-import os
-import requests
-from io import StringIO
 from fastapi.middleware.cors import CORSMiddleware
-
-from tqdm import tqdm
+import logging
 
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 app = FastAPI()
 
 # Enable CORS
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["*"],
+    allow_origins=["*"],  # Allow all origins (replace with your frontend URL in production)
     allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
+    allow_methods=["*"],
+    allow_headers=["*"],
 )
 
 # Load your fine-tuned model and tokenizer
@@ -31,6 +27,7 @@ try:
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 except Exception as e:
+    logger.error(f"Failed to load model or tokenizer: {str(e)}")
     raise RuntimeError(f"Failed to load model or tokenizer: {str(e)}")
 
 # Define the general prompt template
@@ -71,12 +68,16 @@ def generate_text(request: GenerateRequest):
     المادة = request.المادة
     المستوى = request.المستوى
 
+    logger.info(f"Received request: المادة={المادة}, المستوى={المستوى}")
+
     if not المادة or not المستوى or not isinstance(المادة, str) or not isinstance(المستوى, str):
+        logger.error("المادة والمستوى مطلوبان ويجب أن يكونا نصًا.")
         raise HTTPException(status_code=400, detail="المادة والمستوى مطلوبان ويجب أن يكونا نصًا.")
 
     try:
         # Format the prompt with user inputs
         arabic_prompt = general_prompt_template.format(المادة=المادة, المستوى=المستوى)
+        logger.info(f"Formatted prompt: {arabic_prompt}")
 
         # Tokenize the prompt
         inputs = tokenizer(arabic_prompt, return_tensors="pt", max_length=512, truncation=True)
@@ -94,6 +95,7 @@ def generate_text(request: GenerateRequest):
 
         # Decode the generated text
         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        logger.info(f"Generated text: {generated_text}")
 
         # Remove the prompt from the generated text
         generated_text = generated_text.replace(arabic_prompt, "").strip()
@@ -101,6 +103,7 @@ def generate_text(request: GenerateRequest):
         return {"generated_text": generated_text}
 
     except Exception as e:
+        logger.error(f"Error during text generation: {str(e)}")
         raise HTTPException(status_code=500, detail=f"Error during text generation: {str(e)}")
 
 @app.get("/")
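With this change, each call into generate_text logs the received fields, the formatted prompt, and the decoded output at INFO level, and failures are logged before the HTTP error is raised. A minimal client sketch for exercising the endpoint, assuming the app runs locally on port 8000 and generate_text is mounted at POST /generate (the route decorator is outside the visible hunks, so that path is an assumption):

import requests  # client-side only; the server no longer imports requests

# Hypothetical host and route; match them to the actual decorator on generate_text.
url = "http://localhost:8000/generate"

# Example values for the two required string fields of GenerateRequest:
# المادة (subject) and المستوى (level).
payload = {"المادة": "الرياضيات", "المستوى": "الثانوي"}

resp = requests.post(url, json=payload)
resp.raise_for_status()  # 400 if either field is missing or not a string
print(resp.json()["generated_text"])

A missing or non-string field now produces both a logged error and the 400 response, so rejected requests show up in the server logs rather than only on the client.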
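The new inline comment marks allow_origins=["*"] as a development setting. A sketch of the production shape it points to, using a placeholder origin that is not part of this commit:

from fastapi.middleware.cors import CORSMiddleware

app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://your-frontend.example"],  # placeholder; replace with the real frontend URL
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

Pinning the origin list matters once allow_credentials=True is involved: browsers refuse credentialed responses that carry a wildcard Access-Control-Allow-Origin, so a production deployment needs the explicit frontend URL in any case.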