Upload 2 files
fluxai.py
ADDED
@@ -0,0 +1,76 @@
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
import io
import requests
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from pymongo import MongoClient  # needed for the MongoDB client created below
from models import SuccessResponse  # NOTE: assumed import path; point this at wherever SuccessResponse is actually defined

class FluxAI(BaseModel):
    user_id: int
    args: str

router = APIRouter()

load_dotenv()
MONGO_URL = os.environ["MONGO_URL"]
HUGGING_TOKEN = os.environ["HUGGING_TOKEN"]

client_mongo = MongoClient(MONGO_URL)
db = client_mongo["tiktokbot"]
collection = db["users"]

async def schellwithflux(args):
    # Send the prompt to the FLUX.1-schnell inference API and return the raw image bytes.
    # requests.post is blocking, so this call holds up the event loop while the image is generated.
    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
    headers = {"Authorization": f"Bearer {HUGGING_TOKEN}"}
    payload = {"inputs": args}
    response = requests.post(API_URL, headers=headers, json=payload)
    if response.status_code != 200:
        print(f"Error status {response.status_code}")
        return None
    return response.content

def get_user_tokens_gpt(user_id):
    # Return the user's current token balance, or 0 if the user is unknown.
    user = collection.find_one({"user_id": user_id})
    if not user:
        return 0
    return user.get("tokens", 0)

def deduct_tokens_gpt(user_id, amount):
    # Subtract `amount` tokens if the user's balance covers it; return True on success.
    tokens = get_user_tokens_gpt(user_id)
    if tokens >= amount:
        collection.update_one(
            {"user_id": user_id},
            {"$inc": {"tokens": -amount}}
        )
        return True
    else:
        return False

@router.post("/akeno/fluxai", response_model=SuccessResponse, responses={422: {"model": SuccessResponse}})
async def fluxai_image(payload: FluxAI):
    if payload.user_id == 1191668125:
        return SuccessResponse(status="False", randydev={"message": "Only Developer."})

    if deduct_tokens_gpt(payload.user_id, amount=20):
        try:
            image_bytes = await schellwithflux(payload.args)
            if image_bytes is None:
                return SuccessResponse(
                    status="False",
                    randydev={"error": "Failed to generate an image"}
                )
            return StreamingResponse(io.BytesIO(image_bytes), media_type="image/jpeg")
        except Exception as e:
            return SuccessResponse(
                status="False",
                randydev={"error": f"An error occurred: {str(e)}"}
            )
    else:
        tokens = get_user_tokens_gpt(payload.user_id)
        return SuccessResponse(
            status="False",
            randydev={"error": f"Not enough tokens. Current tokens: {tokens}. Please support @xtdevs"}
        )
main.py
CHANGED
@@ -82,6 +82,7 @@ from gpytranslate import SyncTranslator
 
 import logging
 import functions as code
+from fluxai import router as fluxai_router
 
 logging.basicConfig(level=logging.ERROR)
 logging.basicConfig(level=logging.INFO)
@@ -125,6 +126,7 @@ collection = db["users"]
 trans = SyncTranslator()
 
 app = FastAPI(docs_url=None, redoc_url="/")
+app.include_router(fluxai_router, prefix="/api/v1")
 
 timeout = 100
 
@@ -195,7 +197,6 @@ RAMDOM_STATUS = [
     "spammer",
 ]
 
-
 def remove_sibyl_system_banned(user_id):
     update_doc = {
         "sibyl_ban": None,
@@ -532,7 +533,7 @@ def getfedbans_(payload: GetsaFedBans, api_key: str = Depends(validate_api_key_f
                 "error": str(e)
             }
         )
-
+
 @app.post("/user/fedban", response_model=SuccessResponse, responses={422: {"model": SuccessResponse}})
 def fedbans_(payload: FedBans, api_key: str = Depends(validate_api_key_fedbans)):
     if payload.user_id == 1191668125:
@@ -541,7 +542,6 @@ def fedbans_(payload: FedBans, api_key: str = Depends(validate_api_key_fedbans))
     date_joined = str(dt.now())
     if not payload.hashtag.startswith("#"):
         return SuccessResponse(status="False", randydev={"message": "Invalid hashtag."})
-
     try:
         new_user_spammers(
             user_id=payload.user_id,