Fix the issue where Gemini cannot be used.
- main.py +2 -1
- request.py +3 -2
- response.py +8 -0
main.py

```diff
@@ -68,7 +68,8 @@ async def process_request(request: RequestModel, provider: Dict):
     # "headers": headers,
     # "payload": payload
     # }
-    #
+    # import json
+    # logger.info(f"Request details: {json.dumps(request_info, indent=4, ensure_ascii=False)}")
 
     if request.stream:
         model = provider['model'][request.model]
```
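The main.py change only rewrites a commented-out debug block. If uncommented, it would pretty-print the outgoing request; a minimal sketch of that pattern, where the shape of `request_info` is an assumption inferred from the commented-out "headers"/"payload" keys, not confirmed by the source:

```python
import json
import logging

logger = logging.getLogger(__name__)

# Hypothetical request_info; its shape is inferred from the commented-out
# "headers"/"payload" keys in the diff above.
request_info = {
    "headers": {"Content-Type": "application/json"},
    "payload": {"model": "gemini-pro", "stream": True},
}

# Pretty-print the request for debugging; ensure_ascii=False keeps
# non-ASCII characters (e.g. Chinese prompts) readable in the log.
logger.info(f"Request details: {json.dumps(request_info, indent=4, ensure_ascii=False)}")
```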
request.py

```diff
@@ -47,7 +47,8 @@ async def get_gemini_payload(request, engine, provider):
 
     messages = []
     for msg in request.messages:
-
+        if msg.role == "assistant":
+            msg.role = "model"
         if isinstance(msg.content, list):
             content = []
             for item in msg.content:
@@ -59,7 +60,7 @@ async def get_gemini_payload(request, engine, provider):
                 image_message = await get_image_message(item.image_url.url, engine)
                 content.append(image_message)
         else:
-            content = msg.content
+            content = [{"text": msg.content}]
         if msg.role != "system":
             messages.append({"role": msg.role, "parts": content})
 
```
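This is the core fix: Gemini's API names the assistant role "model" and expects "parts" to be a list of part objects, so a bare string has to be wrapped as `{"text": ...}`. A standalone sketch of the conversion, using plain dicts in place of the project's request model (the helper name `to_gemini_messages` is hypothetical):

```python
def to_gemini_messages(openai_messages):
    """Hypothetical helper mirroring the fixed loop in get_gemini_payload."""
    messages = []
    for msg in openai_messages:
        # Gemini uses "model" where the OpenAI format uses "assistant".
        role = "model" if msg["role"] == "assistant" else msg["role"]
        content = msg["content"]
        # Wrap plain strings as text parts; list content is assumed to
        # already hold part objects.
        parts = content if isinstance(content, list) else [{"text": content}]
        if role != "system":
            messages.append({"role": role, "parts": parts})
    return messages

print(to_gemini_messages([
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]))
# [{'role': 'user', 'parts': [{'text': 'Hi'}]},
#  {'role': 'model', 'parts': [{'text': 'Hello!'}]}]
```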
response.py

```diff
@@ -39,6 +39,14 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
 async def fetch_gemini_response_stream(client, url, headers, payload, model):
     timestamp = datetime.timestamp(datetime.now())
     async with client.stream('POST', url, headers=headers, json=payload) as response:
+        if response.status_code != 200:
+            error_message = await response.aread()
+            error_str = error_message.decode('utf-8', errors='replace')
+            try:
+                error_json = json.loads(error_str)
+            except json.JSONDecodeError:
+                error_json = error_str
+            yield {"error": f"fetch_gpt_response_stream HTTP Error {response.status_code}", "details": error_json}
         buffer = ""
         async for chunk in response.aiter_text():
             buffer += chunk
```
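With this change, a non-200 upstream response yields a structured error dict instead of silently producing an empty stream (the label still reads fetch_gpt_response_stream, apparently carried over from the sibling function). A hedged sketch of how a caller might consume it; the URL, headers, payload, and import path are placeholders, and the client is assumed to be httpx, which the `stream`/`aiter_text`/`aread`/`status_code` calls in the diff suggest:

```python
import asyncio
import httpx

from response import fetch_gemini_response_stream  # assumed import path

async def main():
    url = "https://example.invalid/v1beta/models/gemini:streamGenerateContent"  # placeholder
    headers = {"Content-Type": "application/json"}
    payload = {"contents": [{"role": "user", "parts": [{"text": "Hi"}]}]}
    async with httpx.AsyncClient() as client:
        async for item in fetch_gemini_response_stream(client, url, headers, payload, "gemini"):
            if isinstance(item, dict) and "error" in item:
                # The new guard surfaces HTTP failures as a single yielded dict.
                print("upstream error:", item["error"], item["details"])
                return
            print(item, end="")

asyncio.run(main())
```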