import json
import httpx
from datetime import datetime
async def generate_sse_response(timestamp, model, content=None, tools_id=None,
                                function_call_name=None, function_call_content=None,
                                role=None, tokens_use=None, total_tokens=None):
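    """Build one OpenAI-compatible `chat.completion.chunk` SSE event string.

    Each call encodes exactly one kind of delta: plain `content`, a tool-call
    header (`tools_id` + `function_call_name`), streamed tool arguments
    (`function_call_content`), or an initial `role`. `tokens_use` and
    `total_tokens` are accepted but currently unused.
    """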
sample_data = {
"id": "chatcmpl-9ijPeRHa0wtyA2G8wq5z8FC3wGMzc",
"object": "chat.completion.chunk",
"created": timestamp,
"model": model,
"system_fingerprint": "fp_d576307f90",
"choices": [
{
"index": 0,
"delta": {"content": content},
"logprobs": None,
"finish_reason": None
}
],
"usage": None
}
if function_call_content:
sample_data["choices"][0]["delta"] = {"tool_calls":[{"index":0,"function":{"arguments": function_call_content}}]}
if tools_id and function_call_name:
sample_data["choices"][0]["delta"] = {"tool_calls":[{"index":0,"id":tools_id,"type":"function","function":{"name":function_call_name,"arguments":""}}]}
if role:
sample_data["choices"][0]["delta"] = {"role": role, "content": ""}
json_data = json.dumps(sample_data, ensure_ascii=False)
    # Build the SSE-formatted response line.
sse_response = f"data: {json_data}\n\n"
return sse_response
async def fetch_gemini_response_stream(client, url, headers, payload, model):
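    """Stream a Gemini response and re-emit it as OpenAI-style SSE chunks.

    Rather than parsing the full (pretty-printed) JSON stream, this scans each
    line for a `"text": "` fragment and converts it into a chunk.
    """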
    # OpenAI's "created" field is an integer Unix timestamp.
    timestamp = int(datetime.now().timestamp())
async with client.stream('POST', url, headers=headers, json=payload) as response:
buffer = ""
async for chunk in response.aiter_text():
buffer += chunk
while "\n" in buffer:
line, buffer = buffer.split("\n", 1)
print(line)
                if line and '"text": "' in line:
                    try:
                        # Each matching line looks like `"text": "..."`; wrap it in
                        # braces (dropping any trailing comma) so it parses as JSON.
                        json_data = json.loads("{" + line.strip().rstrip(",") + "}")
                        content = json_data.get('text', '')
                        content = content.replace("\\n", "\n")
                        sse_string = await generate_sse_response(timestamp, model, content)
                        yield sse_string
                    except json.JSONDecodeError:
                        print(f"Failed to parse JSON: {line}")
async def fetch_gpt_response_stream(client, url, headers, payload):
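    """Proxy an OpenAI-compatible SSE stream, re-yielding it line by line."""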
async with client.stream('POST', url, headers=headers, json=payload) as response:
# print("response.status_code", response.status_code)
if response.status_code != 200:
print("请求失败,状态码是", response.status_code)
error_message = await response.aread()
error_str = error_message.decode('utf-8', errors='replace')
error_json = json.loads(error_str)
print(json.dumps(error_json, indent=4, ensure_ascii=False))
yield {"error": f"HTTP Error {response.status_code}", "details": error_json}
buffer = ""
async for chunk in response.aiter_bytes():
# print("chunk.decode('utf-8')", chunk.decode('utf-8'))
buffer += chunk.decode('utf-8')
while "\n" in buffer:
line, buffer = buffer.split("\n", 1)
print(line)
yield line + "\n"
async def fetch_claude_response_stream(client, url, headers, payload, model):
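    """Stream an Anthropic Messages response and translate each event
    (message_start, content_block_start, content_block_delta) into an
    OpenAI-style SSE chunk."""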
    timestamp = int(datetime.now().timestamp())
async with client.stream('POST', url, headers=headers, json=payload) as response:
        if response.status_code != 200:
            error_message = await response.aread()
            error_str = error_message.decode('utf-8', errors='replace')
            try:
                error_json = json.loads(error_str)
            except json.JSONDecodeError:
                error_json = {"message": error_str}
            print('\033[31m')  # red
            print(f"Request failed with status code {response.status_code}, error:")
            print(json.dumps(error_json, indent=4, ensure_ascii=False))
            print('\033[0m')  # reset color
            yield {"error": f"HTTP Error {response.status_code}", "details": error_json}
            # aread() has consumed the body, so there is nothing left to stream.
            return
buffer = ""
async for chunk in response.aiter_bytes():
buffer += chunk.decode('utf-8')
while "\n" in buffer:
line, buffer = buffer.split("\n", 1)
                print(line)
                if line.startswith("data:"):
                    # Strip the leading "data: " prefix (6 characters).
                    line = line[6:]
                    resp: dict = json.loads(line)
                    message = resp.get("message")
                    if message:
                        # In Anthropic's message_start event, usage is nested
                        # under "message", not at the top level.
                        tokens_use = message.get("usage")
                        role = message.get("role")
                        if role:
                            sse_string = await generate_sse_response(timestamp, model, None, None, None, None, role)
                            yield sse_string
                        if tokens_use:
                            total_tokens = tokens_use.get("input_tokens", 0) + tokens_use.get("output_tokens", 0)
                            # print("\n\rtotal_tokens", total_tokens)
tool_use = resp.get("content_block")
tools_id = None
function_call_name = None
                    if tool_use and tool_use.get('type') == "tool_use":
# print("tool_use", tool_use)
tools_id = tool_use["id"]
if "name" in tool_use:
function_call_name = tool_use["name"]
sse_string = await generate_sse_response(timestamp, model, None, tools_id, function_call_name, None)
yield sse_string
delta = resp.get("delta")
# print("delta", delta)
if not delta:
continue
if "text" in delta:
content = delta["text"]
sse_string = await generate_sse_response(timestamp, model, content, None, None)
yield sse_string
if "partial_json" in delta:
# {"type":"input_json_delta","partial_json":""}
function_call_content = delta["partial_json"]
sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
yield sse_string
async def fetch_response(client, url, headers, payload):
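    """POST once and return the parsed JSON body (non-streaming path)."""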
response = await client.post(url, headers=headers, json=payload)
return response.json()
async def fetch_response_stream(client, url, headers, payload, engine, model):
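    """Dispatch to the engine-specific streamer, retrying once on connection errors."""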
for _ in range(2):
try:
if engine == "gemini":
async for chunk in fetch_gemini_response_stream(client, url, headers, payload, model):
yield chunk
elif engine == "claude":
async for chunk in fetch_claude_response_stream(client, url, headers, payload, model):
yield chunk
elif engine == "gpt":
async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
yield chunk
elif engine == "openrouter":
async for chunk in fetch_gpt_response_stream(client, url, headers, payload):
yield chunk
else:
raise ValueError("Unknown response")
break
except httpx.ConnectError as e:
print(f"连接错误: {e}")
continue |
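
# Usage sketch (added for illustration, not part of the original module): drives
# fetch_response_stream end to end. The endpoint, headers, and payload below are
# placeholder assumptions; substitute real credentials before running.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        async with httpx.AsyncClient() as client:
            url = "https://api.openai.com/v1/chat/completions"  # placeholder endpoint
            headers = {"Authorization": "Bearer sk-..."}  # placeholder key
            payload = {
                "model": "gpt-4o",
                "stream": True,
                "messages": [{"role": "user", "content": "Hello"}],
            }
            async for chunk in fetch_response_stream(client, url, headers, payload, "gpt", "gpt-4o"):
                print(chunk, end="")

    asyncio.run(_demo())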