Commit: Fix the bug in RemoteProtocolError handling.

Files changed:
- response.py (+29 −27)
- utils.py (+2 −11)

response.py (CHANGED)
@@ -92,33 +92,35 @@ async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects
[NOTE(review): reconstructed from a garbled page extraction; the content of the 27 removed lines (old 95–121) was not captured and cannot be restored from this page.]

  92               return
  93
  94       buffer = ""
  95 -     … (removed; content not captured in this extraction)
  …  -     … (old lines 96–121, 27 lines total, removed)
 122               return
 123
 124       yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}
|
[NOTE(review): new side of the response.py hunk, reconstructed; indentation was lost in extraction and is re-derived from the code's control flow — verify against the actual commit.]

  92               return
  93
  94       buffer = ""
  95 +     try:
  96 +         async for chunk in response.aiter_text():
  97 +             # logger.info(f"chunk: {repr(chunk)}")
  98 +             buffer += chunk
  99 +             if chunk.startswith("<script"):
 100 +                 import re
 101 +                 redirect_match = re.search(r"window\.location\.href\s*=\s*'([^']+)'", chunk)
 102 +                 if redirect_match:
 103 +                     new_url = redirect_match.group(1)
 104 +                     # logger.info(f"new_url: {new_url}")
 105 +                     if not new_url.startswith('http'):
 106 +                         # 如果是相对路径,构造完整URL
 107 +                         # logger.info(url.split('/'))
 108 +                         base_url = '/'.join(url.split('/')[:3])
 109 +                         new_url = base_url + new_url
 110 +                     url = new_url
 111 +                     # logger.info(f"new_url: {new_url}")
 112 +                     redirect_count += 1
 113 +                     break
 114 +             redirect_count = 0
 115 +             while "\n" in buffer:
 116 +                 line, buffer = buffer.split("\n", 1)
 117 +                 # logger.info("line: %s", repr(line))
 118 +                 if line and line != "data: " and line != "data:" and not line.startswith(": "):
 119 +                     yield line + "\n"
 120 +     except httpx.RemoteProtocolError as e:
 121 +         yield {"error": f"fetch_gpt_response_stream RemoteProtocolError {e.__class__.__name__}", "details": str(e)}
 122 +         return
 123 +     if redirect_count == 0:
 124           return
 125
 126       yield {"error": "Too many redirects", "details": f"Reached maximum of {max_redirects} redirects"}
utils.py (CHANGED)

@@ -78,9 +78,6 @@ async def async_generator(items):
  78       for item in items:
  79           yield item
  80
  81 -     class GeneratorStopError(Exception):
  82 -         pass
  83 -
  84   async def error_handling_wrapper(generator, status_code=200):
  85       try:
  86           first_item = await generator.__anext__()

@@ -107,19 +104,13 @@ async def error_handling_wrapper(generator, status_code=200):
[NOTE(review): the content of removed lines 110–111 was not captured by this extraction; from the new side they were presumably `try:` and `async for item in generator:` — verify against the actual commit.]

 107       # 如果不是错误,创建一个新的生成器,首先yield第一个项,然后yield剩余的项
 108       async def new_generator():
 109           yield ensure_string(first_item)
 110 -         … (removed; content not captured)
 111 -         … (removed; content not captured)
 112 -                 yield ensure_string(item)
 113 -             except httpx.RemoteProtocolError as e:
 114 -                 logger.error(f"Remote protocol error occurred: {e}")
 115 -                 raise GeneratorStopError("Generator stopped due to remote protocol error")
 116
 117       return new_generator()
 118
 119       except StopAsyncIteration:
 120           raise HTTPException(status_code=status_code, detail="data: {'error': 'No data returned'}")
 121 -     except GeneratorStopError:
 122 -         raise HTTPException(status_code=status_code, detail="data: {'error': 'No data returned'}")
 123
 124   def post_all_models(token, config, api_list):
 125       all_models = []
|
[NOTE(review): new side of the utils.py hunks, reconstructed; indentation re-derived from control flow.]

  78       for item in items:
  79           yield item
  80
  81   async def error_handling_wrapper(generator, status_code=200):
  82       try:
  83           first_item = await generator.__anext__()

 104       # 如果不是错误,创建一个新的生成器,首先yield第一个项,然后yield剩余的项
 105       async def new_generator():
 106           yield ensure_string(first_item)
 107 +         async for item in generator:
 108 +             yield ensure_string(item)
 109
 110       return new_generator()
 111
 112       except StopAsyncIteration:
 113           raise HTTPException(status_code=status_code, detail="data: {'error': 'No data returned'}")
 114
 115   def post_all_models(token, config, api_list):
 116       all_models = []