🐛 Bug fixes:
1. Fix the bug where `max_tokens` is missing from the Claude request body.
2. Fix the bug where Claude `tool_choice` does not support
   `{"type": "function", "function": {"name": "extract_underlined_text"}}`.

request.py: +25 −12 (CHANGED)
@@ -447,14 +447,11 @@ async def get_vertex_claude_payload(request, engine, provider):
         "anthropic_version": "vertex-2023-10-16",
         "messages": messages,
         "system": system_prompt or "You are Claude, a large language model trained by Anthropic.",
     }
 
-
-
-    if "claude-3-5-sonnet" in model:
-        payload['max_tokens'] = 8192
-    elif "claude-3" in model: # 處理其他 Claude 3 模型
-        payload['max_tokens'] = 4096
 
     miss_fields = [
         'model',
@@ -477,9 +474,15 @@ async def get_vertex_claude_payload(request, engine, provider):
             tools.append(json_tool)
         payload["tools"] = tools
         if "tool_choice" in payload:
-            payload["tool_choice"]
-            "
-
 
     if provider.get("tools") == False:
         payload.pop("tools", None)
@@ -711,8 +714,12 @@ async def get_claude_payload(request, engine, provider):
         "model": model,
         "messages": messages,
         "system": system_prompt or "You are Claude, a large language model trained by Anthropic.",
     }
 
     miss_fields = [
         'model',
         'messages',
@@ -735,9 +742,15 @@ async def get_claude_payload(request, engine, provider):
             tools.append(json_tool)
         payload["tools"] = tools
         if "tool_choice" in payload:
-            payload["tool_choice"]
-            "
-
 
     if provider.get("tools") == False:
         payload.pop("tools", None)
447          "anthropic_version": "vertex-2023-10-16",
448          "messages": messages,
449          "system": system_prompt or "You are Claude, a large language model trained by Anthropic.",
450 +        "max_tokens": 8192 if "claude-3-5-sonnet" in model else 4096,
451      }
452
453 +    if request.max_tokens:
454 +        payload["max_tokens"] = int(request.max_tokens)
455
456      miss_fields = [
457          'model',
⋮
474              tools.append(json_tool)
475          payload["tools"] = tools
476          if "tool_choice" in payload:
477 +            if payload["tool_choice"]["type"] == "auto":
478 +                payload["tool_choice"] = {
479 +                    "type": "auto"
480 +                }
481 +            if payload["tool_choice"]["type"] == "function":
482 +                payload["tool_choice"] = {
483 +                    "type": "tool",
484 +                    "name": payload["tool_choice"]["function"]["name"]
485 +                }
486
487      if provider.get("tools") == False:
488          payload.pop("tools", None)
714          "model": model,
715          "messages": messages,
716          "system": system_prompt or "You are Claude, a large language model trained by Anthropic.",
717 +        "max_tokens": 8192 if "claude-3-5-sonnet" in model else 4096,
718      }
719
720 +    if request.max_tokens:
721 +        payload["max_tokens"] = int(request.max_tokens)
722 +
723      miss_fields = [
724          'model',
725          'messages',
⋮
742              tools.append(json_tool)
743          payload["tools"] = tools
744          if "tool_choice" in payload:
745 +            if payload["tool_choice"]["type"] == "auto":
746 +                payload["tool_choice"] = {
747 +                    "type": "auto"
748 +                }
749 +            if payload["tool_choice"]["type"] == "function":
750 +                payload["tool_choice"] = {
751 +                    "type": "tool",
752 +                    "name": payload["tool_choice"]["function"]["name"]
753 +                }
754
755      if provider.get("tools") == False:
756          payload.pop("tools", None)