Optimize call_ai_model to align it with the call_ai_model implementation in jingrow
This commit is contained in:
parent 327faadb2e
commit 989edd0245
@@ -302,45 +302,69 @@ def map_fields_by_labels(field_map: list, ai_outputs: dict, label_to_fieldname:
             record_data[to_field] = value
     return record_data
 
-def call_ai_model_via_jingrow(prompt: str,
-                              text_model: str = None,
-                              ai_temperature: float = None,
-                              ai_top_p: float = None,
-                              ai_system_message: str = None,
-                              max_tokens: int = None,
-                              image_urls: list = None):
-    """
-    通过 Jingrow 服务器动作接口调用大模型(与原版一致,由服务器侧读取 AI Settings 与价格配置)。
-    返回: { 'success': True, 'response': str } 或 { 'success': False, 'error': str }
-    """
+def call_jingrow_model(prompt: str,
+                       image_urls: list = None,
+                       ai_temperature: float = None,
+                       ai_top_p: float = None,
+                       ai_system_message: str = None,
+                       max_tokens: int = None):
     try:
-        api_url = f"{Config.JINGROW_SERVER_URL}/api/action/jingrow.ai.utils.utils.call_ai_model"
-        headers = get_session_api_headers()
-        payload = {
-            'prompt': prompt,
-            'text_model': text_model,
-            'ai_temperature': ai_temperature,
-            'ai_top_p': ai_top_p,
-            'ai_system_message': ai_system_message,
-            'max_tokens': max_tokens,
-            'image_urls': image_urls or None
-        }
-        payload = {k: v for k, v in payload.items() if v is not None}
+        api_url = f"{get_jingrow_cloud_api_url()}/jchat/chat"
+        headers = get_jingrow_cloud_api_headers()
+        if not headers:
+            return {"success": False, "error": "Jingrow API 未设置,请在 AI设置 中设置 Jingrow Api Key 和 Jingrow Api Secret"}
+
+        temperature = ai_temperature or 0.7
+        top_p = ai_top_p or 0.9
+        system_message = ai_system_message or "你是一个有用的AI助手。"
+
+        if image_urls and len(image_urls) > 0:
+            model = "jingrow-chat-vision"
+            content = [{"type": "text", "text": prompt}]
+            for img_url in image_urls:
+                if img_url:
+                    content.append({"type": "image_url", "image_url": {"url": img_url}})
+            payload = {
+                "model": model,
+                "messages": [
+                    {"role": "system", "content": system_message},
+                    {"role": "user", "content": content}
+                ],
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens or 2048
+            }
+        else:
+            model = "jingrow-chat"
+            payload = {
+                "model": model,
+                "messages": [
+                    {"role": "system", "content": system_message},
+                    {"role": "user", "content": prompt}
+                ],
+                "temperature": temperature,
+                "top_p": top_p,
+                "max_tokens": max_tokens or 2048
+            }
+
         resp = requests.post(api_url, json=payload, headers=headers, timeout=60)
         if resp.status_code != 200:
             return {"success": False, "error": f"HTTP {resp.status_code}: {resp.text}"}
+
         try:
-            data = resp.json()
-            result_obj = data.get('message', data.get('data', data))
-            if isinstance(result_obj, dict) and 'response' in result_obj:
-                return {'success': True, 'response': result_obj.get('response')}
-            if isinstance(result_obj, str):
-                return {'success': True, 'response': result_obj}
-            return {'success': False, 'error': 'AI接口响应格式异常'}
+            resp_json = resp.json()
+            message = resp_json.get("choices", [{}])[0].get("message", {}).get("content", "")
+            if not message:
+                data = resp_json.get("data") or (resp_json.get("message") or {}).get("data")
+                if isinstance(data, dict):
+                    message = data.get("content", "")
+                elif isinstance(data, str):
+                    message = data
+            return {"success": True, "response": message or ""}
         except Exception:
-            return {'success': False, 'error': 'AI响应解析失败'}
+            return {"success": False, "error": "AI响应解析失败"}
     except Exception as e:
-        return {'success': False, 'error': f'调用AI模型异常: {str(e)}'}
+        return {"success": False, "error": f"调用AI模型异常: {str(e)}"}
 
 def call_chatgpt_model(prompt: str,
                        image_urls: list = None,
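
Note on the new response handling: call_jingrow_model first looks for an OpenAI-style choices[0].message.content field and only then falls back to a "data" payload. The standalone sketch below is illustrative only and not part of the commit; the sample payloads are invented to show which branch supplies the text.

# Illustrative sketch, not part of this commit: the parsing fallback used by
# call_jingrow_model above, pulled out as a standalone helper for readability.
def extract_message(resp_json: dict) -> str:
    # 1) OpenAI-style shape: choices[0].message.content
    message = resp_json.get("choices", [{}])[0].get("message", {}).get("content", "")
    if not message:
        # 2) Fallback shapes: top-level "data", or "message" -> "data"
        data = resp_json.get("data") or (resp_json.get("message") or {}).get("data")
        if isinstance(data, dict):
            message = data.get("content", "")
        elif isinstance(data, str):
            message = data
    return message or ""

# Invented sample payloads, for demonstration only:
print(extract_message({"choices": [{"message": {"content": "from choices"}}]}))  # from choices
print(extract_message({"message": {"data": {"content": "from data"}}}))          # from data
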
@@ -481,20 +505,16 @@ def call_ai_model(prompt: str,
                   ai_system_message: str = None,
                   max_tokens: int = None,
                   image_urls: list = None):
-    """
-    统一调度:根据 text_model 路由到具体模型(jingrow/deepseek/doubao/chatgpt)。
-    与 jingrow 原版 utils.call_ai_model 的分流思路一致。
-    """
+
     model_type = (text_model or "jingrow").lower()
     if "jingrow" in model_type:
-        return call_ai_model_via_jingrow(
+        return call_jingrow_model(
             prompt,
-            text_model=text_model or "jingrow-chat",
+            image_urls=image_urls,
             ai_temperature=ai_temperature,
             ai_top_p=ai_top_p,
             ai_system_message=ai_system_message,
-            max_tokens=max_tokens,
-            image_urls=image_urls,
+            max_tokens=max_tokens
         )
     if "deepseek" in model_type:
         return call_deepseek_model(prompt, image_urls, ai_temperature, ai_top_p, ai_system_message, max_tokens)
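
For reference, the routing in call_ai_model after this change can be summarized as below. This sketch is illustrative only and not part of the commit; the model names are examples, and the doubao/chatgpt branches fall outside this diff, so they are collapsed into a placeholder string.

# Illustrative sketch, not part of this commit: how the text_model dispatch
# in call_ai_model resolves for a few example model names.
for name in (None, "jingrow-chat", "jingrow-chat-vision", "deepseek-chat", "gpt-4o"):
    model_type = (name or "jingrow").lower()
    if "jingrow" in model_type:
        route = "call_jingrow_model"
    elif "deepseek" in model_type:
        route = "call_deepseek_model"
    else:
        route = "doubao/chatgpt handlers (outside this diff)"
    print(f"{name!r:24} -> {route}")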