Auto-routing reasoning

Author: zihanjian
Date: 2025-10-14 11:30:38 +08:00
parent b4b9d02be7
commit 3985070724
3 changed files with 64 additions and 25 deletions

View File

@@ -87,6 +87,11 @@ class AIRouter:
 {
 "action_type": "chat"
 }
+另外,请判断该问题需不需要被认真对待,如果是比较严肃的问题,需要被认真对待,那么请通过参数配置开启深度思考,需要额外提供:
+{
+"action_type": "chat",
+"enable_reasoning": true
+}
 2.如果用户需要使用上述功能之一,返回:
 {
@@ -99,6 +104,7 @@ class AIRouter:
 - 用户输入"提醒我下午3点开会" -> {"action_type": "function", "function_name": "reminder_set", "params": "下午3点开会"}
 - 用户输入"查看我的提醒" -> {"action_type": "function", "function_name": "reminder_list", "params": ""}
 - 用户输入"你好" -> {"action_type": "chat"}
+- 用户输入"帮我认真想想这道题" -> {"action_type": "chat", "enable_reasoning": true}
 - 用户输入"查一下Python教程" -> {"action_type": "function", "function_name": "perplexity_search", "params": "Python教程"}
 #### 格式注意事项:
@@ -169,6 +175,14 @@ class AIRouter:
             if function_name not in self.functions:
                 self.logger.warning(f"AI路由器未知的功能名 - {function_name}")
                 return False, None
+        else:
+            # 聊天模式下检查是否请求推理
+            if "enable_reasoning" in decision:
+                raw_value = decision.get("enable_reasoning")
+                if isinstance(raw_value, str):
+                    decision["enable_reasoning"] = raw_value.strip().lower() in ("true", "1", "yes", "y")
+                else:
+                    decision["enable_reasoning"] = bool(raw_value)
         self.logger.info(f"AI路由决策: {decision}")
         return True, decision
@@ -223,6 +237,7 @@ class AIRouter:
         # 获取AI路由决策
         success, decision = self.route(ctx)
+        ctx.router_decision = decision if success else None
         self.logger.debug(f"[AI路由器] route返回 - success: {success}, decision: {decision}")
         if not success or not decision:
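
As a minimal standalone sketch (not code from this repository), the enable_reasoning normalization added in the route() hunk above can be exercised like this; the helper name normalize_reasoning_flag is invented for illustration:

from typing import Any, Dict

def normalize_reasoning_flag(decision: Dict[str, Any]) -> Dict[str, Any]:
    # Mirrors the coercion above: "true"/"1"/"yes"/"y" (any case) become True,
    # other strings become False, and non-string values go through bool().
    if "enable_reasoning" in decision:
        raw_value = decision.get("enable_reasoning")
        if isinstance(raw_value, str):
            decision["enable_reasoning"] = raw_value.strip().lower() in ("true", "1", "yes", "y")
        else:
            decision["enable_reasoning"] = bool(raw_value)
    return decision

print(normalize_reasoning_flag({"action_type": "chat", "enable_reasoning": "True"}))
# {'action_type': 'chat', 'enable_reasoning': True}
print(normalize_reasoning_flag({"action_type": "chat", "enable_reasoning": 0}))
# {'action_type': 'chat', 'enable_reasoning': False}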

View File

@@ -22,6 +22,8 @@ class MessageContext:
     is_group: bool = False # 是否群聊消息
     is_at_bot: bool = False # 是否在群聊中 @ 了机器人
     sender_name: str = "未知用户" # 发送者昵称 (群内或私聊)
+    reasoning_requested: bool = False # 是否请求启用推理模式
+    router_decision: Optional[Dict[str, Any]] = None # AI路由返回的决策结果
     # 懒加载字段
     _room_members: Optional[Dict[str, str]] = field(default=None, init=False, repr=False)
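
To show how the two new MessageContext fields are meant to be read together, here is a small self-contained sketch; the trimmed dataclass and the wants_reasoning helper are hypothetical and only mirror the precedence the Robot changes below apply (a non-chat router decision switches reasoning off, a chat decision can switch it on):

from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class MessageContext:
    # Only the fields relevant to this sketch; the real dataclass has many more.
    text: str = ""
    reasoning_requested: bool = False
    router_decision: Optional[Dict[str, Any]] = None

def wants_reasoning(ctx: MessageContext) -> bool:
    decision = ctx.router_decision or {}
    if decision.get("action_type") == "chat":
        return ctx.reasoning_requested or bool(decision.get("enable_reasoning"))
    if decision:
        return False
    return ctx.reasoning_requested

ctx = MessageContext(text="帮我认真想想这道题",
                     router_decision={"action_type": "chat", "enable_reasoning": True})
print(wants_reasoning(ctx))  # True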

View File

@@ -262,34 +262,15 @@ class Robot(Job):
         # 确保context能访问到当前选定的chat模型及特定历史限制
         setattr(ctx, 'chat', self.chat)
         setattr(ctx, 'specific_max_history', specific_limit)
-        reasoning_triggered = bool(
+        ctx.reasoning_requested = bool(
             ctx.text
             and "想想" in ctx.text
             and (not ctx.is_group or ctx.is_at_bot)
         )
-        if reasoning_triggered:
-            self.LOG.info("检测到推理模式触发词跳过AI路由。")
-            ctx.send_text("正在深度思考,请稍候...", record_message=False)
-            previous_ctx_chat = ctx.chat
-            reasoning_chat = self._get_reasoning_chat_model()
-            if reasoning_chat:
-                ctx.chat = reasoning_chat
-                model_label = self._describe_chat_model(reasoning_chat, reasoning=True)
-                self.LOG.debug(f"使用推理模型 {model_label} 处理消息")
-            else:
-                self.LOG.warning("当前模型未配置推理模型,使用默认模型处理深度思考请求")
-            reasoning_handled = False
-            try:
-                reasoning_handled = handle_chitchat(ctx, None)
-            finally:
-                ctx.chat = previous_ctx_chat
-            if not reasoning_handled:
-                self.LOG.warning("推理模式处理消息失败,向用户返回降级提示")
-                ctx.send_text("抱歉,深度思考暂时遇到问题,请稍后再试。")
+        if ctx.reasoning_requested:
+            self.LOG.info("检测到推理模式触发词跳过AI路由直接进入闲聊推理模式。")
+            self._handle_chitchat(ctx, None)
             return
         handled = False
@@ -299,6 +280,17 @@ class Robot(Job):
             self.LOG.debug(f"[AI路由调试] 准备调用AI路由器处理消息: {msg.content}")
             handled = ai_router.dispatch(ctx)
             self.LOG.debug(f"[AI路由调试] AI路由器处理结果: {handled}")
+            router_decision = getattr(ctx, 'router_decision', None)
+            if router_decision:
+                action_type = router_decision.get("action_type")
+                if action_type == "chat":
+                    if router_decision.get("enable_reasoning"):
+                        self.LOG.info("AI路由器请求启用推理模式处理聊天消息")
+                    ctx.reasoning_requested = ctx.reasoning_requested or bool(router_decision.get("enable_reasoning"))
+                else:
+                    if ctx.reasoning_requested:
+                        self.LOG.debug("AI路由器选择了非聊天模式关闭推理模式")
+                    ctx.reasoning_requested = False
             if handled:
                 self.LOG.info("消息已由AI路由器处理")
                 self.LOG.debug("[AI路由调试] 消息已成功由AI路由器处理")
@@ -340,7 +332,7 @@ class Robot(Job):
             # 如果在群里被@了但AI路由器未处理则进行闲聊
             if msg.is_at(self.wxid):
                 # 调用handle_chitchat函数处理闲聊传递完整的上下文
-                handle_chitchat(ctx, None)
+                self._handle_chitchat(ctx, None)
             else:
                 pass
@@ -350,7 +342,7 @@ class Robot(Job):
             if msg.type == 1 or (msg.type == 49 and ctx.text):
                 self.LOG.info(f"准备回复私聊消息: 类型={msg.type}, 文本内容='{ctx.text}'")
                 # 调用handle_chitchat函数处理闲聊传递完整的上下文
-                handle_chitchat(ctx, None)
+                self._handle_chitchat(ctx, None)
         except Exception as e:
             self.LOG.error(f"处理消息时发生错误: {str(e)}", exc_info=True)
@@ -555,6 +547,36 @@ class Robot(Job):
             return None
         return self.reasoning_chat_models.get(model_id)
+
+    def _handle_chitchat(self, ctx, match=None):
+        """统一处理闲聊,自动切换推理模型"""
+        reasoning_requested = bool(getattr(ctx, 'reasoning_requested', False))
+        previous_ctx_chat = getattr(ctx, 'chat', None)
+        reasoning_chat = None
+        if reasoning_requested:
+            self.LOG.info("检测到推理模式请求,将启用深度思考。")
+            ctx.send_text("正在深度思考,请稍候...", record_message=False)
+            reasoning_chat = self._get_reasoning_chat_model()
+            if reasoning_chat:
+                ctx.chat = reasoning_chat
+                model_label = self._describe_chat_model(reasoning_chat, reasoning=True)
+                self.LOG.debug(f"使用推理模型 {model_label} 处理消息")
+            else:
+                self.LOG.warning("当前模型未配置推理模型,使用默认模型处理深度思考请求")
+        handled = False
+        try:
+            handled = handle_chitchat(ctx, match)
+        finally:
+            if reasoning_chat and previous_ctx_chat is not None:
+                ctx.chat = previous_ctx_chat
+        if reasoning_requested and not handled:
+            self.LOG.warning("推理模式处理消息失败,向用户返回降级提示")
+            ctx.send_text("抱歉,深度思考暂时遇到问题,请稍后再试。")
+        return handled
+
     def _describe_chat_model(self, chat_model, reasoning: bool = False) -> str:
         """根据配置返回模型名称,默认回退到实例类名"""
         model_id = getattr(self, 'current_model_id', None)
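
To make the chat-model handling in _handle_chitchat concrete, here is a minimal self-contained sketch of the same save/swap/restore pattern; the stub context, the model strings, and run_with_reasoning_model are invented for illustration:

class StubContext:
    # Stand-in for MessageContext: only the attribute the pattern touches.
    chat = "default-chat-model"

def run_with_reasoning_model(ctx: StubContext, reasoning_model: str) -> bool:
    previous_chat = ctx.chat
    ctx.chat = reasoning_model   # swap in the reasoning model for this call only
    try:
        # Stand-in for handle_chitchat(ctx, match); pretend it succeeded.
        return True
    finally:
        ctx.chat = previous_chat  # always restored, even if the handler raises

ctx = StubContext()
print(run_with_reasoning_model(ctx, "reasoning-model"))  # True
print(ctx.chat)  # default-chat-model (restored)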