Mirror of https://github.com/Zippland/Bubbles.git (synced 2026-01-20 07:00:59 +08:00)

Commit: 重构了一下 ("refactored things a bit")
@@ -21,11 +21,13 @@ def check_imports():

         # 检查处理器
         from function_calls.handlers import (
-            handle_weather, handle_news, handle_help,
-            handle_reminder_set, handle_reminder_list, handle_reminder_delete,
-            handle_perplexity_search, handle_summary, handle_clear_messages
+            handle_reminder_set,
+            handle_reminder_list,
+            handle_reminder_delete,
+            handle_perplexity_search,
+            handle_summary,
         )
-        print("✅ 所有处理器导入成功")
+        print("✅ 核心处理器导入成功")

         # 检查参数模型
         from function_calls.models import (
@@ -47,15 +49,14 @@ def check_function_registration():
         from function_calls.registry import list_functions
         functions = list_functions()

-        expected_count = 10
+        expected_count = 5
         if len(functions) != expected_count:
             print(f"⚠️ 函数数量异常: 期望{expected_count}个,实际{len(functions)}个")
             return False

         required_functions = [
-            'weather_query', 'news_query', 'help', 'summary',
             'reminder_set', 'reminder_list', 'reminder_delete',
-            'perplexity_search', 'clear_messages', 'insult'
+            'perplexity_search', 'summary'
         ]

         missing_functions = []
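The check above only assumes that `list_functions()` returns the names of everything registered. As a reading aid, here is a minimal registry sketch consistent with that contract; this is hypothetical, since the real `function_calls/registry.py` is not shown in this diff:

# Hypothetical sketch of the registry interface assumed by the check above.
# The actual function_calls/registry.py is not part of this diff.
from typing import Callable, Dict, List

_REGISTRY: Dict[str, Callable] = {}

def register(name: str):
    """Decorator that records a handler under a stable function name."""
    def deco(fn: Callable) -> Callable:
        _REGISTRY[name] = fn
        return fn
    return deco

def list_functions() -> List[str]:
    """Return the names of all registered Function Call handlers."""
    return list(_REGISTRY.keys())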
@@ -83,25 +84,7 @@ def check_router_initialization():
         router = FunctionCallRouter()
-        print("✅ FunctionCallRouter初始化成功")
-
-        # 测试直接命令匹配
-        class MockCtx:
-            def __init__(self, text):
-                self.text = text
-
-        test_cases = [
-            ("help", "help"),
-            ("新闻", "news_query"),
-            ("天气 北京", "weather_query")
-        ]
-
-        for input_text, expected in test_cases:
-            ctx = MockCtx(input_text)
-            result = router._try_direct_command_match(ctx)
-            if result != expected:
-                print(f"❌ 直接匹配失败: '{input_text}' -> {result} (期望: {expected})")
-                return False
-
-        print("✅ 直接命令匹配正常")
+        print("✅ FunctionCallRouter 初始化成功")
         return True

     except Exception as e:
@@ -157,4 +140,4 @@ def main():
     return 1

 if __name__ == "__main__":
-    sys.exit(main())
+    sys.exit(main())
@@ -2,7 +2,5 @@
 """
 命令辅助模块

-该包保留了消息上下文与部分遗留处理器,供 Function Call 架构复用:
-- context: 消息上下文类
-- handlers: 基础命令/闲聊逻辑
+该包仅保留 `context`,供 Function Call 架构复用消息上下文模型。
 """
commands/handlers.py (1065 lines) — file diff suppressed because it is too large.
@@ -70,17 +70,16 @@ flowchart TD
 - 群聊场景会在 `at` 字段写入 `ctx.msg.sender`,确保回复时点名原始请求者。

 ### 6. 兜底逻辑(`robot.py:229-273`)
-- 当路由返回未处理状态时,机器人会回退到旧流程:自动通过好友请求、发送欢迎消息或调用 `handle_chitchat`。
+- 当路由返回未处理状态时,机器人会回退到旧流程:自动通过好友请求、发送欢迎消息或调用 `run_chat_fallback`。
 - 即使 Function Call 路由失败,整体对话体验依旧有保障。

 ## 优势
-- 对已知命令走直连路径,既避免额外的模型耗时,又能通过 JSON Schema 保证参数质量(`function_calls/router.py:103-175`)。
-- LLM 协调器清晰区分原生工具与提示词回退,后续替换模型时无需大改(`function_calls/llm.py:33-186`)。
+- 所有能力通过单一的 Function Call 路由注册与执行,避免了正则命令与 AI 决策两套体系并存的问题。
+- LLM 协调器完全依赖模型原生的函数调用能力,逻辑集中在 `_run_native_loop`(`function_calls/llm.py:57-134`)。
 - `FunctionResult` 既可直接回复,也能作为工具输出反馈给模型,减少重复实现(`function_calls/spec.py:12-36`)。

 ## 仍需关注的点
-- 进入 LLM 流程后,工具输出依赖模型二次组织文本;关键函数可考虑直接派发 `FunctionResult`,避免模型返回空字符串时用户无感知(`function_calls/llm.py:83-136`)。
-- 天气命令的直接路径默认关键词与城市之间存在空格;若要支持“天气北京”这类写法,需要放宽解析逻辑(`function_calls/router.py:148-156`)。
+- 进入 LLM 流程后,工具输出依赖模型二次组织文本;关键函数可考虑直接派发 `FunctionResult`,避免模型返回空字符串时用户无感知(`function_calls/llm.py:83-134`)。
+- 权限检查字段(`spec.auth`)仍是占位符,新增高权限工具前需补齐校验实现(`function_calls/router.py:35-38`)。

 ---
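The dual-use `FunctionResult` praised in the doc above can be illustrated with a minimal sketch. The field names are inferred from the `result.handled` / `result.messages` usages visible elsewhere in this diff; the helper methods are purely illustrative and not from the real `function_calls/spec.py`:

# Minimal sketch of the dual-use result type described above.
# Field names inferred from usages in this diff (result.handled,
# result.messages); helper methods are illustrative assumptions.
from dataclasses import dataclass, field
from typing import List

@dataclass
class FunctionResult:
    handled: bool = False
    messages: List[str] = field(default_factory=list)

    def as_reply(self) -> str:
        # Direct-reply path: join messages for the chat client.
        return "\n".join(self.messages)

    def as_tool_output(self) -> str:
        # Tool-feedback path: the same text can be appended to the
        # conversation as a tool message for the model to reorganize.
        return self.as_reply()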
@@ -6,6 +6,8 @@
 - 保持现有业务能力完整可用(天气、新闻、提醒、Perplexity 搜索、群管理等),迁移过程中不影响线上稳定性。
 - 兼容现有上下文对象 `MessageContext`,并保留与微信客户端交互所需的最小耦合。

+> 注:下述“现有架构”描述的是迁移前的遗留实现,目前 `commands/` 下的正则路由与 handlers 已移除,仅保留 `MessageContext`。
+
 ## 2. 现有架构梳理
 ### 2.1 指令流
 1. `robot.py` 中 `Robot.processMsg` 获取消息后构造 `MessageContext`,先交给 `CommandRouter.dispatch`(见 `commands/router.py:13`)。
@@ -138,7 +140,7 @@ WxMsg -> MessageContext -> FunctionCallRouter
 ### 阶段 P4:切换入口与清理遗留
 1. **替换 `Robot.processMsg` 流程**:
    - 将调用链切换为 `FunctionCallRouter.dispatch(ctx)`。
-   - 如果返回 `False` 且 `ctx.chat` 存在,则调用默认聊天模型兜底(原 `handle_chitchat`)。
+   - 如果返回 `False` 且 `ctx.chat` 存在,则调用默认聊天模型兜底(`run_chat_fallback`)。
 2. **移除旧模块**:
    - 删除 `commands/router.py`、`commands/models.py`、`commands/registry.py`、`commands/ai_router.py`、`commands/ai_functions.py`。
    - 将保留的业务 handler 根据需要移动到 `function_calls/handlers/` 或 `services/`。
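The P4 entry-point switch described in this hunk amounts to a two-step dispatch. A minimal sketch of that control flow; `FunctionCallRouter.dispatch` and `run_chat_fallback` appear in this diff, while the surrounding Robot wiring (attribute names such as `function_router`) is assumed:

# Sketch of the P4 dispatch order. dispatch() and run_chat_fallback()
# appear in this diff; the robot.function_router wiring is an assumption.
from function_calls.services import run_chat_fallback

def process_message(robot, ctx) -> None:
    # 1) Let the Function Call router try every registered capability.
    if robot.function_router.dispatch(ctx):
        return  # handled (direct command match or LLM tool call)

    # 2) Otherwise fall back to plain chit-chat with the default model.
    if getattr(ctx, "chat", None) and not run_chat_fallback(ctx):
        robot.LOG.warning("chat fallback failed or sent no reply")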
@@ -2,9 +2,8 @@

 ## 1. 架构一致性如何提升?
 - **现状诊断**
-  - `function_calls/handlers.py:147` 等 handler 仍依赖 `commands/handlers.py` 中的正则/自然语言逻辑,导致新旧两套体系并存,增大耦合与维护成本。
-  - `function_calls/router.py:41` 的直接匹配逻辑与 LLM 选择逻辑混在一起,职责边界不清晰;而参数校验仅在 LLM 分支执行(`function_calls/router.py:150`)。
-  - 业务逻辑散落:例如新闻查询同时存在 `_get_news_info` 与命令 handler 版本,缺少统一 service 层。
+  - 业务逻辑已集中到 Function Call 体系,但闲聊兜底、服务层抽象仍需持续完善。
+  - router 与服务层的职责边界要保持清晰:router 负责模型交互与参数校验,业务细节放在 `function_calls/services/`。
 - **建议的架构骨架**
   1. **分层组织代码**:
      - `function_calls/spec.py`:仅保留数据结构。
@@ -47,7 +46,7 @@
 - **移除建议**:
   1. 删除 `self.command_router = CommandRouter(...)` 及相关 import;同时移除 `CommandRouter.dispatch` 调用与辅助日志。
   2. 移除 `ai_router` 回退逻辑和配置项 `FUNCTION_CALL_ROUTER.fallback_to_legacy`。确保配置文件同步更新(`config.yaml.template:151`)。
-  3. 将闲聊 fallback 改为:当 `FunctionCallRouter` 返回 `False` 时直接走 `handle_chitchat`,并记录原因日志。
+  3. 将闲聊 fallback 改为:当 `FunctionCallRouter` 返回 `False` 时直接走 `run_chat_fallback`,并记录原因日志。
   4. 清理不再使用的命令注册表与正则代码(`commands/registry.py`、`commands/router.py` 等),确认没有别的模块引用后可删。
   5. 回归测试:运行原有功能用例,确保删除旧路由不会影响提醒、天气等功能;同时观察日志,确认不再出现“命令路由器”相关输出。
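The layering rule in the review above (router handles model interaction and argument validation; business detail lives in `function_calls/services/`) implies thin handlers that delegate. A sketch of that shape; `run_perplexity` is a real export per this diff, but its signature and the handler below are illustrative assumptions:

# Sketch of the recommended layering: a thin handler delegating to a
# service function. run_perplexity is exported per this diff, but its
# signature here is assumed; the handler is illustrative only.
from function_calls.services import run_perplexity

def handle_perplexity_search(ctx, arguments):
    query = arguments.get("query", "")
    return run_perplexity(ctx, query)  # assumed signature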
@@ -49,10 +49,12 @@ class FunctionCallLLM:
             self.logger.error("无可用的AI模型")
             return LLMRunResult(handled=False, error="no_model")

+        if not hasattr(chat_model, "call_with_functions"):
+            self.logger.error("当前模型不支持函数调用接口,请配置支持 function calling 的模型")
+            return LLMRunResult(handled=False, error="no_function_call_support")
+
         try:
-            if hasattr(chat_model, "call_with_functions"):
-                return self._run_native_loop(ctx, chat_model, functions, executor, formatter)
-            return self._run_prompt_loop(ctx, chat_model, functions, executor)
+            return self._run_native_loop(ctx, chat_model, functions, executor, formatter)
         except Exception as exc:  # pragma: no cover - safeguard
             self.logger.error(f"LLM 调用失败: {exc}")
             return LLMRunResult(handled=False, error=str(exc))
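After this change, the coordinator's only capability requirement on a chat model is that it exposes `call_with_functions`. A sketch of that duck-typed contract; only the method name appears in this diff, so the signature below is an assumption:

# Sketch of the capability contract implied above. Only the method name
# call_with_functions is confirmed by this diff; the signature is assumed.
from typing import Any, Dict, List, Protocol

class FunctionCallingModel(Protocol):
    def call_with_functions(
        self,
        messages: List[Dict[str, Any]],   # assumed: OpenAI-style chat messages
        functions: List[Dict[str, Any]],  # assumed: JSON-Schema tool specs
    ) -> Any: ...

def supports_native_tools(chat_model: Any) -> bool:
    # Duck-typed check, mirroring the hasattr() guard in the diff.
    return hasattr(chat_model, "call_with_functions")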
@@ -133,62 +135,6 @@ class FunctionCallLLM:
         self.logger.warning("达到最大函数调用轮数,未得到最终回答")
         return LLMRunResult(handled=False, error="max_rounds")

-    # ---------------------------------------------------------------------
-    # Prompt-based fallback workflow
-    # ---------------------------------------------------------------------
-
-    def _run_prompt_loop(
-        self,
-        ctx: MessageContext,
-        chat_model: Any,
-        functions: Dict[str, FunctionSpec],
-        executor: Callable[[FunctionSpec, Dict[str, Any]], FunctionResult],
-    ) -> LLMRunResult:
-        system_prompt = self._build_prompt_system_text(functions)
-        user_input = f"用户输入:{ctx.text}"
-
-        ai_response = chat_model.get_answer(
-            user_input,
-            wxid=ctx.get_receiver(),
-            system_prompt_override=system_prompt,
-        )
-
-        json_match = re.search(r"\{.*\}", ai_response, re.DOTALL)
-        if not json_match:
-            self.logger.warning(f"提示词模式下无法解析JSON: {ai_response}")
-            return LLMRunResult(handled=False)
-
-        try:
-            decision = json.loads(json_match.group(0))
-        except json.JSONDecodeError as exc:
-            self.logger.error(f"提示词模式 JSON 解析失败: {exc}")
-            return LLMRunResult(handled=False)
-
-        action_type = decision.get("action_type")
-        if action_type == "chat":
-            # 提示词模式下无法获得模型最终回答,交给上层兜底
-            return LLMRunResult(handled=False)
-
-        if action_type != "function":
-            self.logger.warning(f"未知的action_type: {action_type}")
-            return LLMRunResult(handled=False)
-
-        function_name = decision.get("function_name")
-        if function_name not in functions:
-            self.logger.warning(f"未知的功能名 - {function_name}")
-            return LLMRunResult(handled=False)
-
-        arguments = decision.get("arguments", {})
-        result = executor(functions[function_name], arguments)
-        if not result.handled:
-            return LLMRunResult(handled=False)
-
-        return LLMRunResult(handled=True, final_response="\n".join(result.messages))
-
     # ------------------------------------------------------------------
     # Helpers
     # ------------------------------------------------------------------

     @staticmethod
     def _convert_assistant_message(message: Any) -> Dict[str, Any]:
         entry: Dict[str, Any] = {
@@ -224,16 +170,6 @@ class FunctionCallLLM:
         )
         return openai_functions

-    @staticmethod
-    def _build_prompt_system_text(functions: Dict[str, FunctionSpec]) -> str:
-        prompt = """你是一个智能路由助手。根据用户输入判断是否需要调用以下函数之一。"""
-        for spec in functions.values():
-            prompt += f"\n- {spec.name}: {spec.description}"
-        prompt += """
-请严格输出JSON:{"action_type": "chat"} 或 {"action_type": "function", "function_name": "...", "arguments": {...}}
-"""
-        return prompt
-
     def validate_arguments(self, arguments: Dict[str, Any], schema: Dict[str, Any]) -> bool:
         try:
             required_fields = schema.get("required", [])
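The surviving `validate_arguments` starts from the schema's `required` list. A minimal self-contained version of that idea; only the `schema.get("required", [])` line is visible in this diff, so the rest is an illustrative assumption:

# Minimal required-field validation in the spirit of validate_arguments.
# Only the schema.get("required", []) line is visible in the diff;
# the membership check below is an illustrative assumption.
from typing import Any, Dict

def validate_required(arguments: Dict[str, Any], schema: Dict[str, Any]) -> bool:
    required_fields = schema.get("required", [])
    missing = [name for name in required_fields if name not in arguments]
    return not missing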
@@ -3,6 +3,7 @@
 from .reminder import create_reminder, list_reminders, delete_reminder
 from .group_tools import summarize_messages
 from .perplexity import run_perplexity
+from .chat import run_chat_fallback

 __all__ = [
     "create_reminder",
@@ -10,4 +11,5 @@ __all__ = [
     "delete_reminder",
     "summarize_messages",
     "run_perplexity",
+    "run_chat_fallback",
 ]
function_calls/services/chat.py (new file, 130 lines)
@@ -0,0 +1,130 @@
"""Chat fallback utilities for Function Call routing."""
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
from commands.context import MessageContext
|
||||
|
||||
|
||||
def run_chat_fallback(ctx: MessageContext) -> bool:
|
||||
"""Send a conversational reply using the active chat model.
|
||||
|
||||
This is used when no Function Call handler processes the message.
|
||||
Returns True if a reply was sent successfully.
|
||||
"""
|
||||
chat_model = getattr(ctx, "chat", None) or getattr(ctx.robot, "chat", None)
|
||||
if not chat_model:
|
||||
if ctx.logger:
|
||||
ctx.logger.error("聊天兜底失败:没有可用的 chat 模型")
|
||||
ctx.send_text("抱歉,我现在无法进行对话。")
|
||||
return False
|
||||
|
||||
specific_max_history: Optional[int] = getattr(ctx, "specific_max_history", None)
|
||||
|
||||
if getattr(ctx, "is_quoted_image", False):
|
||||
if not _handle_quoted_image(ctx, chat_model):
|
||||
return False
|
||||
return True
|
||||
|
||||
prompt = _build_prompt(ctx)
|
||||
if ctx.logger:
|
||||
ctx.logger.info(f"闲聊兜底发送给 AI 的内容:\n{prompt}")
|
||||
|
||||
try:
|
||||
answer = chat_model.get_answer(
|
||||
question=prompt,
|
||||
wxid=ctx.get_receiver(),
|
||||
specific_max_history=specific_max_history,
|
||||
)
|
||||
except Exception as exc: # pragma: no cover - safety net
|
||||
if ctx.logger:
|
||||
ctx.logger.error(f"闲聊兜底调用模型失败: {exc}")
|
||||
return False
|
||||
|
||||
if not answer:
|
||||
if ctx.logger:
|
||||
ctx.logger.warning("闲聊兜底返回空响应")
|
||||
return False
|
||||
|
||||
at_list = ctx.msg.sender if ctx.is_group else ""
|
||||
ctx.send_text(answer, at_list)
|
||||
return True
|
||||
|
||||
|
||||
def _build_prompt(ctx: MessageContext) -> str:
|
||||
sender_name = ctx.sender_name
|
||||
content = ctx.text or ""
|
||||
|
||||
if ctx.robot and hasattr(ctx.robot, "xml_processor"):
|
||||
if ctx.is_group:
|
||||
msg_data = ctx.robot.xml_processor.extract_quoted_message(ctx.msg)
|
||||
formatted = ctx.robot.xml_processor.format_message_for_ai(msg_data, sender_name)
|
||||
else:
|
||||
msg_data = ctx.robot.xml_processor.extract_private_quoted_message(ctx.msg)
|
||||
formatted = ctx.robot.xml_processor.format_message_for_ai(msg_data, sender_name)
|
||||
|
||||
if formatted:
|
||||
return formatted
|
||||
|
||||
current_time = time.strftime("%H:%M", time.localtime())
|
||||
return f"[{current_time}] {sender_name}: {content or '[空内容]'}"
|
||||
|
||||
|
||||
def _handle_quoted_image(ctx: MessageContext, chat_model) -> bool:
|
||||
if ctx.logger:
|
||||
ctx.logger.info("检测到引用图片,尝试走模型图片理解能力")
|
||||
|
||||
from ai_providers.ai_chatgpt import ChatGPT # 避免循环导入
|
||||
|
||||
support_vision = False
|
||||
if isinstance(chat_model, ChatGPT):
|
||||
support_vision = getattr(chat_model, "support_vision", False)
|
||||
if not support_vision and hasattr(chat_model, "model"):
|
||||
model_name = getattr(chat_model, "model", "")
|
||||
support_vision = model_name in {"gpt-4.1-mini", "gpt-4o"} or "-vision" in model_name
|
||||
|
||||
if not support_vision:
|
||||
ctx.send_text("当前模型不支持图片理解,请联系管理员配置支持视觉的模型。")
|
||||
return True
|
||||
|
||||
import os
|
||||
|
||||
temp_dir = "temp/image_cache"
|
||||
os.makedirs(temp_dir, exist_ok=True)
|
||||
|
||||
try:
|
||||
image_path = ctx.wcf.download_image(
|
||||
id=ctx.quoted_msg_id,
|
||||
extra=ctx.quoted_image_extra,
|
||||
dir=temp_dir,
|
||||
timeout=30,
|
||||
)
|
||||
except Exception as exc: # pragma: no cover - IO 失败
|
||||
if ctx.logger:
|
||||
ctx.logger.error(f"图片下载失败: {exc}")
|
||||
ctx.send_text("抱歉,无法下载图片进行分析。")
|
||||
return True
|
||||
|
||||
if not image_path or not os.path.exists(image_path):
|
||||
ctx.send_text("抱歉,无法下载图片进行分析。")
|
||||
return True
|
||||
|
||||
prompt = ctx.text.strip() or "请详细描述这张图片"
|
||||
|
||||
try:
|
||||
response = chat_model.get_image_description(image_path, prompt)
|
||||
ctx.send_text(response)
|
||||
except Exception as exc: # pragma: no cover - 模型异常
|
||||
if ctx.logger:
|
||||
ctx.logger.error(f"图片分析失败: {exc}")
|
||||
ctx.send_text(f"分析图片时出错: {exc}")
|
||||
finally:
|
||||
try:
|
||||
if os.path.exists(image_path):
|
||||
os.remove(image_path)
|
||||
except OSError:
|
||||
if ctx.logger:
|
||||
ctx.logger.warning(f"清理临时图片失败: {image_path}")
|
||||
|
||||
return True
|
||||
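A quick way to exercise `run_chat_fallback` in isolation is a stub context. A minimal sketch; the attribute names mirror the usages in chat.py above, but the stub itself is hypothetical:

# Hypothetical stub for exercising run_chat_fallback without WeChat.
# Attribute names mirror the usages in chat.py above.
from function_calls.services import run_chat_fallback

class StubModel:
    def get_answer(self, question, wxid=None, specific_max_history=None):
        return f"echo: {question}"

class StubCtx:
    chat = StubModel()
    robot = None          # skips the xml_processor branch in _build_prompt
    logger = None
    text = "你好"
    sender_name = "tester"
    is_group = False      # so ctx.msg is never touched
    is_quoted_image = False
    specific_max_history = None

    def get_receiver(self):
        return "wxid_test"

    def send_text(self, text, at_list=""):
        print("reply:", text)

assert run_chat_fallback(StubCtx()) is True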
robot.py (66 lines changed)
@@ -25,13 +25,13 @@ from constants import ChatType
 from job_mgmt import Job
 from function.func_xml_process import XmlProcessor

-# 导入命令上下文与闲聊处理
+# 导入命令上下文
 from commands.context import MessageContext
-from commands.handlers import handle_chitchat  # 导入闲聊处理函数

 # 导入新的Function Call系统
 from function_calls.router import FunctionCallRouter
 import function_calls.init_handlers  # 导入以注册所有Function Call处理器
+from function_calls.services import run_chat_fallback

 __version__ = "39.2.4.0"
@@ -212,48 +212,28 @@ class Robot(Job):
                 self.LOG.info("消息已由Function Call路由器处理")
                 return

-            # 7. 如果没有命令处理器处理,则进行特殊逻辑处理
-            if not handled:
-                # 7.1 好友请求自动处理
-                if msg.type == 37:  # 好友请求
-                    self.autoAcceptFriendRequest(msg)
-
-                # 7.2 系统消息处理
-                elif msg.type == 10000:
-                    # 7.2.1 处理新成员入群
-                    if "加入了群聊" in msg.content and msg.from_group():
-                        new_member_match = re.search(r'"(.+?)"邀请"(.+?)"加入了群聊', msg.content)
-                        if new_member_match:
-                            inviter = new_member_match.group(1)  # 邀请人
-                            new_member = new_member_match.group(2)  # 新成员
-                            # 使用配置文件中的欢迎语,支持变量替换
-                            welcome_msg = self.config.WELCOME_MSG.format(new_member=new_member, inviter=inviter)
-                            self.sendTextMsg(welcome_msg, msg.roomid)
-                            self.LOG.info(f"已发送欢迎消息给新成员 {new_member} 在群 {msg.roomid}")
-                            return
-                    # 7.2.2 处理新好友添加
-                    elif "你已添加了" in msg.content:
-                        self.sayHiToNewFriend(msg)
-                        return
-
-                # 7.3 群聊消息,且配置了响应该群
-                if msg.from_group() and msg.roomid in self.config.GROUPS:
-                    # 如果在群里被@了,但命令路由器没有处理,则进行闲聊
-                    if msg.is_at(self.wxid):
-                        # 调用handle_chitchat函数处理闲聊,传递完整的上下文
-                        handle_chitchat(ctx, None)
-                    else:
-                        pass
-
-                # 7.4 私聊消息,未被命令处理,进行闲聊
-                elif not msg.from_group() and not msg.from_self():
-                    # 检查是否是文本消息(type 1)或者是包含用户输入的类型49消息
-                    if msg.type == 1 or (msg.type == 49 and ctx.text):
-                        self.LOG.info(f"准备回复私聊消息: 类型={msg.type}, 文本内容='{ctx.text}'")
-                        # 调用handle_chitchat函数处理闲聊,传递完整的上下文
-                        handle_chitchat(ctx, None)
+            # 7. Function Call 未处理,则执行闲聊兜底或特殊逻辑
+            if msg.type == 37:  # 好友请求
+                self.autoAcceptFriendRequest(msg)
+                return
+
+            if msg.type == 10000:
+                if "加入了群聊" in msg.content and msg.from_group():
+                    new_member_match = re.search(r'"(.+?)"邀请"(.+?)"加入了群聊', msg.content)
+                    if new_member_match:
+                        inviter = new_member_match.group(1)
+                        new_member = new_member_match.group(2)
+                        welcome_msg = self.config.WELCOME_MSG.format(new_member=new_member, inviter=inviter)
+                        self.sendTextMsg(welcome_msg, msg.roomid)
+                        self.LOG.info(f"已发送欢迎消息给新成员 {new_member} 在群 {msg.roomid}")
+                        return
+
+                if "你已添加了" in msg.content:
+                    self.sayHiToNewFriend(msg)
+                    return
+
+            if not run_chat_fallback(ctx):
+                self.LOG.warning("闲聊兜底失败或未发送回复")

         except Exception as e:
             self.LOG.error(f"处理消息时发生错误: {str(e)}", exc_info=True)