refactor: 移除 Perplexity 相关功能并重构搜索工具

重构 WebSearch 工具使用 Tavily 替代 Perplexity
移除 Perplexity 相关代码、配置和依赖
简化配置文件和工具注册逻辑
This commit is contained in:
zihanjian
2026-02-25 13:11:32 +08:00
parent 664b63e4ec
commit 005ec4f473
13 changed files with 124 additions and 1347 deletions

View File

@@ -16,10 +16,10 @@ __all__ = [
]
def create_default_registry() -> ToolRegistry:
def create_default_registry(tavily_api_key: str | None = None) -> ToolRegistry:
"""创建包含所有默认工具的注册表"""
registry = ToolRegistry()
registry.register(WebSearchTool())
registry.register(WebSearchTool(api_key=tavily_api_key))
registry.register(ReminderCreateTool())
registry.register(ReminderListTool())
registry.register(ReminderDeleteTool())

View File

@@ -1,8 +1,8 @@
# agent/tools/web_search.py
"""网络搜索工具"""
"""网络搜索工具 - 使用 Tavily"""
import json
import re
import os
from typing import Any, TYPE_CHECKING
from .base import Tool
@@ -10,9 +10,23 @@ from .base import Tool
if TYPE_CHECKING:
from agent.context import AgentContext
# Tavily API
try:
from tavily import TavilyClient
TAVILY_AVAILABLE = True
except ImportError:
TAVILY_AVAILABLE = False
TavilyClient = None
class WebSearchTool(Tool):
"""网络搜索工具 - 使用 Perplexity 进行搜索"""
"""网络搜索工具 - 使用 Tavily 搜索引擎"""
def __init__(self, api_key: str | None = None):
self._api_key = api_key or os.getenv("TAVILY_API_KEY")
self._client = None
if TAVILY_AVAILABLE and self._api_key:
self._client = TavilyClient(api_key=self._api_key)
@property
def name(self) -> str:
@@ -21,8 +35,8 @@ class WebSearchTool(Tool):
@property
def description(self) -> str:
return (
"在网络上搜索信息。用于回答需要最新数据、实时信息或你不确定的事实性问题。"
"deep_research 仅在问题非常复杂、需要深度研究时才开启"
"在网络上搜索最新信息。用于回答需要实时数据、新闻、或你不确定的事实性问题。"
"返回多个搜索结果,包含标题、内容摘要和来源链接"
)
@property
@@ -34,10 +48,6 @@ class WebSearchTool(Tool):
"type": "string",
"description": "搜索关键词或问题",
},
"deep_research": {
"type": "boolean",
"description": "是否启用深度研究模式(耗时较长,仅用于复杂问题)",
},
},
"required": ["query"],
"additionalProperties": False,
@@ -45,7 +55,7 @@ class WebSearchTool(Tool):
@property
def status_text(self) -> str | None:
return "正在联网搜索: "
return "正在搜索: "
@property
def status_arg(self) -> str | None:
@@ -55,39 +65,47 @@ class WebSearchTool(Tool):
self,
ctx: "AgentContext",
query: str = "",
deep_research: bool = False,
**_,
) -> str:
perplexity_instance = getattr(ctx.robot, "perplexity", None)
if not perplexity_instance:
return json.dumps(
{"error": "Perplexity 搜索功能不可用,未配置或未初始化"},
ensure_ascii=False,
)
if not query:
return json.dumps({"error": "请提供搜索关键词"}, ensure_ascii=False)
if not TAVILY_AVAILABLE:
return json.dumps(
{"error": "Tavily 未安装,请运行: pip install tavily-python"},
ensure_ascii=False,
)
if not self._client:
return json.dumps(
{"error": "Tavily API key 未配置,请在 config.yaml 中设置 tavily.key 或环境变量 TAVILY_API_KEY"},
ensure_ascii=False,
)
try:
# Perplexity.get_answer 是同步方法,需要在线程中运行
import asyncio
response = await asyncio.to_thread(
perplexity_instance.get_answer,
query,
ctx.get_receiver(),
deep_research,
self._client.search,
query=query,
search_depth="basic",
max_results=5,
include_answer=False,
)
if not response:
return json.dumps({"error": "搜索无结果"}, ensure_ascii=False)
results = response.get("results", [])
if not results:
return json.dumps({"error": "未找到相关结果"}, ensure_ascii=False)
formatted = []
for r in results:
formatted.append({
"title": r.get("title", ""),
"content": r.get("content", ""),
"url": r.get("url", ""),
})
return json.dumps({"results": formatted, "query": query}, ensure_ascii=False)
# 清理 think 标签
cleaned = re.sub(
r"<think>.*?</think>", "", response, flags=re.DOTALL
).strip()
return json.dumps(
{"result": cleaned or response}, ensure_ascii=False
)
except Exception as e:
return json.dumps({"error": f"搜索失败: {e}"}, ensure_ascii=False)

View File

@@ -7,6 +7,5 @@ AI Providers Module
from .ai_chatgpt import ChatGPT
from .ai_deepseek import DeepSeek
from .ai_kimi import Kimi
from .ai_perplexity import Perplexity
__all__ = ["ChatGPT", "DeepSeek", "Kimi", "Perplexity"]
__all__ = ["ChatGPT", "DeepSeek", "Kimi"]

View File

@@ -1,489 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import json
import logging
import re
import time
from typing import Optional, Dict, Callable, List
import os
from threading import Thread, Lock
from openai import OpenAI
from providers.base import LLMProvider, LLMResponse, ToolCall
class PerplexityThread(Thread):
    """Daemon worker thread that serves a single Perplexity request.

    Fetches the answer synchronously via ``Perplexity.get_answer``, strips
    reasoning-model ``<think>`` blocks and Markdown markup, then pushes the
    result to the chat through ``send_text_func``.
    """

    def __init__(
        self,
        perplexity_instance,
        prompt,
        chat_id,
        send_text_func,
        receiver,
        at_user=None,
        on_finish: Optional[Callable[[], None]] = None,
        enable_full_research: bool = False,
    ):
        """Initialize the Perplexity worker thread.

        Args:
            perplexity_instance: the Perplexity provider instance
            prompt: query text
            chat_id: chat session id
            send_text_func: function used to send a message; called as
                ``send_text_func(text, at_list=...)``
            receiver: id of the message receiver
            at_user: id of the user to @-mention, if any
            on_finish: optional cleanup callback invoked when the thread ends
            enable_full_research: request the deep-research (reasoning) mode
        """
        super().__init__(daemon=True)
        self.perplexity = perplexity_instance
        self.prompt = prompt
        self.chat_id = chat_id
        self.send_text_func = send_text_func
        self.receiver = receiver
        self.at_user = at_user
        self.LOG = logging.getLogger("PerplexityThread")
        self.on_finish = on_finish
        self.enable_full_research = enable_full_research
        # Decide whether the reasoning model will actually be used: both the
        # caller's flag and the provider's configuration must agree.
        self.is_reasoning_model = bool(self.enable_full_research and getattr(self.perplexity, 'has_reasoning_model', False))
        if self.is_reasoning_model:
            self.LOG.info("Perplexity将启用推理模型处理此次请求")

    def run(self):
        """Thread entry point: fetch the answer and deliver it to the chat."""
        try:
            self.LOG.info(f"开始处理Perplexity请求: {self.prompt[:30]}...")
            # Blocking call to fetch the answer.
            response = self.perplexity.get_answer(
                self.prompt,
                self.chat_id,
                deep_research=self.enable_full_research
            )
            # Handle <think> tags emitted by sonar-reasoning / sonar-reasoning-pro models.
            if response:
                # Only reasoning-model responses need the cleanup pass.
                if self.is_reasoning_model:
                    response = self.remove_thinking_content(response)
                # Strip Markdown markup before sending plain text to the chat.
                response = self.remove_markdown_formatting(response)
                self.send_text_func(response, at_list=self.at_user)
            else:
                self.send_text_func("无法从Perplexity获取回答", at_list=self.at_user)
            self.LOG.info(f"Perplexity请求处理完成: {self.prompt[:30]}...")
        except Exception as e:
            self.LOG.error(f"处理Perplexity请求时出错: {e}")
            self.send_text_func(f"处理请求时出错: {e}", at_list=self.at_user)
        finally:
            # Always fire the completion callback so the manager can drop
            # its reference to this thread, even on failure.
            if self.on_finish:
                try:
                    self.on_finish()
                except Exception as cleanup_error:
                    self.LOG.error(f"清理Perplexity线程时出错: {cleanup_error}")

    def remove_thinking_content(self, text):
        """Remove reasoning content between ``<think>`` and ``</think>`` tags.

        Args:
            text: raw response text

        Returns:
            str: cleaned text; falls back to the original text on any error
        """
        try:
            # Fast check before doing any regex work.
            has_thinking = '<think>' in text or '</think>' in text
            if has_thinking:
                self.LOG.info("检测到思考内容标签,准备移除...")
                # Local import kept from the original code (re is also
                # available at module level).
                import re
                # Warn about unbalanced tag pairs — they are handled below.
                if text.count('<think>') != text.count('</think>'):
                    self.LOG.warning(f"检测到不匹配的思考标签: <think>数量={text.count('<think>')}, </think>数量={text.count('</think>')}")
                # Extract the thinking content only for debug logging.
                thinking_pattern = re.compile(r'<think>(.*?)</think>', re.DOTALL)
                thinking_matches = thinking_pattern.findall(text)
                if thinking_matches:
                    for i, thinking in enumerate(thinking_matches):
                        short_thinking = thinking[:100] + '...' if len(thinking) > 100 else thinking
                        self.LOG.debug(f"思考内容 #{i+1}: {short_thinking}")
                # Drop every complete <think>...</think> span (non-greedy).
                cleaned_text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
                # Handle dangling tags as well:
                cleaned_text = re.sub(r'<think>.*?$', '', cleaned_text, flags=re.DOTALL)  # unclosed opening tag
                cleaned_text = re.sub(r'^.*?</think>', '', cleaned_text, flags=re.DOTALL)  # closing tag with no opener
                # Collapse runs of blank lines left behind by the removal.
                cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text)
                # Trim leading/trailing whitespace.
                cleaned_text = cleaned_text.strip()
                self.LOG.info(f"思考内容已移除,原文本长度: {len(text)} -> 清理后: {len(cleaned_text)}")
                # If nothing is left, tell the user instead of sending "".
                if not cleaned_text:
                    return "回答内容为空,可能是模型仅返回了思考过程。请重新提问。"
                return cleaned_text
            else:
                return text  # no thinking tags — return unchanged
        except Exception as e:
            self.LOG.error(f"清理思考内容时出错: {e}")
            return text  # best effort: return the original text on error

    def remove_markdown_formatting(self, text):
        """Strip Markdown markers such as ``*`` and ``#`` from *text*.

        Args:
            text: text possibly containing Markdown markup

        Returns:
            str: text with heading/emphasis markers removed; the original
            text is returned unchanged if cleanup fails
        """
        try:
            # Local import kept from the original code.
            import re
            self.LOG.info("开始移除Markdown格式符号...")
            # Remember the original length for the log message below.
            original_length = len(text)
            # Remove heading markers (#) while keeping the heading text.
            cleaned_text = re.sub(r'^\s*#{1,6}\s+(.+)$', r'\1', text, flags=re.MULTILINE)
            # Remove emphasis markers: **bold** first, then *italic*,
            # keeping the enclosed text.
            cleaned_text = re.sub(r'\*\*(.*?)\*\*', r'\1', cleaned_text)
            cleaned_text = re.sub(r'\*(.*?)\*', r'\1', cleaned_text)
            # Collapse runs of blank lines.
            cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text)
            # Trim leading/trailing whitespace.
            cleaned_text = cleaned_text.strip()
            self.LOG.info(f"Markdown格式符号已移除原文本长度: {original_length} -> 清理后: {len(cleaned_text)}")
            return cleaned_text
        except Exception as e:
            self.LOG.error(f"移除Markdown格式符号时出错: {e}")
            return text  # best effort: return the original text on error
class PerplexityManager:
    """Tracks running PerplexityThread workers, one per (receiver, chat_id).

    Guards the thread table with a non-reentrant Lock; dedupes concurrent
    requests for the same conversation and cleans up finished threads.
    """

    def __init__(self):
        # thread_key ("{receiver}_{chat_id}") -> PerplexityThread
        self.threads = {}
        self.lock = Lock()
        self.LOG = logging.getLogger("PerplexityManager")

    def start_request(
        self,
        perplexity_instance,
        prompt,
        chat_id,
        send_text_func,
        receiver,
        at_user=None,
        enable_full_research: bool = False,
    ):
        """Start a worker thread for one Perplexity request.

        Args:
            perplexity_instance: the Perplexity provider instance
            prompt: query text
            chat_id: chat session id
            send_text_func: function used to send messages
            receiver: id of the message receiver
            at_user: id of the user to @-mention, if any
            enable_full_research: request deep-research (reasoning) mode

        Returns:
            bool: True if a thread was started, False if one is already
            running for this conversation
        """
        thread_key = f"{receiver}_{chat_id}"
        # Deep research only actually runs when the provider has a reasoning model.
        full_research_available = enable_full_research and getattr(perplexity_instance, 'has_reasoning_model', False)
        with self.lock:
            # Reject duplicate requests for the same conversation.
            if thread_key in self.threads and self.threads[thread_key].is_alive():
                send_text_func("⚠️ 已有一个Perplexity请求正在处理中请稍后再试", at_list=at_user)
                return False
            # Send a "please wait" notice first.
            wait_msg = "正在启用满血模式研究中...." if full_research_available else "正在联网查询,请稍候..."
            if enable_full_research and not full_research_available:
                self.LOG.warning("收到满血模式请求,但未配置推理模型,退回普通模式。")
            # The wait notice does not @ the user, to avoid pinging them twice.
            send_text_func(wait_msg, at_list="", record_message=False)

            # Completion callback: drops the thread table entry. It runs on the
            # worker thread after this method has released the lock, so taking
            # self.lock again here is safe (the Lock is non-reentrant).
            def thread_finished_callback():
                with self.lock:
                    thread = self.threads.pop(thread_key, None)
                    if thread is not None:
                        self.LOG.info(f"已清理Perplexity线程: {thread_key}")

            # Create and start the worker for this request.
            perplexity_thread = PerplexityThread(
                perplexity_instance=perplexity_instance,
                prompt=prompt,
                chat_id=chat_id,
                send_text_func=send_text_func,
                receiver=receiver,
                at_user=at_user,
                on_finish=thread_finished_callback,
                enable_full_research=full_research_available
            )
            # Keep a reference so duplicates can be detected.
            self.threads[thread_key] = perplexity_thread
            perplexity_thread.start()
            self.LOG.info(f"已启动Perplexity请求线程: {thread_key}")
            return True

    def cleanup_threads(self):
        """Wait (up to ~10s) for all worker threads, then clear the table.

        NOTE(review): the lock is taken and released around each snapshot of
        the table rather than held across the wait loop — holding it would
        deadlock against thread_finished_callback.
        """
        with self.lock:
            active_threads = [thread_key for thread_key, thread in self.threads.items() if thread.is_alive()]
        if active_threads:
            self.LOG.info(f"等待{len(active_threads)}个Perplexity线程结束: {active_threads}")
            # Poll once per second, for at most 10 seconds.
            for _ in range(10):
                with self.lock:
                    active_count = sum(1 for thread in self.threads.values() if thread.is_alive())
                if active_count == 0:
                    break
                time.sleep(1)
            with self.lock:
                still_active = [thread_key for thread_key, thread in self.threads.items() if thread.is_alive()]
                if still_active:
                    self.LOG.warning(f"以下Perplexity线程在退出时仍在运行: {still_active}")
                self.threads.clear()
        else:
            with self.lock:
                self.threads.clear()
        self.LOG.info("Perplexity线程管理已清理")
class Perplexity(LLMProvider):
    """LLM provider backed by the Perplexity API (OpenAI-compatible client).

    Supports a fast model (``model_flash``) and an optional reasoning model
    (``model_reasoning``) for deep-research requests, plus threaded message
    handling via PerplexityManager.
    """

    def __init__(self, config):
        """Build the provider from a config dict (key, api, proxy, models...)."""
        self.config = config
        self.api_key = config.get('key')
        self.api_base = config.get('api', 'https://api.perplexity.ai')
        self.proxy = config.get('proxy')
        self.prompt = config.get('prompt', '你是智能助手Perplexity')
        self.trigger_keyword = config.get('trigger_keyword', 'ask')
        self.fallback_prompt = config.get('fallback_prompt', "请像 Perplexity 一样以专业、客观、信息丰富的方式回答问题。不要使用任何tex或者md格式,纯文本输出。")
        self.LOG = logging.getLogger('Perplexity')
        # Fast model, with legacy 'model' key as fallback.
        self.model_flash = config.get('model_flash') or config.get('model', 'sonar')
        self.model_reasoning = config.get('model_reasoning')
        # A reasoning model identical to the flash model adds nothing — drop it.
        if self.model_reasoning and self.model_reasoning.lower() == (self.model_flash or '').lower():
            self.model_reasoning = None
        self.has_reasoning_model = bool(self.model_reasoning)
        # Force UTF-8 I/O so Unicode content is handled correctly.
        os.environ["PYTHONIOENCODING"] = "utf-8"
        # Thread manager for per-conversation request workers.
        self.thread_manager = PerplexityManager()
        # Build the OpenAI-compatible client, if a key is configured.
        self.client = None
        if self.api_key:
            try:
                self.client = OpenAI(
                    api_key=self.api_key,
                    base_url=self.api_base
                )
                # The OpenAI client has no direct proxy option — set the
                # standard proxy environment variables instead.
                if self.proxy:
                    os.environ["HTTPS_PROXY"] = self.proxy
                    os.environ["HTTP_PROXY"] = self.proxy
                self.LOG.info("Perplexity 客户端已初始化")
            except Exception as e:
                self.LOG.error(f"初始化Perplexity客户端失败: {str(e)}")
        else:
            self.LOG.warning("未配置Perplexity API密钥")

    @staticmethod
    def value_check(args: dict) -> bool:
        """Return True when the config dict has all required values set
        (the optional 'proxy' entry is allowed to be None)."""
        if args:
            return all(value is not None for key, value in args.items() if key != 'proxy')
        return False

    def get_answer(self, prompt, session_id=None, deep_research: bool = False):
        """Fetch an answer from Perplexity (synchronous).

        Args:
            prompt: user question
            session_id: session id distinguishing conversations (unused here)
            deep_research: use the reasoning model when one is configured

        Returns:
            str: the answer, or an error message string on failure
        """
        try:
            if not self.api_key or not self.client:
                return "Perplexity API key 未配置或客户端初始化失败"
            # Build the chat message list.
            messages = [
                {"role": "system", "content": self.prompt},
                {"role": "user", "content": prompt}
            ]
            # Pick the model: reasoning model only for deep research.
            model = self.model_reasoning if (deep_research and self.has_reasoning_model) else self.model_flash or self.config.get('model', 'sonar')
            if deep_research and self.has_reasoning_model:
                self.LOG.info(f"Perplexity启动深度研究模式使用模型: {model}")
            # Serialize via json for correct Unicode in the log.
            self.LOG.info(f"发送到Perplexity的消息: {json.dumps(messages, ensure_ascii=False)}")
            # Create the chat completion.
            response = self.client.chat.completions.create(
                model=model,
                messages=messages
            )
            # Return the answer content.
            return response.choices[0].message.content
        except Exception as e:
            self.LOG.error(f"调用Perplexity API时发生错误: {str(e)}")
            return f"发生错误: {str(e)}"

    def process_message(
        self,
        content,
        chat_id,
        sender,
        roomid,
        from_group,
        send_text_func,
        enable_full_research: bool = False,
    ):
        """Handle a message that may start with the Perplexity trigger keyword.

        Args:
            content: message text
            chat_id: chat session id
            sender: sender id
            roomid: group chat id (when applicable)
            from_group: whether the message came from a group chat
            send_text_func: function used to send messages
            enable_full_research: request deep-research mode

        Returns:
            tuple[bool, Optional[str]]:
                - bool: whether the message was handled
                - Optional[str]: fallback prompt when unauthorized; None otherwise
        """
        prompt = (content or "").strip()
        if not prompt:
            return False, None
        stripped_by_keyword = False
        trigger = (self.trigger_keyword or "").strip()
        # Strip the trigger keyword (case-insensitive prefix match).
        if trigger:
            lowered_prompt = prompt.lower()
            lowered_trigger = trigger.lower()
            if lowered_prompt.startswith(lowered_trigger):
                stripped_by_keyword = True
                prompt = prompt[len(trigger):].strip()
        # Nothing left after the keyword: ask the user for a query.
        if not prompt:
            if stripped_by_keyword:
                send_text_func(
                    "请告诉我你想搜索什么内容",
                    at_list=sender if from_group else "",
                    record_message=False
                )
                return True, None
            return False, None
        # Group messages reply to the room and @ the sender; private replies
        # go straight back to the sender.
        receiver = roomid if from_group else sender
        at_user = sender if from_group else None
        request_started = self.thread_manager.start_request(
            perplexity_instance=self,
            prompt=prompt,
            chat_id=chat_id,
            send_text_func=send_text_func,
            receiver=receiver,
            at_user=at_user,
            enable_full_research=enable_full_research
        )
        return request_started, None

    def cleanup(self):
        """Release all resources (waits for worker threads)."""
        self.thread_manager.cleanup_threads()

    async def chat(
        self,
        messages: list[dict],
        tools: list[dict] | None = None,
    ) -> LLMResponse:
        """Async LLM call implementing the LLMProvider interface.

        Note: Perplexity does not support tool calling — the *tools*
        argument is ignored.
        """
        if not self.api_key or not self.client:
            return LLMResponse(content="Perplexity API key 未配置或客户端初始化失败")
        try:
            model = self.model_flash or self.config.get("model", "sonar")
            # The OpenAI client call is synchronous — run it in a thread.
            response = await asyncio.to_thread(
                self.client.chat.completions.create,
                model=model,
                messages=messages,
            )
            content = response.choices[0].message.content
            # Perplexity never returns tool calls.
            return LLMResponse(content=content, tool_calls=[])
        except Exception as e:
            self.LOG.error(f"Perplexity API 调用失败: {e}")
            return LLMResponse(content=f"发生错误: {str(e)}")

    def __str__(self):
        return "Perplexity"

22
bot.py
View File

@@ -12,7 +12,6 @@ from channel import Channel, Message, MessageType
from ai_providers.ai_chatgpt import ChatGPT
from ai_providers.ai_deepseek import DeepSeek
from ai_providers.ai_kimi import Kimi
from ai_providers.ai_perplexity import Perplexity
from function.func_summary import MessageSummary
from function.func_reminder import ReminderManager
from function.func_persona import (
@@ -87,7 +86,8 @@ class BubblesBot:
self.persona_manager = None
# 初始化 Agent Loop 系统
self.tool_registry = create_default_registry()
tavily_key = getattr(self.config, "TAVILY", {}).get("key") if hasattr(self.config, "TAVILY") else None
self.tool_registry = create_default_registry(tavily_api_key=tavily_key)
self.agent_loop = AgentLoop(self.tool_registry, max_iterations=20)
self.session_manager = SessionManager(
message_summary=self.message_summary,
@@ -186,24 +186,6 @@ class BubblesBot:
except Exception as e:
self.LOG.error(f"初始化 Kimi 失败: {e}")
# Perplexity
if Perplexity.value_check(self.config.PERPLEXITY):
try:
flash_conf = copy.deepcopy(self.config.PERPLEXITY)
flash_model = flash_conf.get("model_flash", "sonar")
flash_conf["model"] = flash_model
self.chat_models[ChatType.PERPLEXITY.value] = Perplexity(flash_conf)
self.LOG.info(f"已加载 Perplexity: {flash_model}")
reasoning_model = self.config.PERPLEXITY.get("model_reasoning")
if reasoning_model and reasoning_model != flash_model:
reason_conf = copy.deepcopy(self.config.PERPLEXITY)
reason_conf["model"] = reasoning_model
self.reasoning_chat_models[ChatType.PERPLEXITY.value] = Perplexity(reason_conf)
self.LOG.info(f"已加载 Perplexity 推理模型: {reasoning_model}")
except Exception as e:
self.LOG.error(f"初始化 Perplexity 失败: {e}")
async def start(self) -> None:
"""启动机器人"""
self.LOG.info(f"BubblesBot v{__version__} 启动中...")

View File

@@ -1,162 +1,51 @@
# Bubbles 配置文件
# 复制此文件为 config.yaml 并填写 API Key
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# AI 模型(至少配置一个)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
chatgpt:
key: # OpenAI API Key
api: https://api.openai.com/v1
model_flash: gpt-4o-mini
model_reasoning: gpt-4o
proxy: # 代理(可选)
deepseek:
key: # DeepSeek API Key
api: https://api.deepseek.com
model_flash: deepseek-chat
model_reasoning: deepseek-reasoner
kimi:
key: # Moonshot API Key
api: https://api.moonshot.cn/v1
model_flash: kimi-k2
model_reasoning: kimi-k2-thinking
# 搜索工具
tavily:
key: # Tavily API Key (https://tavily.com)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# 其他
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
send_rate_limit: 10 # 每分钟最大发送数
logging:
version: 1
disable_existing_loggers: False
disable_existing_loggers: false
formatters:
simple:
format: "%(asctime)s %(message)s"
datefmt: "%Y-%m-%d %H:%M:%S"
error:
format: "%(asctime)s %(name)s %(levelname)s %(filename)s::%(funcName)s[%(lineno)d]:%(message)s"
format: "%(asctime)s [%(levelname)s] %(message)s"
datefmt: "%H:%M:%S"
handlers:
console:
class: logging.StreamHandler
level: INFO
formatter: simple
stream: ext://sys.stdout
info_file_handler:
class: logging.handlers.RotatingFileHandler
level: INFO
formatter: simple
filename: wx_info.log
maxBytes: 10485760 # 10MB
backupCount: 20
encoding: utf8
warning_file_handler:
class: logging.handlers.RotatingFileHandler
level: WARNING
formatter: simple
filename: wx_warning.log
maxBytes: 10485760 # 10MB
backupCount: 20
encoding: utf8
error_file_handler:
class: logging.handlers.RotatingFileHandler
level: ERROR
formatter: error
filename: wx_error.log
maxBytes: 10485760 # 10MB
backupCount: 20
encoding: utf8
root:
level: INFO
handlers: [console, info_file_handler, error_file_handler]
groups:
enable: [example12345@chatroom,example12345@chatroom] # 允许响应的群 roomId大概长这样2xxxxxxxxx3@chatroom
welcome_msg: "欢迎 {new_member} 加入群聊!\n请简单介绍一下自己吧~\n如果想和我聊天可以@我" # 新人入群欢迎消息,可使用{new_member}和{inviter}变量
# 群聊与AI模型映射如果不配置则使用默认模型
models:
# 模型ID参考
# 0: 自动选择第一个可用模型
# 1: ChatGPT
# 2: DeepSeek
# 3: Kimi
# 4: Perplexity
default: 0 # 默认模型ID0表示自动选择第一个可用模型
# 群聊映射
mapping:
- room_id: example12345@chatroom
model: 2
max_history: 30 # 回顾最近30条消息
random_chitchat_probability: 0.2 # 群聊随机闲聊概率0-10 表示关闭
force_reasoning: true # 闲聊时强制使用推理模型AI 路由仍正常执行)
- room_id: example12345@chatroom
model: 7
max_history: 30 # 回顾最近30条消息
random_chitchat_probability: 0.0 # 可单独覆盖默认概率
# 私聊映射
private_mapping:
- wxid: filehelper
model: 2
max_history: 30 # 回顾最近30条消息
- wxid: wxid_example12345
model: 8
max_history: 30 # 回顾最近30条消息
message_forwarding:
enable: false # 是否开启转发功能
rules:
- source_room_id: example12345@chatroom # 需要监听的群ID
target_room_ids:
- example67890@chatroom # 接受转发消息的群ID
keywords:
- "关键词1"
- "关键词2"
# - source_room_id: another_group@chatroom
# target_room_ids: ["target_group@chatroom"]
# keywords: ["需要的词"]
MAX_HISTORY: 300 # 记录数据库的消息历史
news:
receivers: ["filehelper"] # 定时新闻接收人roomid 或者 wxid
# 消息发送速率限制一分钟内最多发送6条消息
send_rate_limit: 6
weather: # -----天气提醒配置这行不填-----
city_code: 101010100 # 北京城市代码如若需要其他城市可参考base/main_city.json或者自寻城市代码填写
receivers: ["filehelper"] # 天气提醒接收人roomid 或者 wxid
chatgpt: # -----chatgpt配置这行不填-----
key: # 填写你 ChatGPT 的 key
api: https://api.openai.com/v1 # 如果你不知道这是干嘛的,就不要改
model_flash: gpt-3.5-turbo # 快速回复模型(可选)
model_reasoning: gpt-3.5-turbo # 深度思考模型(可选)
proxy: # 如果你在国内你可能需要魔法大概长这样http://域名或者IP地址:端口号
prompt: 你是智能聊天机器人,你叫 wcferry # 根据需要对角色进行设定
max_history_messages: 20 # <--- 添加这一行,设置 ChatGPT 最多回顾 20 条历史消息
deepseek: # -----deepseek配置这行不填-----
#思维链相关功能默认关闭开启后会增加响应时间和消耗更多的token
key: # 填写你的 DeepSeek API Key API Key的格式为sk-xxxxxxxxxxxxxxx
api: https://api.deepseek.com # DeepSeek API 地址
model_flash: deepseek-chat # 快速回复模型
model_reasoning: deepseek-reasoner # 深度思考模型
prompt: 你是智能聊天机器人,你叫 DeepSeek 助手 # 根据需要对角色进行设定
enable_reasoning: false # 是否启用思维链功能,仅在使用 deepseek-reasoner 模型时有效
show_reasoning: false # 是否在回复中显示思维过程,仅在启用思维链功能时有效
max_history_messages: 10 # <--- 添加这一行,设置 DeepSeek 最多回顾 10 条历史消息
kimi: # -----kimi配置-----
key: # 填写你的 Moonshot API Key
api: https://api.moonshot.cn/v1 # Kimi API 地址
proxy: # 国内可按需配置代理例如http://127.0.0.1:7890
model_flash: kimi-k2 # 快速回复模型
model_reasoning: kimi-k2-thinking # 深度思考模型
prompt: 你是 Kimi一个由 Moonshot AI 构建的可靠助手 # 角色设定
max_history_messages: 20 # 设置 Kimi 最多回顾 20 条历史消息
show_reasoning: false # 是否在回复中附带 reasoning_content 内容
aliyun_image: # -----如果要使用阿里云文生图,取消下面的注释并填写相关内容,模型到阿里云百炼找通义万相-文生图2.1-Turbo-----
enable: true # 是否启用阿里文生图功能false为关闭默认开启如果未配置则会将消息发送给聊天大模型
api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxx # 替换为你的DashScope API密钥
model: wanx2.1-t2i-turbo # 模型名称默认使用wanx2.1-t2i-turbo(快),wanx2.1-t2i-plus,wanx-v1会给用户不同的提示
size: 1024*1024 # 图像尺寸,格式为宽*高
n: 1 # 生成图像的数量
temp_dir: ./temp # 临时文件存储路径
trigger_keyword: 牛阿里 # 触发词,默认为"牛阿里"
fallback_to_chat: true # 当服务不可用时是否转发给聊天模型处理
perplexity: # -----perplexity配置这行不填-----
key: # 填写你的Perplexity API Key
api: https://api.perplexity.ai # API地址
proxy: # 如果你在国内你可能需要魔法大概长这样http://域名或者IP地址:端口号
model_flash: mixtral-8x7b-instruct # 快速回复模型(可选)
model_reasoning: mixtral-8x7b-instruct # 深度思考模型(可选)
prompt: 你是Perplexity AI助手请用专业、准确、有帮助的方式回答问题 # 角色设定
ai_router: # -----AI路由器配置-----
enable: true # 是否启用AI路由功能
allowed_groups: [] # 允许使用AI路由的群聊ID列表例如["123456789@chatroom", "123456789@chatroom"]
auto_accept_friend_request: false # 是否自动通过好友申请,默认关闭
handlers: [console]

View File

@@ -12,21 +12,6 @@ class Config(object):
def __init__(self) -> None:
self.reload()
@staticmethod
def _normalize_random_chitchat_probability(entry, fallback_probability=0.0):
if isinstance(entry, (int, float)):
probability = entry
elif isinstance(entry, dict):
probability = entry.get("probability", fallback_probability)
else:
probability = fallback_probability
try:
probability = float(probability)
except (TypeError, ValueError):
probability = fallback_probability
probability = max(0.0, min(1.0, probability))
return probability
def _load_config(self) -> dict:
pwd = os.path.dirname(os.path.abspath(__file__))
try:
@@ -37,64 +22,37 @@ class Config(object):
with open(f"{pwd}/config.yaml", "rb") as fp:
yconfig = yaml.safe_load(fp)
return yconfig
return yconfig or {}
def reload(self) -> None:
yconfig = self._load_config()
logging.config.dictConfig(yconfig["logging"])
self.CITY_CODE = yconfig["weather"]["city_code"]
self.WEATHER = yconfig["weather"]["receivers"]
self.GROUPS = yconfig["groups"]["enable"]
self.WELCOME_MSG = yconfig["groups"].get("welcome_msg", "欢迎 {new_member} 加入群聊!")
self.GROUP_MODELS = yconfig["groups"].get("models", {"default": 0, "mapping": []})
legacy_random_conf = yconfig["groups"].get("random_chitchat", {})
legacy_default = self._normalize_random_chitchat_probability(
legacy_random_conf.get("default", 0.0) if isinstance(legacy_random_conf, dict) else 0.0,
fallback_probability=0.0,
)
legacy_mapping = {}
if isinstance(legacy_random_conf, dict):
for item in legacy_random_conf.get("mapping", []) or []:
if not isinstance(item, dict):
continue
room_id = item.get("room_id")
if not room_id:
continue
legacy_mapping[room_id] = self._normalize_random_chitchat_probability(
item,
fallback_probability=legacy_default,
)
random_chitchat_mapping = {}
for item in self.GROUP_MODELS.get("mapping", []) or []:
if not isinstance(item, dict):
continue
room_id = item.get("room_id")
if not room_id:
continue
if "random_chitchat_probability" in item:
rate = self._normalize_random_chitchat_probability(
item["random_chitchat_probability"],
fallback_probability=legacy_default,
)
random_chitchat_mapping[room_id] = rate
elif room_id in legacy_mapping:
random_chitchat_mapping[room_id] = legacy_mapping[room_id]
# 日志配置
if "logging" in yconfig:
logging.config.dictConfig(yconfig["logging"])
self.GROUP_RANDOM_CHITCHAT_DEFAULT = legacy_default
self.GROUP_RANDOM_CHITCHAT = random_chitchat_mapping
self.NEWS = yconfig["news"]["receivers"]
# AI 模型配置
self.CHATGPT = yconfig.get("chatgpt", {})
self.DEEPSEEK = yconfig.get("deepseek", {})
self.KIMI = yconfig.get("kimi", {})
self.PERPLEXITY = yconfig.get("perplexity", {})
self.ALIYUN_IMAGE = yconfig.get("aliyun_image", {})
self.AI_ROUTER = yconfig.get("ai_router", {"enable": True, "allowed_groups": []})
self.AUTO_ACCEPT_FRIEND_REQUEST = yconfig.get("auto_accept_friend_request", False)
# 发送限制
self.SEND_RATE_LIMIT = yconfig.get("send_rate_limit", 10)
# Tavily 搜索
self.TAVILY = yconfig.get("tavily", {})
# 向后兼容(旧版 robot.py 可能用到)
self.GROUPS = yconfig.get("groups", {}).get("enable", [])
self.WELCOME_MSG = yconfig.get("groups", {}).get("welcome_msg", "")
self.GROUP_MODELS = yconfig.get("groups_models", {"default": 0})
self.MAX_HISTORY = yconfig.get("MAX_HISTORY", 300)
self.SEND_RATE_LIMIT = yconfig.get("send_rate_limit", 0)
self.MESSAGE_FORWARDING = yconfig.get(
"message_forwarding",
{"enable": False, "rules": []}
)
self.AUTO_ACCEPT_FRIEND_REQUEST = yconfig.get("auto_accept_friend_request", False)
self.NEWS = []
self.WEATHER = []
self.CITY_CODE = ""
self.ALIYUN_IMAGE = {}
self.MESSAGE_FORWARDING = {"enable": False, "rules": []}
self.AI_ROUTER = {"enable": False}
self.GROUP_RANDOM_CHITCHAT_DEFAULT = 0.0
self.GROUP_RANDOM_CHITCHAT = {}

View File

@@ -7,14 +7,12 @@ class ChatType(IntEnum):
CHATGPT = 1 # ChatGPT
DEEPSEEK = 2 # DeepSeek
KIMI = 3 # Kimi (Moonshot)
PERPLEXITY = 4 # Perplexity
@staticmethod
def is_in_chat_types(chat_type: int) -> bool:
if chat_type in [ChatType.CHATGPT.value,
ChatType.DEEPSEEK.value,
ChatType.KIMI.value,
ChatType.PERPLEXITY.value]:
ChatType.KIMI.value]:
return True
return False

View File

@@ -16,7 +16,6 @@ from wcferry import Wcf, WxMsg
from ai_providers.ai_chatgpt import ChatGPT
from ai_providers.ai_deepseek import DeepSeek
from ai_providers.ai_kimi import Kimi
from ai_providers.ai_perplexity import Perplexity
from function.func_weather import Weather
from function.func_news import News
from function.func_summary import MessageSummary
@@ -190,27 +189,7 @@ class Robot(Job):
self.LOG.info(f"已加载 Kimi 推理模型: {reasoning_model_name}")
except Exception as e:
self.LOG.error(f"初始化 Kimi 模型时出错: {str(e)}")
# 初始化Perplexity
if Perplexity.value_check(self.config.PERPLEXITY):
try:
perplexity_flash_conf = copy.deepcopy(self.config.PERPLEXITY)
flash_model_name = perplexity_flash_conf.get("model_flash", "sonar")
perplexity_flash_conf["model"] = flash_model_name
self.chat_models[ChatType.PERPLEXITY.value] = Perplexity(perplexity_flash_conf)
self.perplexity = self.chat_models[ChatType.PERPLEXITY.value] # 单独保存一个引用用于特殊处理
self.LOG.info(f"已加载 Perplexity 模型: {flash_model_name}")
reasoning_model_name = self.config.PERPLEXITY.get("model_reasoning")
if reasoning_model_name and reasoning_model_name != flash_model_name:
perplexity_reason_conf = copy.deepcopy(self.config.PERPLEXITY)
perplexity_reason_conf["model"] = reasoning_model_name
self.reasoning_chat_models[ChatType.PERPLEXITY.value] = Perplexity(perplexity_reason_conf)
self.LOG.info(f"已加载 Perplexity 推理模型: {reasoning_model_name}")
except Exception as e:
self.LOG.error(f"初始化 Perplexity 模型时出错: {str(e)}")
# 根据chat_type参数选择默认模型
self.current_model_id = None
if chat_type > 0 and chat_type in self.chat_models:
@@ -586,20 +565,10 @@ class Robot(Job):
for r in receivers:
self.sendTextMsg(report, r)
def cleanup_perplexity_threads(self):
"""清理所有Perplexity线程"""
# 如果已初始化Perplexity实例调用其清理方法
perplexity_instance = self.get_perplexity_instance()
if perplexity_instance:
perplexity_instance.cleanup()
def cleanup(self):
"""清理所有资源,在程序退出前调用"""
self.LOG.info("开始清理机器人资源...")
# 清理Perplexity线程
self.cleanup_perplexity_threads()
# 关闭消息历史数据库连接
if hasattr(self, 'message_summary') and self.message_summary:
self.LOG.info("正在关闭消息历史数据库...")
@@ -610,34 +579,9 @@ class Robot(Job):
self.persona_manager.close()
except Exception as e:
self.LOG.error(f"关闭人设数据库时出错: {e}")
self.LOG.info("机器人资源清理完成")
def get_perplexity_instance(self):
"""获取Perplexity实例
Returns:
Perplexity: Perplexity实例如果未配置则返回None
"""
# 检查是否已有Perplexity实例
if hasattr(self, 'perplexity'):
return self.perplexity
# 检查config中是否有Perplexity配置
if hasattr(self.config, 'PERPLEXITY') and Perplexity.value_check(self.config.PERPLEXITY):
self.perplexity = Perplexity(self.config.PERPLEXITY)
return self.perplexity
# 检查chat是否是Perplexity类型
if isinstance(self.chat, Perplexity):
return self.chat
# 如果存在chat_models字典尝试从中获取
if hasattr(self, 'chat_models') and ChatType.PERPLEXITY.value in self.chat_models:
return self.chat_models[ChatType.PERPLEXITY.value]
return None
def _get_reasoning_chat_model(self):
"""获取当前聊天模型对应的推理模型实例"""
model_id = getattr(self, 'current_model_id', None)

View File

@@ -1,105 +0,0 @@
"""
工具系统 —— 让 LLM 在 Agent 循环中自主调用工具。
每个 Tool 提供 OpenAI function-calling 格式的 schema 和一个同步执行函数。
ToolRegistry 汇总所有工具,生成 tools 列表和统一的 tool_handler。
"""
import json
import logging
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional
logger = logging.getLogger(__name__)
@dataclass
class Tool:
    """A tool the LLM may invoke during the agent loop.

    Attributes:
        name: unique tool identifier exposed to the model
        description: natural-language summary shown to the model
        parameters: JSON Schema for the accepted arguments
        handler: callable invoked as ``handler(ctx, **kwargs) -> str``
        status_text: progress hint sent to the user before execution;
            an empty string disables the hint
    """
    name: str
    description: str
    parameters: dict  # JSON Schema
    handler: Callable[..., str] = None  # (ctx, **kwargs) -> str
    status_text: str = ""  # pre-execution status hint; empty means none

    def to_openai_schema(self) -> dict:
        """Render this tool in OpenAI function-calling format."""
        function_spec = {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
        }
        return {"type": "function", "function": function_spec}
class ToolRegistry:
    """Collects tools and supplies the agent loop with tools + tool_handler."""

    def __init__(self):
        # name -> Tool
        self._tools: Dict[str, Tool] = {}

    def register(self, tool: Tool) -> None:
        """Register *tool* under its name, replacing any previous entry."""
        self._tools[tool.name] = tool
        logger.info(f"注册工具: {tool.name}")

    def get(self, name: str) -> Optional[Tool]:
        """Return the tool registered under *name*, or None."""
        return self._tools.get(name)

    @property
    def tools(self) -> Dict[str, Tool]:
        # Return a shallow copy so callers cannot mutate the registry.
        return dict(self._tools)

    def get_openai_tools(self) -> List[dict]:
        """Return the OpenAI function-calling schemas for all tools."""
        return [t.to_openai_schema() for t in self._tools.values()]

    def create_handler(self, ctx: Any) -> Callable[[str, dict], str]:
        """Create a tool_handler closure bound to one message context.

        Before running a tool that has a status_text configured, a status
        notice is sent to the user so they can see what the bot is doing
        (similar to the intermediate-step output of OpenClaw/OpenCode).
        """
        registry = self._tools

        def _send_status(tool: 'Tool', arguments: dict) -> None:
            """Send the tool's execution-status notice to the user."""
            if not tool.status_text:
                return
            try:
                # For search-style tools, append the query to the notice.
                status = tool.status_text
                if tool.name == "web_search" and arguments.get("query"):
                    status = f"{status}{arguments['query']}"
                elif tool.name == "lookup_chat_history" and arguments.get("keywords"):
                    kw_str = "".join(str(k) for k in arguments["keywords"][:3])
                    status = f"{status}{kw_str}"
                ctx.send_text(status, record_message=False)
            except Exception:
                pass  # a failed status notice must not block the tool itself

        def handler(tool_name: str, arguments: dict) -> str:
            # Unknown tool: report the error as a JSON string (the agent
            # loop feeds tool results back to the model as text).
            tool = registry.get(tool_name)
            if not tool:
                return json.dumps(
                    {"error": f"Unknown tool: {tool_name}"},
                    ensure_ascii=False,
                )
            _send_status(tool, arguments)
            try:
                result = tool.handler(ctx, **arguments)
                # Normalize non-string results to JSON text.
                if not isinstance(result, str):
                    result = json.dumps(result, ensure_ascii=False)
                return result
            except Exception as e:
                logger.error(f"工具 {tool_name} 执行失败: {e}", exc_info=True)
                return json.dumps({"error": str(e)}, ensure_ascii=False)

        return handler


# ── Global tool registry ────────────────────────────────────
tool_registry = ToolRegistry()

View File

@@ -1,190 +0,0 @@
"""聊天历史查询工具 —— 从 handlers.py 的内联定义中提取而来。
支持三种查询模式:
keywords — 关键词模糊搜索
range — 按倒序偏移取连续消息
time — 按时间窗口取消息
"""
import json
from tools import Tool, tool_registry
# Fallback count of recent messages assumed already visible to the model
# when the context does not specify its own history limit.
DEFAULT_VISIBLE_LIMIT = 30
def _handle_lookup_chat_history(ctx, mode: str = "", keywords: list = None,
start_offset: int = None, end_offset: int = None,
start_time: str = None, end_time: str = None,
**_) -> str:
message_summary = getattr(ctx.robot, "message_summary", None) if ctx.robot else None
if not message_summary:
return json.dumps({"error": "消息历史功能不可用"}, ensure_ascii=False)
chat_id = ctx.get_receiver()
visible_limit = DEFAULT_VISIBLE_LIMIT
raw = getattr(ctx, "specific_max_history", None)
if raw is not None:
try:
visible_limit = int(raw)
except (TypeError, ValueError):
pass
# 推断模式
mode = (mode or "").strip().lower()
if not mode:
if start_time and end_time:
mode = "time"
elif start_offset is not None and end_offset is not None:
mode = "range"
else:
mode = "keywords"
# ── keywords ────────────────────────────────────────────
if mode == "keywords":
if isinstance(keywords, str):
keywords = [keywords]
elif not isinstance(keywords, list):
keywords = []
cleaned = []
seen = set()
for kw in keywords:
if kw is None:
continue
s = str(kw).strip()
if s and (len(s) > 1 or s.isdigit()):
low = s.lower()
if low not in seen:
seen.add(low)
cleaned.append(s)
if not cleaned:
return json.dumps({"error": "未提供有效关键词", "results": []}, ensure_ascii=False)
search_results = message_summary.search_messages_with_context(
chat_id=chat_id,
keywords=cleaned,
context_window=10,
max_groups=20,
exclude_recent=visible_limit,
)
segments = []
lines_seen = set()
for seg in search_results:
formatted = [l for l in seg.get("formatted_messages", []) if l not in lines_seen]
lines_seen.update(formatted)
if formatted:
segments.append({
"matched_keywords": seg.get("matched_keywords", []),
"messages": formatted,
})
payload = {"segments": segments, "returned_groups": len(segments), "keywords": cleaned}
if not segments:
payload["notice"] = "未找到匹配的消息。"
return json.dumps(payload, ensure_ascii=False)
# ── range ───────────────────────────────────────────────
if mode == "range":
if start_offset is None or end_offset is None:
return json.dumps({"error": "range 模式需要 start_offset 和 end_offset"}, ensure_ascii=False)
try:
start_offset, end_offset = int(start_offset), int(end_offset)
except (TypeError, ValueError):
return json.dumps({"error": "start_offset 和 end_offset 必须是整数"}, ensure_ascii=False)
if start_offset <= visible_limit or end_offset <= visible_limit:
return json.dumps(
{"error": f"偏移量必须大于 {visible_limit} 以排除当前可见消息"},
ensure_ascii=False,
)
if start_offset > end_offset:
start_offset, end_offset = end_offset, start_offset
result = message_summary.get_messages_by_reverse_range(
chat_id=chat_id, start_offset=start_offset, end_offset=end_offset,
)
payload = {
"start_offset": result.get("start_offset"),
"end_offset": result.get("end_offset"),
"messages": result.get("messages", []),
"returned_count": result.get("returned_count", 0),
"total_messages": result.get("total_messages", 0),
}
if payload["returned_count"] == 0:
payload["notice"] = "请求范围内没有消息。"
return json.dumps(payload, ensure_ascii=False)
# ── time ────────────────────────────────────────────────
if mode == "time":
if not start_time or not end_time:
return json.dumps({"error": "time 模式需要 start_time 和 end_time"}, ensure_ascii=False)
time_lines = message_summary.get_messages_by_time_window(
chat_id=chat_id, start_time=start_time, end_time=end_time,
)
payload = {
"start_time": start_time,
"end_time": end_time,
"messages": time_lines,
"returned_count": len(time_lines),
}
if not time_lines:
payload["notice"] = "该时间范围内没有消息。"
return json.dumps(payload, ensure_ascii=False)
return json.dumps({"error": f"不支持的模式: {mode}"}, ensure_ascii=False)
# ── Registration ────────────────────────────────────────────
# Runs at import time: the schema below is what the LLM sees when deciding
# whether (and how) to call lookup_chat_history.
tool_registry.register(Tool(
    name="lookup_chat_history",
    status_text="正在翻阅聊天记录: ",
    description=(
        "查询聊天历史记录。你当前只能看到最近的消息,调用此工具可以回溯更早的上下文。"
        "支持三种模式:\n"
        "1. mode=\"keywords\" — 用关键词模糊搜索历史消息,返回匹配片段及上下文。"
        " 需要 keywords 数组2-4 个关键词)。\n"
        "2. mode=\"range\" — 按倒序偏移获取连续消息块。"
        " 需要 start_offset 和 end_offset均需大于当前可见消息数\n"
        "3. mode=\"time\" — 按时间窗口获取消息。"
        " 需要 start_time 和 end_time格式如 2025-05-01 08:00\n"
        "可多次调用,例如先用 keywords 找到锚点,再用 range/time 扩展上下文。"
    ),
    parameters={
        "type": "object",
        "properties": {
            "mode": {
                "type": "string",
                "enum": ["keywords", "range", "time"],
                "description": "查询模式",
            },
            "keywords": {
                "type": "array",
                "items": {"type": "string"},
                "description": "mode=keywords 时的搜索关键词",
            },
            "start_offset": {
                "type": "integer",
                "description": "mode=range 时的起始偏移(从最新消息倒数)",
            },
            "end_offset": {
                "type": "integer",
                "description": "mode=range 时的结束偏移",
            },
            "start_time": {
                "type": "string",
                "description": "mode=time 时的开始时间 (YYYY-MM-DD HH:MM)",
            },
            "end_time": {
                "type": "string",
                "description": "mode=time 时的结束时间 (YYYY-MM-DD HH:MM)",
            },
        },
        "additionalProperties": False,
    },
    handler=_handle_lookup_chat_history,
))

View File

@@ -1,165 +0,0 @@
"""提醒工具 —— 创建 / 查看 / 删除提醒。
LLM 直接传入结构化参数,不再需要二级路由或二次 AI 解析。
"""
import json
from datetime import datetime
from tools import Tool, tool_registry
# ── 创建提醒 ────────────────────────────────────────────────
def _handle_reminder_create(ctx, type: str = "once", time: str = "",
content: str = "", weekday: int = None, **_) -> str:
if not hasattr(ctx.robot, "reminder_manager"):
return json.dumps({"error": "提醒管理器未初始化"}, ensure_ascii=False)
if not time or not content:
return json.dumps({"error": "缺少必要字段: time 和 content"}, ensure_ascii=False)
if len(content.strip()) < 2:
return json.dumps({"error": "提醒内容太短"}, ensure_ascii=False)
# 校验时间格式
if type == "once":
parsed_dt = None
for fmt in ("%Y-%m-%d %H:%M", "%Y-%m-%d %H:%M:%S"):
try:
parsed_dt = datetime.strptime(time, fmt)
break
except ValueError:
continue
if not parsed_dt:
return json.dumps({"error": f"once 类型时间格式应为 YYYY-MM-DD HH:MM收到: {time}"}, ensure_ascii=False)
if parsed_dt < datetime.now():
return json.dumps({"error": f"时间 {time} 已过去,请使用未来的时间"}, ensure_ascii=False)
time = parsed_dt.strftime("%Y-%m-%d %H:%M")
elif type in ("daily", "weekly"):
parsed_time = None
for fmt in ("%H:%M", "%H:%M:%S"):
try:
parsed_time = datetime.strptime(time, fmt)
break
except ValueError:
continue
if not parsed_time:
return json.dumps({"error": f"daily/weekly 类型时间格式应为 HH:MM收到: {time}"}, ensure_ascii=False)
time = parsed_time.strftime("%H:%M")
else:
return json.dumps({"error": f"不支持的提醒类型: {type}"}, ensure_ascii=False)
if type == "weekly":
if weekday is None or not (isinstance(weekday, int) and 0 <= weekday <= 6):
return json.dumps({"error": "weekly 类型需要 weekday 参数 (0=周一 … 6=周日)"}, ensure_ascii=False)
data = {"type": type, "time": time, "content": content, "extra": {}}
if weekday is not None:
data["weekday"] = weekday
roomid = ctx.msg.roomid if ctx.is_group else None
success, result = ctx.robot.reminder_manager.add_reminder(ctx.msg.sender, data, roomid=roomid)
if success:
type_label = {"once": "一次性", "daily": "每日", "weekly": "每周"}.get(type, type)
return json.dumps({"success": True, "id": result,
"message": f"已创建{type_label}提醒: {time} - {content}"}, ensure_ascii=False)
return json.dumps({"success": False, "error": result}, ensure_ascii=False)
# ── 查看提醒 ────────────────────────────────────────────────
def _handle_reminder_list(ctx, **_) -> str:
if not hasattr(ctx.robot, "reminder_manager"):
return json.dumps({"error": "提醒管理器未初始化"}, ensure_ascii=False)
reminders = ctx.robot.reminder_manager.list_reminders(ctx.msg.sender)
if not reminders:
return json.dumps({"reminders": [], "message": "当前没有任何提醒"}, ensure_ascii=False)
return json.dumps({"reminders": reminders, "count": len(reminders)}, ensure_ascii=False)
# ── 删除提醒 ────────────────────────────────────────────────
def _handle_reminder_delete(ctx, reminder_id: str = "", delete_all: bool = False, **_) -> str:
if not hasattr(ctx.robot, "reminder_manager"):
return json.dumps({"error": "提醒管理器未初始化"}, ensure_ascii=False)
if delete_all:
success, message, count = ctx.robot.reminder_manager.delete_all_reminders(ctx.msg.sender)
return json.dumps({"success": success, "message": message, "deleted_count": count}, ensure_ascii=False)
if not reminder_id:
return json.dumps({"error": "请提供 reminder_id或设置 delete_all=true 删除全部"}, ensure_ascii=False)
success, message = ctx.robot.reminder_manager.delete_reminder(ctx.msg.sender, reminder_id)
return json.dumps({"success": success, "message": message}, ensure_ascii=False)
# ── Registration ────────────────────────────────────────────
# Runs at import time; registration order determines the order the
# schemas appear in when presented to the LLM.
tool_registry.register(Tool(
    name="reminder_create",
    description=(
        "创建提醒。支持 once(一次性)、daily(每日)、weekly(每周) 三种类型。"
        "当前时间已在对话上下文中提供,请据此计算目标时间。"
    ),
    status_text="正在设置提醒...",
    parameters={
        "type": "object",
        "properties": {
            "type": {
                "type": "string",
                "enum": ["once", "daily", "weekly"],
                "description": "提醒类型",
            },
            "time": {
                "type": "string",
                "description": "once → YYYY-MM-DD HH:MMdaily/weekly → HH:MM",
            },
            "content": {
                "type": "string",
                "description": "提醒内容",
            },
            "weekday": {
                "type": "integer",
                "description": "仅 weekly 需要。0=周一 … 6=周日",
            },
        },
        "required": ["type", "time", "content"],
        "additionalProperties": False,
    },
    handler=_handle_reminder_create,
))
tool_registry.register(Tool(
    name="reminder_list",
    description="查看当前用户的所有提醒列表。",
    parameters={"type": "object", "properties": {}, "additionalProperties": False},
    handler=_handle_reminder_list,
))
tool_registry.register(Tool(
    name="reminder_delete",
    description=(
        "删除提醒。需要先调用 reminder_list 获取 ID再用 reminder_id 精确删除;"
        "或设置 delete_all=true 一次性删除全部。"
    ),
    parameters={
        "type": "object",
        "properties": {
            "reminder_id": {
                "type": "string",
                "description": "要删除的提醒完整 ID",
            },
            "delete_all": {
                "type": "boolean",
                "description": "是否删除该用户全部提醒",
            },
        },
        "additionalProperties": False,
    },
    handler=_handle_reminder_delete,
))

View File

@@ -1,62 +0,0 @@
"""网络搜索工具 —— 通过 Perplexity 联网搜索。
直接调用 perplexity.get_answer() 获取同步结果,
结果回传给 LLM 做综合回答,而非直接发送给用户。
"""
import json
import re
from tools import Tool, tool_registry
def _handle_web_search(ctx, query: str = "", deep_research: bool = False, **_) -> str:
if not query:
return json.dumps({"error": "请提供搜索关键词"}, ensure_ascii=False)
perplexity_instance = getattr(ctx.robot, "perplexity", None)
if not perplexity_instance:
return json.dumps({"error": "Perplexity 搜索功能不可用,未配置或未初始化"}, ensure_ascii=False)
try:
chat_id = ctx.get_receiver()
response = perplexity_instance.get_answer(query, chat_id, deep_research=deep_research)
if not response:
return json.dumps({"error": "搜索无结果"}, ensure_ascii=False)
# 清理 <think> 标签reasoning 模型可能返回)
cleaned = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL).strip()
if not cleaned:
cleaned = response
return json.dumps({"result": cleaned}, ensure_ascii=False)
except Exception as e:
return json.dumps({"error": f"搜索失败: {e}"}, ensure_ascii=False)
# Registration runs at import time; this schema is what the LLM sees
# when deciding whether to call web_search.
tool_registry.register(Tool(
    name="web_search",
    description=(
        "在网络上搜索信息。用于回答需要最新数据、实时信息或你不确定的事实性问题。"
        "deep_research 仅在问题非常复杂、需要深度研究时才开启。"
    ),
    status_text="正在联网搜索: ",
    parameters={
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "搜索关键词或问题",
            },
            "deep_research": {
                "type": "boolean",
                "description": "是否启用深度研究模式(耗时较长,仅用于复杂问题)",
            },
        },
        "required": ["query"],
        "additionalProperties": False,
    },
    handler=_handle_web_search,
))