Dual mode: separate fast-reply and deep-thinking models per provider

zihanjian
2025-09-25 15:56:48 +08:00
parent 93c4f0c53f
commit 670a94b2fb
2 changed files with 125 additions and 22 deletions


@@ -92,7 +92,8 @@ weather: # ----- weather reminder config; do not fill in this line -----
chatgpt: # ----- chatgpt config; do not fill in this line -----
  key: # your ChatGPT API key
  api: https://api.openai.com/v1 # do not change this unless you know what it does
  model: gpt-3.5-turbo # options: gpt-3.5-turbo, gpt-4, gpt-4-turbo, gpt-4.1-mini, o4-mini
  model_flash: gpt-3.5-turbo # fast-reply model (optional)
  model_reasoning: gpt-3.5-turbo # deep-thinking model (optional)
  proxy: # if you are in mainland China you may need a proxy, roughly like http://domain-or-IP:port
  prompt: You are an intelligent chatbot named wcferry # adjust the persona as needed
  max_history_messages: 20 # <--- add this line to let ChatGPT look back at most 20 history messages
@@ -101,7 +102,8 @@ deepseek: # ----- deepseek config; do not fill in this line -----
  # chain-of-thought features are off by default; enabling them increases response time and token usage
  key: # your DeepSeek API Key, in the form sk-xxxxxxxxxxxxxxx
  api: https://api.deepseek.com # DeepSeek API endpoint
  model: deepseek-chat # options: deepseek-chat (DeepSeek-V3), deepseek-reasoner (DeepSeek-R1)
  model_flash: deepseek-chat # fast-reply model
  model_reasoning: deepseek-reasoner # deep-thinking model
  prompt: You are an intelligent chatbot named DeepSeek Assistant # adjust the persona as needed
  enable_reasoning: false # whether to enable chain-of-thought; only effective with the deepseek-reasoner model
  show_reasoning: false # whether to show the reasoning process in replies; only effective when chain-of-thought is enabled
@@ -109,7 +111,8 @@ deepseek: # ----- deepseek config; do not fill in this line -----
gemini: # ----- gemini config -----
  api_key: "YOUR_GOOGLE_API_KEY" # required
  model_name: "gemini-1.5-pro-latest" # optional, defaults to "gemini-1.5-pro-latest"
  model_flash: "gemini-1.5-pro-latest" # fast-reply model (optional)
  model_reasoning: "gemini-1.5-pro-latest" # deep-thinking model (optional)
  prompt: "You are an AI assistant; answer users' questions in plain, easy-to-understand language." # optional
  proxy: "http://127.0.0.1:7890" # optional, proxy address
  max_history_messages: 20 # optional, conversation history length
@@ -137,7 +140,8 @@ perplexity: # ----- perplexity config; do not fill in this line -----
  key: # your Perplexity API Key
  api: https://api.perplexity.ai # API endpoint
  proxy: # if you are in mainland China you may need a proxy, roughly like http://domain-or-IP:port
  model: mixtral-8x7b-instruct # available models include sonar-small-chat, sonar-medium-chat, sonar-pro, mixtral-8x7b-instruct, etc.
  model_flash: mixtral-8x7b-instruct # fast-reply model (optional)
  model_reasoning: mixtral-8x7b-instruct # deep-thinking model (optional)
  prompt: You are a Perplexity AI assistant; answer questions in a professional, accurate, and helpful way # persona setting
  trigger_keyword: ask # prefix word that triggers the Perplexity service
  allow_all: false # whether all group chats and users may use Perplexity; when true, the whitelist below is ignored
@@ -146,4 +150,4 @@ perplexity: # ----- perplexity config; do not fill in this line -----
ai_router: # ----- AI router config -----
  enable: true # whether to enable AI routing
  allowed_groups: [] # group-chat IDs allowed to use AI routing, e.g. ["123456789@chatroom", "123456789@chatroom"]
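Every provider below follows the same wiring in robot.py: deep-copy the provider's raw config, point the copy's model field at model_flash or model_reasoning, and build one client per mode. A minimal standalone sketch of that pattern (build_dual_models, factory, and default_flash are hypothetical names, not part of the codebase):

import copy

def build_dual_models(conf: dict, factory, default_flash: str):
    # Clone the raw config so the flash and reasoning clients never share state.
    flash_conf = copy.deepcopy(conf)
    flash_conf["model"] = flash_conf.get("model_flash", default_flash)
    flash = factory(flash_conf)

    # Only build a second client when a distinct reasoning model is configured.
    reasoning = None
    reasoning_name = conf.get("model_reasoning")
    if reasoning_name and reasoning_name != flash_conf["model"]:
        reasoning_conf = copy.deepcopy(conf)
        reasoning_conf["model"] = reasoning_name
        reasoning = factory(reasoning_conf)
    return flash, reasoning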

robot.py

@@ -9,6 +9,7 @@ from threading import Thread
import os
import random
import shutil
import copy
from image import AliyunImage, GeminiImage
from image.img_manager import ImageGenerationManager
@@ -65,69 +66,130 @@ class Robot(Job):
        self.xml_processor = XmlProcessor(self.LOG)
        self.chat_models = {}
        self.reasoning_chat_models = {}
        self.LOG.info("Initializing AI models...")
        # Initialize ChatGPT
        if ChatGPT.value_check(self.config.CHATGPT):
            try:
                # pass in message_summary and wxid
                chatgpt_flash_conf = copy.deepcopy(self.config.CHATGPT)
                flash_model_name = chatgpt_flash_conf.get("model_flash", "gpt-3.5-turbo")
                chatgpt_flash_conf["model"] = flash_model_name
                self.chat_models[ChatType.CHATGPT.value] = ChatGPT(
                    self.config.CHATGPT,
                    chatgpt_flash_conf,
                    message_summary_instance=self.message_summary,
                    bot_wxid=self.wxid
                )
                self.LOG.info(f"ChatGPT model loaded")
                self.LOG.info(f"ChatGPT model loaded: {flash_model_name}")
                reasoning_model_name = self.config.CHATGPT.get("model_reasoning")
                if reasoning_model_name and reasoning_model_name != flash_model_name:
                    chatgpt_reason_conf = copy.deepcopy(self.config.CHATGPT)
                    chatgpt_reason_conf["model"] = reasoning_model_name
                    self.reasoning_chat_models[ChatType.CHATGPT.value] = ChatGPT(
                        chatgpt_reason_conf,
                        message_summary_instance=self.message_summary,
                        bot_wxid=self.wxid
                    )
                    self.LOG.info(f"ChatGPT reasoning model loaded: {reasoning_model_name}")
            except Exception as e:
                self.LOG.error(f"Error initializing ChatGPT model: {str(e)}")
        # Initialize DeepSeek
        if DeepSeek.value_check(self.config.DEEPSEEK):
            try:
                # pass in message_summary and wxid
                self.chat_models[ChatType.DEEPSEEK.value] = DeepSeek(
                    self.config.DEEPSEEK,
                    message_summary_instance=self.message_summary,
                    bot_wxid=self.wxid
                )
                self.LOG.info(f"DeepSeek model loaded")
                deepseek_flash_conf = copy.deepcopy(self.config.DEEPSEEK)
                flash_model_name = deepseek_flash_conf.get("model_flash", "deepseek-chat")
                deepseek_flash_conf["model"] = flash_model_name
                self.chat_models[ChatType.DEEPSEEK.value] = DeepSeek(
                    deepseek_flash_conf,
                    message_summary_instance=self.message_summary,
                    bot_wxid=self.wxid
                )
                self.LOG.info(f"DeepSeek model loaded: {flash_model_name}")
                reasoning_model_name = self.config.DEEPSEEK.get("model_reasoning")
                if not reasoning_model_name and flash_model_name != "deepseek-reasoner":
                    reasoning_model_name = "deepseek-reasoner"
                if reasoning_model_name and reasoning_model_name != flash_model_name:
                    deepseek_reason_conf = copy.deepcopy(self.config.DEEPSEEK)
                    deepseek_reason_conf["model"] = reasoning_model_name
                    self.reasoning_chat_models[ChatType.DEEPSEEK.value] = DeepSeek(
                        deepseek_reason_conf,
                        message_summary_instance=self.message_summary,
                        bot_wxid=self.wxid
                    )
                    self.LOG.info(f"DeepSeek reasoning model loaded: {reasoning_model_name}")
            except Exception as e:
                self.LOG.error(f"Error initializing DeepSeek model: {str(e)}")
        # Initialize Gemini
        if Gemini.value_check(self.config.GEMINI):
            try:
                # pass in message_summary and wxid
                gemini_flash_conf = copy.deepcopy(self.config.GEMINI)
                flash_model_name = gemini_flash_conf.get("model_flash", Gemini.DEFAULT_MODEL)
                gemini_flash_conf["model_name"] = flash_model_name
                self.chat_models[ChatType.GEMINI.value] = Gemini(
                    self.config.GEMINI,
                    gemini_flash_conf,
                    message_summary_instance=self.message_summary,
                    bot_wxid=self.wxid
                )
                self.LOG.info(f"Gemini model loaded")
                self.LOG.info(f"Gemini model loaded: {flash_model_name}")
                reasoning_model_name = self.config.GEMINI.get("model_reasoning")
                if reasoning_model_name and reasoning_model_name != flash_model_name:
                    gemini_reason_conf = copy.deepcopy(self.config.GEMINI)
                    gemini_reason_conf["model_name"] = reasoning_model_name
                    self.reasoning_chat_models[ChatType.GEMINI.value] = Gemini(
                        gemini_reason_conf,
                        message_summary_instance=self.message_summary,
                        bot_wxid=self.wxid
                    )
                    self.LOG.info(f"Gemini reasoning model loaded: {reasoning_model_name}")
            except Exception as e:
                self.LOG.error(f"Error initializing Gemini model: {str(e)}")
        # Initialize Perplexity
        if Perplexity.value_check(self.config.PERPLEXITY):
            self.chat_models[ChatType.PERPLEXITY.value] = Perplexity(self.config.PERPLEXITY)
            self.perplexity = self.chat_models[ChatType.PERPLEXITY.value]  # keep a separate reference for special handling
            self.LOG.info(f"Perplexity model loaded")
            try:
                perplexity_flash_conf = copy.deepcopy(self.config.PERPLEXITY)
                flash_model_name = perplexity_flash_conf.get("model_flash", "sonar")
                perplexity_flash_conf["model"] = flash_model_name
                self.chat_models[ChatType.PERPLEXITY.value] = Perplexity(perplexity_flash_conf)
                self.perplexity = self.chat_models[ChatType.PERPLEXITY.value]  # keep a separate reference for special handling
                self.LOG.info(f"Perplexity model loaded: {flash_model_name}")
                reasoning_model_name = self.config.PERPLEXITY.get("model_reasoning")
                if reasoning_model_name and reasoning_model_name != flash_model_name:
                    perplexity_reason_conf = copy.deepcopy(self.config.PERPLEXITY)
                    perplexity_reason_conf["model"] = reasoning_model_name
                    self.reasoning_chat_models[ChatType.PERPLEXITY.value] = Perplexity(perplexity_reason_conf)
                    self.LOG.info(f"Perplexity reasoning model loaded: {reasoning_model_name}")
            except Exception as e:
                self.LOG.error(f"Error initializing Perplexity model: {str(e)}")
        # Select the default model according to the chat_type argument
        self.current_model_id = None
        if chat_type > 0 and chat_type in self.chat_models:
            self.chat = self.chat_models[chat_type]
            self.default_model_id = chat_type
            self.current_model_id = chat_type
        else:
            # If chat_type is unset or unavailable, try the default model from the config file
            self.default_model_id = self.config.GROUP_MODELS.get('default', 0)
            if self.default_model_id in self.chat_models:
                self.chat = self.chat_models[self.default_model_id]
                self.current_model_id = self.default_model_id
            elif self.chat_models:  # otherwise fall back to the first available model
                self.default_model_id = list(self.chat_models.keys())[0]
                self.chat = self.chat_models[self.default_model_id]
                self.current_model_id = self.default_model_id
            else:
                self.LOG.warning("No usable model is configured")
                self.chat = None
                self.default_model_id = 0
                self.current_model_id = None
        self.LOG.info(f"Default model: {self.chat}, model ID: {self.default_model_id}")
@@ -201,6 +263,30 @@ class Robot(Job):
        setattr(ctx, 'chat', self.chat)
        setattr(ctx, 'specific_max_history', specific_limit)
        reasoning_triggered = bool(ctx.text and "想想" in ctx.text)
        if reasoning_triggered:
            self.LOG.info("Reasoning-mode trigger word detected; skipping AI routing.")
            ctx.send_text("Thinking deeply, please wait...")
            previous_ctx_chat = ctx.chat
            reasoning_chat = self._get_reasoning_chat_model()
            if reasoning_chat:
                ctx.chat = reasoning_chat
                self.LOG.debug(f"Handling message with reasoning model {reasoning_chat}")
            else:
                self.LOG.warning("No reasoning model configured for the current model; using the default model for this deep-thinking request")
            reasoning_handled = False
            try:
                reasoning_handled = handle_chitchat(ctx, None)
            finally:
                ctx.chat = previous_ctx_chat
            if not reasoning_handled:
                self.LOG.warning("Reasoning mode failed to handle the message; sending a fallback notice to the user")
                ctx.send_text("Sorry, deep thinking ran into a problem; please try again later.")
            return
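The reasoning-mode trigger is a plain substring check on the message text; extracted here as a standalone predicate for illustration (the helper name is hypothetical):

from typing import Optional

def reasoning_triggered(text: Optional[str]) -> bool:
    # Any message containing the keyword "想想" ("think it over")
    # is handled by the reasoning model for that turn.
    return bool(text and "想想" in text)

assert reasoning_triggered("帮我想想这个问题")  # keyword present
assert not reasoning_triggered("你好")          # ordinary chitchat
assert not reasoning_triggered(None)            # non-text message, e.g. an image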
        handled = False
        # 5. Try the AI router first (only for private chats or @-mentions of the bot)
@@ -448,6 +534,13 @@ class Robot(Job):
        return None

    def _get_reasoning_chat_model(self):
        """Return the reasoning-model instance paired with the current chat model."""
        model_id = getattr(self, 'current_model_id', None)
        if model_id is None:
            return None
        return self.reasoning_chat_models.get(model_id)
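The lookup keys off current_model_id, which _select_model_for_message keeps in sync below, so reasoning mode always follows the per-group or per-user model active for the message. A dict-level illustration of that contract (values are hypothetical):

reasoning_chat_models = {2: "chatgpt-reasoning", 3: "deepseek-reasoner"}

def get_reasoning(current_model_id):
    # None means the caller keeps the flash model for this turn.
    if current_model_id is None:
        return None
    return reasoning_chat_models.get(current_model_id)

assert get_reasoning(3) == "deepseek-reasoner"
assert get_reasoning(7) is None  # active model has no reasoning twin
assert get_reasoning(None) is None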
    def _select_model_for_message(self, msg: WxMsg) -> None:
        """Select the AI model matching the message source
@@ -464,6 +557,7 @@ class Robot(Job):
        # No configuration found; use the default model
        if self.default_model_id in self.chat_models:
            self.chat = self.chat_models[self.default_model_id]
            self.current_model_id = self.default_model_id
        return
        # Group-chat message handling
@@ -477,10 +571,12 @@ class Robot(Job):
                if self.chat != self.chat_models[model_id]:
                    self.chat = self.chat_models[model_id]
                    self.LOG.info(f"Switched group {source_id} to model: {self.chat.__class__.__name__}")
                self.current_model_id = model_id
            else:
                self.LOG.warning(f"Model ID {model_id} configured for group {source_id} is unavailable; using the default model")
                if self.default_model_id in self.chat_models:
                    self.chat = self.chat_models[self.default_model_id]
                    self.current_model_id = self.default_model_id
            return
        # Private-chat message handling
        else:
@@ -493,15 +589,18 @@ class Robot(Job):
                if self.chat != self.chat_models[model_id]:
                    self.chat = self.chat_models[model_id]
                    self.LOG.info(f"Switched private chat with user {source_id} to model: {self.chat.__class__.__name__}")
                self.current_model_id = model_id
            else:
                self.LOG.warning(f"Model ID {model_id} configured for private-chat user {source_id} is unavailable; using the default model")
                if self.default_model_id in self.chat_models:
                    self.chat = self.chat_models[self.default_model_id]
                    self.current_model_id = self.default_model_id
            return
        # No matching configuration found; use the default model
        if self.default_model_id in self.chat_models:
            self.chat = self.chat_models[self.default_model_id]
            self.current_model_id = self.default_model_id
    def _get_specific_history_limit(self, msg: WxMsg) -> int:
        """Determine the specific history-message limit based on the message source and configuration