mirror of https://github.com/Zippland/Bubbles.git
synced 2026-01-19 01:21:15 +08:00
KIMI
@@ -65,6 +65,7 @@ Bubbles is a feature-rich WeChat bot framework built on [wcferry](https:/
- Set a different AI model and system prompt per group chat and private chat
- OpenAI (ChatGPT)
- DeepSeek
- Moonshot (Kimi)

#### 🛠️ Smart Routing System
- AI-based intent recognition; no need to memorize specific command formats
@@ -2,4 +2,11 @@
AI Providers Module

This package contains the integrations with the various AI service providers.
"""

from .ai_chatgpt import ChatGPT
from .ai_deepseek import DeepSeek
from .ai_kimi import Kimi
from .ai_perplexity import Perplexity

__all__ = ["ChatGPT", "DeepSeek", "Kimi", "Perplexity"]
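With these re-exports in place, callers can pull any provider straight from the package root. A minimal sketch (the key is a placeholder value, not from this commit):

from ai_providers import Kimi

conf = {"key": "sk-...", "model": "kimi-k2"}  # placeholder key, minimal config
if Kimi.value_check(conf):                    # True only when a key is present
    kimi = Kimi(conf)                         # context history disabled without a MessageSummary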
266 ai_providers/ai_kimi.py Normal file
@@ -0,0 +1,266 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import logging
import time
from typing import List

import httpx
from openai import APIConnectionError, APIError, AuthenticationError, OpenAI

try:
    from function.func_summary import MessageSummary
except ImportError:  # pragma: no cover - fallback so the MessageSummary type hint still resolves
    MessageSummary = object


class Kimi:
    """Moonshot Kimi provider (compatible with the OpenAI SDK)."""

    def __init__(self, conf: dict, message_summary_instance: MessageSummary = None, bot_wxid: str = None) -> None:
        key = conf.get("key")
        api = conf.get("api", "https://api.moonshot.cn/v1")
        proxy = conf.get("proxy")
        prompt = conf.get("prompt")

        self.model = conf.get("model", "kimi-k2")
        self.max_history_messages = conf.get("max_history_messages", 30)
        self.show_reasoning = bool(conf.get("show_reasoning", False))
        self.LOG = logging.getLogger("Kimi")

        self.message_summary = message_summary_instance
        self.bot_wxid = bot_wxid

        if not self.message_summary:
            self.LOG.warning("MessageSummary 实例未提供给 Kimi,上下文功能将不可用!")
        if not self.bot_wxid:
            self.LOG.warning("bot_wxid 未提供给 Kimi,可能无法正确识别机器人自身消息!")

        if proxy:
            self.client = OpenAI(api_key=key, base_url=api, http_client=httpx.Client(proxy=proxy))
        else:
            self.client = OpenAI(api_key=key, base_url=api)

        self.system_content_msg = {
            "role": "system",
            "content": prompt or "你是 Kimi,一个由 Moonshot AI 打造的贴心助手。"
        }

    def __repr__(self) -> str:
        return "Kimi"

    @staticmethod
    def value_check(conf: dict) -> bool:
        if conf and conf.get("key"):
            return True
        return False

    def get_answer(
        self,
        question: str,
        wxid: str,
        system_prompt_override=None,
        specific_max_history=None,
        tools=None,
        tool_handler=None,
        tool_choice=None,
        tool_max_iterations: int = 10
    ) -> str:
        api_messages = []

        effective_system_prompt = system_prompt_override if system_prompt_override else self.system_content_msg.get("content")
        if effective_system_prompt:
            api_messages.append({"role": "system", "content": effective_system_prompt})

        now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        api_messages.append({"role": "system", "content": f"Current time is: {now_time}"})

        if self.message_summary and self.bot_wxid:
            history = self.message_summary.get_messages(wxid)

            limit_to_use = specific_max_history if specific_max_history is not None else self.max_history_messages
            try:
                limit_to_use = int(limit_to_use) if limit_to_use is not None else None
            except (TypeError, ValueError):
                limit_to_use = self.max_history_messages

            if limit_to_use is not None and limit_to_use > 0:
                history = history[-limit_to_use:]
            elif limit_to_use == 0:
                history = []

            for msg in history:
                role = "assistant" if msg.get("sender_wxid") == self.bot_wxid else "user"
                content = msg.get("content") or ""
                if not content:
                    continue
                if role == "user":
                    sender_name = msg.get("sender", "未知用户")
                    formatted_content = f"{sender_name}: {content}"
                    api_messages.append({"role": role, "content": formatted_content})
                else:
                    api_messages.append({"role": role, "content": content})
        else:
            self.LOG.debug(f"wxid={wxid} 无法加载历史记录(message_summary 或 bot_wxid 未设置)")

        if question:
            api_messages.append({"role": "user", "content": question})

        if tools and not tool_handler:
            self.LOG.warning("Kimi: 提供了 tools 但没有 tool_handler,忽略工具调用。")
            tools = None

        try:
            response_text, reasoning_text = self._execute_with_tools(
                api_messages=api_messages,
                tools=tools,
                tool_handler=tool_handler,
                tool_choice=tool_choice,
                tool_max_iterations=tool_max_iterations
            )

            if (
                self.show_reasoning
                and reasoning_text
                and isinstance(reasoning_text, str)
                and reasoning_text.strip()
            ):
                reasoning_output = reasoning_text.strip()
                final_answer = response_text.strip() if isinstance(response_text, str) else response_text
                return f"【思考过程】\n{reasoning_output}\n\n【最终回答】\n{final_answer}"

            return response_text

        except AuthenticationError:
            self.LOG.error("Kimi API 认证失败,请检查 API 密钥是否正确")
            return "Kimi API 认证失败,请检查配置。"
        except APIConnectionError:
            self.LOG.error("无法连接到 Kimi API,请检查网络或代理设置")
            return "无法连接到 Kimi 服务,请稍后再试。"
        except APIError as api_err:
            self.LOG.error(f"Kimi API 返回错误:{api_err}")
            return f"Kimi API 错误:{api_err}"
        except Exception as exc:
            self.LOG.error(f"Kimi 处理请求时出现未知错误:{exc}", exc_info=True)
            return "处理请求时出现未知错误,请稍后再试。"

    def _execute_with_tools(
        self,
        api_messages,
        tools=None,
        tool_handler=None,
        tool_choice=None,
        tool_max_iterations: int = 10
    ):
        iterations = 0
        params_base = {"model": self.model}
        runtime_tools = tools if tools and isinstance(tools, list) else None
        runtime_tool_choice = tool_choice
        reasoning_segments: List[str] = []

        while True:
            params = dict(params_base)
            params["messages"] = api_messages
            if runtime_tools:
                params["tools"] = runtime_tools
            if runtime_tool_choice:
                params["tool_choice"] = runtime_tool_choice

            response = self.client.chat.completions.create(**params)
            choice = response.choices[0]
            message = choice.message
            finish_reason = choice.finish_reason

            reasoning_chunk = self._extract_reasoning_text(message)
            if reasoning_chunk:
                reasoning_segments.append(reasoning_chunk)

            if (
                runtime_tools
                and message
                and getattr(message, "tool_calls", None)
                and finish_reason == "tool_calls"
                and tool_handler
            ):
                iterations += 1

                # An assistant message carrying tool_calls must be followed by matching
                # tool results, so check the iteration budget before echoing the calls
                # back; once the budget is exhausted, drop the calls and force a final answer.
                if tool_max_iterations is not None and iterations > max(tool_max_iterations, 0):
                    api_messages.append({"role": "assistant", "content": message.content or ""})
                    api_messages.append({
                        "role": "system",
                        "content": "你已经达到允许的最大工具调用次数,请根据现有信息直接给出最终回答。"
                    })
                    runtime_tool_choice = "none"
                    continue

                api_messages.append({
                    "role": "assistant",
                    "content": message.content or "",
                    "tool_calls": message.tool_calls
                })

                for tool_call in message.tool_calls:
                    tool_name = tool_call.function.name
                    raw_arguments = tool_call.function.arguments or "{}"
                    try:
                        parsed_arguments = json.loads(raw_arguments)
                    except json.JSONDecodeError:
                        parsed_arguments = {"_raw": raw_arguments}

                    try:
                        tool_output = tool_handler(tool_name, parsed_arguments)
                    except Exception as handler_exc:
                        self.LOG.error(f"工具 {tool_name} 执行失败: {handler_exc}", exc_info=True)
                        tool_output = json.dumps(
                            {"error": f"{tool_name} failed: {handler_exc.__class__.__name__}"},
                            ensure_ascii=False
                        )

                    if not isinstance(tool_output, str):
                        tool_output = json.dumps(tool_output, ensure_ascii=False)

                    api_messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": tool_output
                    })

                runtime_tool_choice = None
                continue

            response_text = message.content if message and message.content else ""
            if response_text.startswith("\n\n"):
                response_text = response_text[2:]
            response_text = response_text.replace("\n\n", "\n")

            reasoning_text = "\n".join(seg for seg in reasoning_segments if seg).strip()
            return response_text, reasoning_text

    def _extract_reasoning_text(self, message) -> str:
        """Moonshot attaches a reasoning_content field to the ChatCompletionMessage."""
        if not message:
            return ""
        raw_reasoning = getattr(message, "reasoning_content", None)
        if not raw_reasoning:
            return ""

        def _normalize_segment(segment) -> str:
            if isinstance(segment, str):
                return segment
            if isinstance(segment, dict):
                return segment.get("content") or segment.get("text") or ""
            if isinstance(segment, list):
                return "\n".join(filter(None, (_normalize_segment(item) for item in segment)))
            return str(segment) if segment is not None else ""

        if isinstance(raw_reasoning, list):
            segments = []
            for part in raw_reasoning:
                normalized = _normalize_segment(part)
                if normalized:
                    segments.append(normalized)
            return "\n".join(segments).strip()

        return _normalize_segment(raw_reasoning).strip()


__all__ = ["Kimi"]
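To illustrate the contract get_answer and _execute_with_tools assume: tools follows the OpenAI function-calling schema, and tool_handler receives (tool_name, parsed_arguments) and returns a string (non-string results are JSON-encoded before being appended as a tool message). A hedged sketch reusing the kimi instance from the earlier import example; the get_weather tool is purely hypothetical, not part of this commit:

import json

# Hypothetical tool definition in the OpenAI function-calling format
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative name
        "description": "查询指定城市的实时天气",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

def tool_handler(tool_name: str, arguments: dict) -> str:
    # Dispatch on the tool name; exceptions raised here are caught by
    # _execute_with_tools and reported back to the model as a JSON error payload.
    if tool_name == "get_weather":
        return json.dumps({"city": arguments.get("city"), "weather": "晴"}, ensure_ascii=False)
    return json.dumps({"error": "unknown tool"}, ensure_ascii=False)

reply = kimi.get_answer("北京今天天气如何?", wxid="some_chat_id",
                        tools=tools, tool_handler=tool_handler)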
@@ -56,6 +56,8 @@ groups:
  # 0: automatically pick the first available model
  # 1: ChatGPT
  # 2: DeepSeek
  # 3: Kimi
  # 4: Perplexity
  default: 0  # default model ID (0 = automatically pick the first available model)
  # group-chat mapping
  mapping:
@@ -108,6 +110,16 @@ deepseek: # -----DeepSeek config; leave this line's value empty-----
  show_reasoning: false  # whether to show the reasoning process in replies; only effective when chain-of-thought is enabled
  max_history_messages: 10  # <--- add this line so DeepSeek reviews at most 10 history messages

kimi: # -----Kimi config-----
  key:  # your Moonshot API Key
  api: https://api.moonshot.cn/v1  # Kimi API endpoint
  proxy:  # optional proxy for mainland China, e.g. http://127.0.0.1:7890
  model_flash: kimi-k2  # fast-reply model
  model_reasoning: kimi-k2-thinking  # deep-reasoning model
  prompt: 你是 Kimi,一个由 Moonshot AI 构建的可靠助手  # persona prompt
  max_history_messages: 20  # Kimi reviews at most 20 history messages
  show_reasoning: false  # whether to append reasoning_content to replies

aliyun_image: # -----for Aliyun text-to-image, uncomment below and fill in; the model is Tongyi Wanxiang text-to-image 2.1-Turbo on Aliyun Bailian-----
  enable: true  # whether to enable Aliyun text-to-image; false disables it (enabled by default); if unconfigured, messages fall through to the chat model
  api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxx  # replace with your DashScope API key
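A minimal sketch of how this YAML section reaches the provider, assuming a standard PyYAML load of config.yaml (path assumed); the Config hunk below performs the same lookup:

import yaml

with open("config.yaml", encoding="utf-8") as f:  # path assumed
    yconfig = yaml.safe_load(f)

kimi_conf = yconfig.get("kimi", {})  # the same lookup Config performs
kimi_conf["model"] = kimi_conf.get("model_flash", "kimi-k2")  # mirrors robot.py's flash-model selection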
@@ -87,6 +87,7 @@ class Config(object):
        self.NEWS = yconfig["news"]["receivers"]
        self.CHATGPT = yconfig.get("chatgpt", {})
        self.DEEPSEEK = yconfig.get("deepseek", {})
        self.KIMI = yconfig.get("kimi", {})
        self.PERPLEXITY = yconfig.get("perplexity", {})
        self.ALIYUN_IMAGE = yconfig.get("aliyun_image", {})
        self.AI_ROUTER = yconfig.get("ai_router", {"enable": True, "allowed_groups": []})
@@ -6,12 +6,14 @@ class ChatType(IntEnum):
    # UnKnown = 0  # unknown, i.e. never set
    CHATGPT = 1  # ChatGPT
    DEEPSEEK = 2  # DeepSeek
    KIMI = 3  # Kimi (Moonshot)
    PERPLEXITY = 4  # Perplexity

    @staticmethod
    def is_in_chat_types(chat_type: int) -> bool:
        if chat_type in [ChatType.CHATGPT.value,
                         ChatType.DEEPSEEK.value,
                         ChatType.KIMI.value,
                         ChatType.PERPLEXITY.value]:
            return True
        return False
31 robot.py
@@ -16,6 +16,7 @@ from wcferry import Wcf, WxMsg
from ai_providers.ai_chatgpt import ChatGPT
from ai_providers.ai_deepseek import DeepSeek
from ai_providers.ai_kimi import Kimi
from ai_providers.ai_perplexity import Perplexity
from function.func_weather import Weather
from function.func_news import News
@@ -158,6 +159,35 @@ class Robot(Job):
            except Exception as e:
                self.LOG.error(f"初始化 DeepSeek 模型时出错: {str(e)}")

        # Initialize Kimi
        if Kimi.value_check(self.config.KIMI):
            try:
                kimi_flash_conf = copy.deepcopy(self.config.KIMI)
                flash_model_name = kimi_flash_conf.get("model_flash", "kimi-k2")
                kimi_flash_conf["model"] = flash_model_name
                self.chat_models[ChatType.KIMI.value] = Kimi(
                    kimi_flash_conf,
                    message_summary_instance=self.message_summary,
                    bot_wxid=self.wxid
                )
                self.LOG.info(f"已加载 Kimi 模型: {flash_model_name}")

                reasoning_model_name = self.config.KIMI.get("model_reasoning")
                if not reasoning_model_name and flash_model_name != "kimi-k2-thinking":
                    reasoning_model_name = "kimi-k2-thinking"

                if reasoning_model_name and reasoning_model_name != flash_model_name:
                    kimi_reason_conf = copy.deepcopy(self.config.KIMI)
                    kimi_reason_conf["model"] = reasoning_model_name
                    self.reasoning_chat_models[ChatType.KIMI.value] = Kimi(
                        kimi_reason_conf,
                        message_summary_instance=self.message_summary,
                        bot_wxid=self.wxid
                    )
                    self.LOG.info(f"已加载 Kimi 推理模型: {reasoning_model_name}")
            except Exception as e:
                self.LOG.error(f"初始化 Kimi 模型时出错: {str(e)}")

        # Initialize Perplexity
        if Perplexity.value_check(self.config.PERPLEXITY):
@@ -666,6 +696,7 @@ class Robot(Job):
        mapping = {
            ChatType.CHATGPT.value: getattr(self.config, 'CHATGPT', None),
            ChatType.DEEPSEEK.value: getattr(self.config, 'DEEPSEEK', None),
            ChatType.KIMI.value: getattr(self.config, 'KIMI', None),
            ChatType.PERPLEXITY.value: getattr(self.config, 'PERPLEXITY', None),
        }
        return mapping.get(model_id)
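Putting the hunks together, a hypothetical lookup-and-answer flow inside Robot (the wxid value is a placeholder; attribute names follow the hunks above):

model_id = ChatType.KIMI.value             # 3, per the enum above
provider = self.chat_models.get(model_id)  # populated during Robot init
if provider:
    reply = provider.get_answer("你好", wxid="wxid_example")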