Mirror of https://github.com/zhayujie/chatgpt-on-wechat.git (synced 2026-01-19 01:21:01 +08:00)
Merge branch 'master' into master
README.md (10 lines changed)
@@ -4,10 +4,10 @@
 Features supported in the latest version:

-- [x] **Multi-channel deployment:** several fully featured deployment options; currently supports personal WeChat, WeChat Official Account, and WeCom (WeChat Work) application channels
+- [x] **Multi-channel deployment:** several fully featured deployment options; currently supports personal WeChat, WeChat Official Account, WeCom (WeChat Work), Feishu, and more
 - [x] **Basic chat:** smart replies for private and group chats, with multi-turn conversation memory; supports GPT-3.5, GPT-4, Claude, 文心一言, 讯飞星火
-- [x] **Speech recognition:** recognizes voice messages and replies with text or voice; supports azure, baidu, google, openai (whisper/tts) and other speech models
-- [x] **Image generation:** supports image generation and image-to-image (e.g. photo restoration); choose from Dall-E, stable diffusion, replicate, midjourney models
+- [x] **Voice capability:** recognizes voice messages and replies with text or voice; supports azure, baidu, google, openai (whisper/tts) and other speech models
+- [x] **Image capability:** supports image generation, image recognition, and image-to-image (e.g. photo restoration); choose from Dall-E-3, stable diffusion, replicate, midjourney, vision models
 - [x] **Rich plugins:** supports personalized plugin extensions; plugins for multi-persona switching, text adventure, sensitive-word filtering, chat-log summarization, and document summarization & Q&A are already available
 - [X] **Tools:** interact with the operating system and the internet; supports up-to-date information search, math, weather and news queries, and web-page summarization, built on [chatgpt-tool-hub](https://github.com/goldfishh/chatgpt-tool-hub)
 - [x] **Knowledge base:** build a custom bot by uploading knowledge-base files; usable as a digital twin, domain knowledge base, or intelligent customer service, built on [LinkAI](https://link-ai.tech/console)
@@ -28,6 +28,8 @@ Demo made by [Visionn](https://www.wangpc.cc/)

 # Changelog

+>**2023.11.10:** [Version 1.5.2](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.2): added the Feishu channel, image-recognition chat, and blacklist configuration

 >**2023.11.10:** [Version 1.5.0](https://github.com/zhayujie/chatgpt-on-wechat/releases/tag/1.5.0): added `gpt-4-turbo`, `dall-e-3`, and `tts` models, improving the multimodal capabilities for image understanding & generation and speech recognition & generation

 >**2023.10.16:** LinkAI intent recognition can now trigger the web-search, math, and web-access plugins; see the [plugin docs](https://docs.link-ai.tech/platform/plugins)
@@ -52,6 +54,8 @@ Demo made by [Visionn](https://www.wangpc.cc/)

 # Quick Start

+Quick-start guide: [project setup documentation](https://docs.link-ai.tech/cow/quick-start)

 ## Preparation

 ### 1. Account registration
app.py (6 lines changed)
@@ -5,8 +5,8 @@ import signal
 import sys

 from channel import channel_factory
 from common.log import logger
 from config import conf, load_config
+from common import const
 from config import load_config
 from plugins import *

@@ -43,7 +43,7 @@ def run():
     # os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:9001'

     channel = channel_factory.create_channel(channel_name)
-    if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework"]:
+    if channel_name in ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service", "wechatcom_app", "wework", const.FEISHU]:
         PluginManager().load_plugins()

     # startup channel
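For readers new to the codebase, the hunk above only gates which channels get plugins loaded before startup. A self-contained sketch of that gating pattern — channel names follow the diff, everything else (the dummy factory, the print statements) is a stand-in for illustration:

```python
# Minimal sketch of the startup gating added in app.py; not the project's real factory.

class DummyChannel:
    def __init__(self, name: str) -> None:
        self.name = name

    def startup(self) -> None:
        print(f"channel '{self.name}' started")

def create_channel(channel_type: str) -> DummyChannel:
    # stand-in for channel_factory.create_channel
    return DummyChannel(channel_type)

PLUGIN_CHANNELS = ["wx", "wxy", "terminal", "wechatmp", "wechatmp_service",
                   "wechatcom_app", "wework", "feishu"]  # "feishu" == const.FEISHU

def start_channel(channel_name: str) -> None:
    channel = create_channel(channel_name)
    if channel_name in PLUGIN_CHANNELS:
        print("loading plugins")  # PluginManager().load_plugins() in the real code
    channel.startup()

if __name__ == "__main__":
    start_channel("feishu")
```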
bot/linkai/link_ai_bot.py
@@ -13,6 +13,9 @@ from bridge.reply import Reply, ReplyType
 from common.log import logger
 from config import conf, pconf
 import threading
+from common import memory, utils
+import base64


 class LinkAIBot(Bot):
     # authentication failed
@@ -21,7 +24,7 @@ class LinkAIBot(Bot):

     def __init__(self):
         super().__init__()
-        self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
+        self.sessions = LinkAISessionManager(LinkAISession, model=conf().get("model") or "gpt-3.5-turbo")
         self.args = {}

     def reply(self, query, context: Context = None) -> Reply:
@@ -61,17 +64,25 @@ class LinkAIBot(Bot):
         linkai_api_key = conf().get("linkai_api_key")

         session_id = context["session_id"]
+        session_message = self.sessions.session_msg_query(query, session_id)
+        logger.debug(f"[LinkAI] session={session_message}, session_id={session_id}")
+
+        # image process
+        img_cache = memory.USER_IMAGE_CACHE.get(session_id)
+        if img_cache:
+            messages = self._process_image_msg(app_code=app_code, session_id=session_id, query=query, img_cache=img_cache)
+            if messages:
+                session_message = messages

-        session = self.sessions.session_query(query, session_id)
         model = conf().get("model")
         # remove system message
-        if session.messages[0].get("role") == "system":
+        if session_message[0].get("role") == "system":
             if app_code or model == "wenxin":
-                session.messages.pop(0)
+                session_message.pop(0)

         body = {
             "app_code": app_code,
-            "messages": session.messages,
+            "messages": session_message,
             "model": model,  # chat model name; supports gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
             "temperature": conf().get("temperature"),
             "top_p": conf().get("top_p", 1),
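The image handoff above spans two files and can be hard to follow: the channel caches the most recent image per session, and the next text query consumes it exactly once. A self-contained sketch of that pattern (a plain dict with timestamps stands in for `common.expired_dict.ExpiredDict`; the 3-minute TTL matches `common/memory.py`):

```python
import time

USER_IMAGE_CACHE = {}   # session_id -> {"path": ..., "ts": ...}; ExpiredDict in the real code
CACHE_TTL = 60 * 3      # cached images are only usable for 3 minutes

def on_image_received(session_id: str, image_path: str) -> None:
    # chat_channel caches the image instead of handling it immediately
    USER_IMAGE_CACHE[session_id] = {"path": image_path, "ts": time.time()}

def on_text_query(session_id: str, query: str) -> dict:
    # the bot checks the cache before building the request body
    entry = USER_IMAGE_CACHE.get(session_id)
    if entry and time.time() - entry["ts"] < CACHE_TTL:
        USER_IMAGE_CACHE[session_id] = None            # consume once, as the bot does
        return {"query": query, "image_path": entry["path"]}  # -> vision-style message
    return {"query": query}                            # plain text request

on_image_received("session-1", "/tmp/photo.png")
print(on_text_query("session-1", "What is in this picture?"))
print(on_text_query("session-1", "And a follow-up?"))  # cache already consumed
```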
@@ -94,7 +105,7 @@ class LinkAIBot(Bot):
             reply_content = response["choices"][0]["message"]["content"]
             total_tokens = response["usage"]["total_tokens"]
             logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
-            self.sessions.session_reply(reply_content, session_id, total_tokens)
+            self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)

             agent_suffix = self._fetch_agent_suffix(response)
             if agent_suffix:
@@ -130,6 +141,54 @@ class LinkAIBot(Bot):
             logger.warn(f"[LINKAI] do retry, times={retry_count}")
             return self._chat(query, context, retry_count + 1)

+    def _process_image_msg(self, app_code: str, session_id: str, query: str, img_cache: dict):
+        try:
+            enable_image_input = False
+            app_info = self._fetch_app_info(app_code)
+            if not app_info:
+                logger.debug(f"[LinkAI] app not found, can't process images, app_code={app_code}")
+                return None
+            plugins = app_info.get("data").get("plugins")
+            for plugin in plugins:
+                if plugin.get("input_type") and "IMAGE" in plugin.get("input_type"):
+                    enable_image_input = True
+            if not enable_image_input:
+                return
+            msg = img_cache.get("msg")
+            path = img_cache.get("path")
+            msg.prepare()
+            logger.info(f"[LinkAI] query with images, path={path}")
+            messages = self._build_vision_msg(query, path)
+            memory.USER_IMAGE_CACHE[session_id] = None
+            return messages
+        except Exception as e:
+            logger.exception(e)
+
+    def _build_vision_msg(self, query: str, path: str):
+        try:
+            suffix = utils.get_path_suffix(path)
+            with open(path, "rb") as file:
+                base64_str = base64.b64encode(file.read()).decode('utf-8')
+            messages = [{
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": query
+                    },
+                    {
+                        "type": "image_url",
+                        "image_url": {
+                            "url": f"data:image/{suffix};base64,{base64_str}"
+                        }
+                    }
+                ]
+            }]
+            return messages
+        except Exception as e:
+            logger.exception(e)
+
     def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict:
         if retry_count >= 2:
             # exit from retry 2 times
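For reference, the list `_build_vision_msg` returns follows the OpenAI-style vision format: one user turn whose content mixes a text part and an image part carried as a base64 data URL. A tiny standalone sketch of the same shape, with placeholder bytes standing in for a real image file:

```python
import base64

def build_vision_msg(query: str, image_bytes: bytes, suffix: str = "png"):
    b64 = base64.b64encode(image_bytes).decode("utf-8")
    return [{
        "role": "user",
        "content": [
            {"type": "text", "text": query},
            {"type": "image_url", "image_url": {"url": f"data:image/{suffix};base64,{b64}"}},
        ],
    }]

fake_image = b"\x89PNG placeholder bytes"  # not a real image, just something to encode
print(build_vision_msg("describe this image", fake_image)[0]["content"][0])
```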
@@ -195,6 +254,16 @@ class LinkAIBot(Bot):
             logger.warn(f"[LINKAI] do retry, times={retry_count}")
             return self.reply_text(session, app_code, retry_count + 1)

+    def _fetch_app_info(self, app_code: str):
+        headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
+        # do http request
+        base_url = conf().get("linkai_api_base", "https://api.link-ai.chat")
+        params = {"app_code": app_code}
+        res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
+        if res.status_code == 200:
+            return res.json()
+        else:
+            logger.warning(f"[LinkAI] fetch app info failed, res={res}")
+
     def create_img(self, query, retry_count=0, api_key=None):
         try:
@@ -239,6 +308,7 @@ class LinkAIBot(Bot):
         except Exception as e:
             logger.exception(e)


     def _fetch_agent_suffix(self, response):
         try:
             plugin_list = []
@@ -275,4 +345,44 @@ class LinkAIBot(Bot):
                 reply = Reply(ReplyType.IMAGE_URL, url)
                 channel.send(reply, context)
         except Exception as e:
             logger.error(e)
+
+
+class LinkAISessionManager(SessionManager):
+    def session_msg_query(self, query, session_id):
+        session = self.build_session(session_id)
+        messages = session.messages + [{"role": "user", "content": query}]
+        return messages
+
+    def session_reply(self, reply, session_id, total_tokens=None, query=None):
+        session = self.build_session(session_id)
+        if query:
+            session.add_query(query)
+        session.add_reply(reply)
+        try:
+            max_tokens = conf().get("conversation_max_tokens", 2500)
+            tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
+            logger.info(f"[LinkAI] chat history discard, before tokens={total_tokens}, now tokens={tokens_cnt}")
+        except Exception as e:
+            logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
+        return session
+
+
+class LinkAISession(ChatGPTSession):
+    def calc_tokens(self):
+        try:
+            cur_tokens = super().calc_tokens()
+        except Exception as e:
+            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
+            cur_tokens = len(str(self.messages))
+        return cur_tokens
+
+    def discard_exceeding(self, max_tokens, cur_tokens=None):
+        cur_tokens = self.calc_tokens()
+        if cur_tokens > max_tokens:
+            for i in range(0, len(self.messages)):
+                if i > 0 and self.messages[i].get("role") == "assistant" and self.messages[i - 1].get("role") == "user":
+                    self.messages.pop(i)
+                    self.messages.pop(i - 1)
+                    return self.calc_tokens()
+        return cur_tokens
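The pruning strategy in `discard_exceeding` drops the oldest user/assistant pair once the running token estimate exceeds the budget, one pair per call. A toy demonstration of that behavior with a crude length-based counter (the real class defers to ChatGPTSession's tokenizer):

```python
def calc_tokens(messages) -> int:
    # crude stand-in for the real token counter
    return sum(len(m["content"]) for m in messages)

def discard_exceeding(messages, max_tokens: int) -> int:
    cur = calc_tokens(messages)
    if cur > max_tokens:
        for i in range(len(messages)):
            if i > 0 and messages[i]["role"] == "assistant" and messages[i - 1]["role"] == "user":
                messages.pop(i)       # drop the oldest answer...
                messages.pop(i - 1)   # ...and the question that produced it
                return calc_tokens(messages)
    return cur

history = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "first question " * 20},
    {"role": "assistant", "content": "first answer " * 20},
    {"role": "user", "content": "second question"},
    {"role": "assistant", "content": "second answer"},
]
print(discard_exceeding(history, max_tokens=200))
print([m["role"] for m in history])  # oldest user/assistant pair removed
```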
bot/session_manager.py
@@ -69,7 +69,7 @@ class SessionManager(object):
             total_tokens = session.discard_exceeding(max_tokens, None)
             logger.debug("prompt tokens used={}".format(total_tokens))
         except Exception as e:
-            logger.debug("Exception when counting tokens precisely for prompt: {}".format(str(e)))
+            logger.warning("Exception when counting tokens precisely for prompt: {}".format(str(e)))
         return session

     def session_reply(self, reply, session_id, total_tokens=None):
@@ -80,7 +80,7 @@ class SessionManager(object):
             tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
             logger.debug("raw total_tokens={}, savesession tokens={}".format(total_tokens, tokens_cnt))
         except Exception as e:
-            logger.debug("Exception when counting tokens precisely for session: {}".format(str(e)))
+            logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
         return session

     def clear_session(self, session_id):
channel/channel_factory.py
@@ -1,7 +1,7 @@
 """
 channel factory
 """

+from common import const
+
 def create_channel(channel_type):
     """
@@ -35,6 +35,10 @@ def create_channel(channel_type):
         return WechatComAppChannel()
     elif channel_type == "wework":
         from channel.wework.wework_channel import WeworkChannel

         return WeworkChannel()
+    elif channel_type == const.FEISHU:
+        from channel.feishu.feishu_channel import FeiShuChanel
+        return FeiShuChanel()

     raise RuntimeError
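With the new branch in place, selecting Feishu is just a matter of configuration: `create_channel(const.FEISHU)` lazily imports and instantiates `FeiShuChanel`, and app.py then calls `startup()` on it. A short usage sketch, assuming the project's config.json already carries the `feishu_*` settings shown later in this diff:

```python
from channel import channel_factory
from common import const

channel = channel_factory.create_channel(const.FEISHU)  # returns FeiShuChanel()
channel.startup()  # blocks, serving the web.py HTTP server on the configured feishu_port
```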
channel/chat_channel.py
@@ -9,8 +9,7 @@ from bridge.context import *
 from bridge.reply import *
 from channel.channel import Channel
 from common.dequeue import Dequeue
 from common.log import logger
 from config import conf
+from common import memory
 from plugins import *

 try:
@@ -95,6 +94,7 @@ class ChatChannel(Channel):
             logger.debug("[WX]reference query skipped")
             return None

+        nick_name_black_list = conf().get("nick_name_black_list", [])
         if context.get("isgroup", False):  # group chat
             # check trigger keywords
             match_prefix = check_prefix(content, conf().get("group_chat_prefix"))
@@ -106,6 +106,12 @@ class ChatChannel(Channel):
                 if match_prefix:
                     content = content.replace(match_prefix, "", 1).strip()
             if context["msg"].is_at:
+                nick_name = context["msg"].actual_user_nickname
+                if nick_name and nick_name in nick_name_black_list:
+                    # blacklist filter
+                    logger.warning(f"[WX] Nickname {nick_name} in black list, ignore")
+                    return None
+
                 logger.info("[WX]receive group at")
                 if not conf().get("group_at_off", False):
                     flag = True
@@ -125,6 +131,12 @@ class ChatChannel(Channel):
                     logger.info("[WX]receive group voice, but checkprefix didn't match")
                     return None
         else:  # private chat
+            nick_name = context["msg"].from_user_nickname
+            if nick_name and nick_name in nick_name_black_list:
+                # blacklist filter
+                logger.warning(f"[WX] Nickname '{nick_name}' in black list, ignore")
+                return None
+
             match_prefix = check_prefix(content, conf().get("single_chat_prefix", [""]))
             if match_prefix is not None:  # if a custom prefix matches, strip the prefix and surrounding whitespace
                 content = content.replace(match_prefix, "", 1).strip()
@@ -205,14 +217,16 @@ class ChatChannel(Channel):
             else:
                 return
         elif context.type == ContextType.IMAGE:  # image message: for now it is only cached locally
-            cmsg = context["msg"]
-            cmsg.prepare()
+            memory.USER_IMAGE_CACHE[context["session_id"]] = {
+                "path": context.content,
+                "msg": context.get("msg")
+            }
         elif context.type == ContextType.SHARING:  # shared link, no default handling yet
             pass
         elif context.type == ContextType.FUNCTION or context.type == ContextType.FILE:  # file message or function call, no default handling yet
             pass
         else:
-            logger.error("[WX] unknown context type: {}".format(context.type))
+            logger.warning("[WX] unknown context type: {}".format(context.type))
             return
         return reply

@@ -238,7 +252,8 @@ class ChatChannel(Channel):
                 reply = super().build_text_to_voice(reply.content)
                 return self._decorate_reply(context, reply)
             if context.get("isgroup", False):
-                reply_text = "@" + context["msg"].actual_user_nickname + "\n" + reply_text.strip()
+                if not context.get("no_need_at", False):
+                    reply_text = "@" + context["msg"].actual_user_nickname + "\n" + reply_text.strip()
                 reply_text = conf().get("group_chat_reply_prefix", "") + reply_text + conf().get("group_chat_reply_suffix", "")
             else:
                 reply_text = conf().get("single_chat_reply_prefix", "") + reply_text + conf().get("single_chat_reply_suffix", "")
channel/feishu/feishu_channel.py (new file, 250 lines)
@@ -0,0 +1,250 @@
"""
Feishu channel integration

@author Saboteur7
@Date 2023/11/19
"""

# -*- coding=utf-8 -*-
import uuid

import requests
import web
from channel.feishu.feishu_message import FeishuMessage
from bridge.context import Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from common.singleton import singleton
from config import conf
from common.expired_dict import ExpiredDict
from bridge.context import ContextType
from channel.chat_channel import ChatChannel, check_prefix
from common import utils
import json
import os

URL_VERIFICATION = "url_verification"
@singleton
class FeiShuChanel(ChatChannel):
    feishu_app_id = conf().get('feishu_app_id')
    feishu_app_secret = conf().get('feishu_app_secret')
    feishu_token = conf().get('feishu_token')

    def __init__(self):
        super().__init__()
        # cache of recently handled message ids, used for idempotency
        self.receivedMsgs = ExpiredDict(60 * 60 * 7.1)
        logger.info("[FeiShu] app_id={}, app_secret={} verification_token={}".format(
            self.feishu_app_id, self.feishu_app_secret, self.feishu_token))
        # no group whitelist check or chat prefix is required for this channel
        conf()["group_name_white_list"] = ["ALL_GROUP"]
        conf()["single_chat_prefix"] = []

    def startup(self):
        urls = (
            '/', 'channel.feishu.feishu_channel.FeishuController'
        )
        app = web.application(urls, globals(), autoreload=False)
        port = conf().get("feishu_port", 9891)
        web.httpserver.runsimple(app.wsgifunc(), ("0.0.0.0", port))

    def send(self, reply: Reply, context: Context):
        msg = context["msg"]
        is_group = context["isgroup"]
        headers = {
            "Authorization": "Bearer " + msg.access_token,
            "Content-Type": "application/json",
        }
        msg_type = "text"
        logger.info(f"[FeiShu] start send reply message, type={context.type}, content={reply.content}")
        reply_content = reply.content
        content_key = "text"
        if reply.type == ReplyType.IMAGE_URL:
            # image upload
            reply_content = self._upload_image_url(reply.content, msg.access_token)
            if not reply_content:
                logger.warning("[FeiShu] upload file failed")
                return
            msg_type = "image"
            content_key = "image_key"
        if is_group:
            # in group chats, reply to the original message directly
            url = f"https://open.feishu.cn/open-apis/im/v1/messages/{msg.msg_id}/reply"
            data = {
                "msg_type": msg_type,
                "content": json.dumps({content_key: reply_content})
            }
            res = requests.post(url=url, headers=headers, json=data, timeout=(5, 10))
        else:
            url = "https://open.feishu.cn/open-apis/im/v1/messages"
            params = {"receive_id_type": context.get("receive_id_type")}
            data = {
                "receive_id": context.get("receiver"),
                "msg_type": msg_type,
                "content": json.dumps({content_key: reply_content})
            }
            res = requests.post(url=url, headers=headers, params=params, json=data, timeout=(5, 10))
        res = res.json()
        if res.get("code") == 0:
            logger.info("[FeiShu] send message success")
        else:
            logger.error(f"[FeiShu] send message failed, code={res.get('code')}, msg={res.get('msg')}")
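A detail worth noting in `send()`: the Feishu message API expects the `content` field to be a JSON-encoded string rather than a nested object, which is why the body is built with `json.dumps`. A minimal illustration of the difference (the "rejected" note reflects my understanding of the API, not something exercised here):

```python
import json

reply_content = "hi there"

# what the request body looks like when "content" is a JSON string, as in send()
good_body = {"msg_type": "text", "content": json.dumps({"text": reply_content})}

# a nested object here would not match the API's expected format
bad_body = {"msg_type": "text", "content": {"text": reply_content}}

print(json.dumps(good_body))
print(json.dumps(bad_body))
```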
    def fetch_access_token(self) -> str:
        url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
        headers = {
            "Content-Type": "application/json"
        }
        req_body = {
            "app_id": self.feishu_app_id,
            "app_secret": self.feishu_app_secret
        }
        data = bytes(json.dumps(req_body), encoding='utf8')
        response = requests.post(url=url, data=data, headers=headers)
        if response.status_code == 200:
            res = response.json()
            if res.get("code") != 0:
                logger.error(f"[FeiShu] get tenant_access_token error, code={res.get('code')}, msg={res.get('msg')}")
                return ""
            else:
                return res.get("tenant_access_token")
        else:
            logger.error(f"[FeiShu] fetch token error, res={response}")

    def _upload_image_url(self, img_url, access_token):
        logger.debug(f"[FeiShu] start download image, img_url={img_url}")
        response = requests.get(img_url)
        suffix = utils.get_path_suffix(img_url)
        temp_name = str(uuid.uuid4()) + "." + suffix
        if response.status_code == 200:
            # save the downloaded image to a temporary file
            with open(temp_name, "wb") as file:
                file.write(response.content)

        # upload
        upload_url = "https://open.feishu.cn/open-apis/im/v1/images"
        data = {
            'image_type': 'message'
        }
        headers = {
            'Authorization': f'Bearer {access_token}',
        }
        with open(temp_name, "rb") as file:
            upload_response = requests.post(upload_url, files={"image": file}, data=data, headers=headers)
            logger.info(f"[FeiShu] upload file, res={upload_response.content}")
        os.remove(temp_name)
        return upload_response.json().get("data").get("image_key")
class FeishuController:
    # class constants
    FAILED_MSG = '{"success": false}'
    SUCCESS_MSG = '{"success": true}'
    MESSAGE_RECEIVE_TYPE = "im.message.receive_v1"

    def GET(self):
        return "Feishu service start success!"

    def POST(self):
        try:
            channel = FeiShuChanel()

            request = json.loads(web.data().decode("utf-8"))
            logger.debug(f"[FeiShu] receive request: {request}")

            # 1. event-subscription callback verification
            if request.get("type") == URL_VERIFICATION:
                verify_res = {"challenge": request.get("challenge")}
                return json.dumps(verify_res)

            # 2. message handling
            # token check
            header = request.get("header")
            if not header or header.get("token") != channel.feishu_token:
                return self.FAILED_MSG

            # handle message events
            event = request.get("event")
            if header.get("event_type") == self.MESSAGE_RECEIVE_TYPE and event:
                if not event.get("message") or not event.get("sender"):
                    logger.warning(f"[FeiShu] invalid message, msg={request}")
                    return self.FAILED_MSG
                msg = event.get("message")

                # idempotency check
                if channel.receivedMsgs.get(msg.get("message_id")):
                    logger.warning(f"[FeiShu] repeat msg filtered, event_id={header.get('event_id')}")
                    return self.SUCCESS_MSG
                channel.receivedMsgs[msg.get("message_id")] = True

                is_group = False
                chat_type = msg.get("chat_type")
                if chat_type == "group":
                    if not msg.get("mentions") and msg.get("message_type") == "text":
                        # ignore group text messages that do not @ anyone
                        return self.SUCCESS_MSG
                    if msg.get("mentions")[0].get("name") != conf().get("feishu_bot_name") and msg.get("message_type") == "text":
                        # the message does not @ this bot, ignore it
                        return self.SUCCESS_MSG
                    # group chat
                    is_group = True
                    receive_id_type = "chat_id"
                elif chat_type == "p2p":
                    receive_id_type = "open_id"
                else:
                    logger.warning("[FeiShu] message ignore")
                    return self.SUCCESS_MSG
                # build the Feishu message object
                feishu_msg = FeishuMessage(event, is_group=is_group, access_token=channel.fetch_access_token())
                if not feishu_msg:
                    return self.SUCCESS_MSG

                context = self._compose_context(
                    feishu_msg.ctype,
                    feishu_msg.content,
                    isgroup=is_group,
                    msg=feishu_msg,
                    receive_id_type=receive_id_type,
                    no_need_at=True
                )
                if context:
                    channel.produce(context)
                logger.info(f"[FeiShu] query={feishu_msg.content}, type={feishu_msg.ctype}")
            return self.SUCCESS_MSG

        except Exception as e:
            logger.error(e)
            return self.FAILED_MSG

    def _compose_context(self, ctype: ContextType, content, **kwargs):
        context = Context(ctype, content)
        context.kwargs = kwargs
        if "origin_ctype" not in context:
            context["origin_ctype"] = ctype

        cmsg = context["msg"]
        context["session_id"] = cmsg.from_user_id
        context["receiver"] = cmsg.other_user_id

        if ctype == ContextType.TEXT:
            # 1. text request
            # image-generation trigger handling
            img_match_prefix = check_prefix(content, conf().get("image_create_prefix"))
            if img_match_prefix:
                content = content.replace(img_match_prefix, "", 1)
                context.type = ContextType.IMAGE_CREATE
            else:
                context.type = ContextType.TEXT
            context.content = content.strip()

        elif context.type == ContextType.VOICE:
            # 2. voice request
            if "desire_rtype" not in context and conf().get("voice_reply_voice"):
                context["desire_rtype"] = ReplyType.VOICE

        return context
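One way to sanity-check the controller before registering the event subscription in Feishu's developer console is to replay the URL-verification handshake locally; the handler should echo the challenge back. A hedged sketch, assuming the bot is running and listening on the default `feishu_port` 9891 (the token check only applies to message events, so it is not needed here):

```python
import json
import requests

# Feishu sends this payload once when you save the event callback URL.
payload = {"type": "url_verification", "challenge": "test-challenge-123"}

resp = requests.post("http://127.0.0.1:9891/", data=json.dumps(payload), timeout=5)
print(resp.text)  # expected: {"challenge": "test-challenge-123"}
```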
channel/feishu/feishu_message.py (new file, 92 lines)
@@ -0,0 +1,92 @@
from bridge.context import ContextType
from channel.chat_message import ChatMessage
import json
import requests
from common.log import logger
from common.tmp_dir import TmpDir
from common import utils


class FeishuMessage(ChatMessage):
    def __init__(self, event: dict, is_group=False, access_token=None):
        super().__init__(event)
        msg = event.get("message")
        sender = event.get("sender")
        self.access_token = access_token
        self.msg_id = msg.get("message_id")
        self.create_time = msg.get("create_time")
        self.is_group = is_group
        msg_type = msg.get("message_type")

        if msg_type == "text":
            self.ctype = ContextType.TEXT
            content = json.loads(msg.get('content'))
            self.content = content.get("text").strip()
        elif msg_type == "file":
            self.ctype = ContextType.FILE
            content = json.loads(msg.get("content"))
            file_key = content.get("file_key")
            file_name = content.get("file_name")

            self.content = TmpDir().path() + file_key + "." + utils.get_path_suffix(file_name)
            def _download_file():
                # if the response status code is 200, write the response body to a local file
                url = f"https://open.feishu.cn/open-apis/im/v1/messages/{self.msg_id}/resources/{file_key}"
                headers = {
                    "Authorization": "Bearer " + access_token,
                }
                params = {
                    "type": "file"
                }
                response = requests.get(url=url, headers=headers, params=params)
                if response.status_code == 200:
                    with open(self.content, "wb") as f:
                        f.write(response.content)
                else:
                    logger.info(f"[FeiShu] Failed to download file, key={file_key}, res={response.text}")
            self._prepare_fn = _download_file

        # elif msg.type == "voice":
        #     self.ctype = ContextType.VOICE
        #     self.content = TmpDir().path() + msg.media_id + "." + msg.format  # content holds the temp-dir path directly
        #
        #     def download_voice():
        #         # if the response status code is 200, write the response body to a local file
        #         response = client.media.download(msg.media_id)
        #         if response.status_code == 200:
        #             with open(self.content, "wb") as f:
        #                 f.write(response.content)
        #         else:
        #             logger.info(f"[wechatcom] Failed to download voice file, {response.content}")
        #
        #     self._prepare_fn = download_voice
        # elif msg.type == "image":
        #     self.ctype = ContextType.IMAGE
        #     self.content = TmpDir().path() + msg.media_id + ".png"  # content holds the temp-dir path directly
        #
        #     def download_image():
        #         # if the response status code is 200, write the response body to a local file
        #         response = client.media.download(msg.media_id)
        #         if response.status_code == 200:
        #             with open(self.content, "wb") as f:
        #                 f.write(response.content)
        #         else:
        #             logger.info(f"[wechatcom] Failed to download image file, {response.content}")
        #
        #     self._prepare_fn = download_image
        else:
            raise NotImplementedError("Unsupported message type: Type:{} ".format(msg_type))

        self.from_user_id = sender.get("sender_id").get("open_id")
        self.to_user_id = event.get("app_id")
        if is_group:
            # group chat
            self.other_user_id = msg.get("chat_id")
            self.actual_user_id = self.from_user_id
            self.content = self.content.replace("@_user_1", "").strip()
            self.actual_user_nickname = ""
        else:
            # private chat
            self.other_user_id = self.from_user_id
            self.actual_user_id = self.from_user_id
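For context, the event dict this class parses mirrors Feishu's `im.message.receive_v1` callback: the fields read above are `message.message_id`, `message.message_type`, the JSON-encoded `message.content`, and `sender.sender_id.open_id`. A trimmed, illustrative payload (the IDs are made up, and only the fields the parser touches are shown):

```python
import json

event = {
    "sender": {"sender_id": {"open_id": "ou_1234567890abcdef"}},
    "message": {
        "message_id": "om_abcdef123456",
        "chat_id": "oc_fedcba654321",
        "chat_type": "p2p",                            # "group" for group chats
        "message_type": "text",
        "create_time": "1700000000000",
        "content": json.dumps({"text": "hello bot"}),  # content is itself a JSON string
    },
}

# the same parsing FeishuMessage does for text messages
text = json.loads(event["message"]["content"])["text"].strip()
print(text)  # -> hello bot
```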
common/const.py
@@ -18,3 +18,6 @@ TTS_1 = "tts-1"
 TTS_1_HD = "tts-1-hd"

 MODEL_LIST = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "wenxin", "wenxin-4", "xunfei", "claude", "gpt-4-turbo", GPT4_TURBO_PREVIEW, "tongyi"]
+
+# channel
+FEISHU = "feishu"
common/memory.py (new file, 3 lines)
@@ -0,0 +1,3 @@
from common.expired_dict import ExpiredDict

USER_IMAGE_CACHE = ExpiredDict(60 * 3)
common/utils.py
@@ -1,6 +1,6 @@
 import io
 import os

+from urllib.parse import urlparse
 from PIL import Image

@@ -49,3 +49,8 @@ def split_string_by_utf8_length(string, max_length, max_split=0):
         result.append(encoded[start:end].decode("utf-8"))
         start = end
     return result

+
+def get_path_suffix(path):
+    path = urlparse(path).path
+    return os.path.splitext(path)[-1].lstrip('.')
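`get_path_suffix` works for both local paths and URLs because `urlparse` drops any query string before the extension is extracted. A quick standalone check of the same function:

```python
import os
from urllib.parse import urlparse

def get_path_suffix(path):
    path = urlparse(path).path
    return os.path.splitext(path)[-1].lstrip('.')

print(get_path_suffix("/tmp/photo.png"))                             # -> png
print(get_path_suffix("https://example.com/a/image.jpeg?sig=abc"))   # -> jpeg
```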
config.py
@@ -32,6 +32,7 @@ available_setting = {
     "group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"],  # group names where auto-reply is enabled
     "group_name_keyword_white_list": [],  # group-name keywords where auto-reply is enabled
     "group_chat_in_one_session": ["ChatGPT测试群"],  # groups that share a single conversation context
+    "nick_name_black_list": [],  # nickname blacklist
     "group_welcome_msg": "",  # fixed welcome message for new group members; if unset, a random-style welcome is used
     "trigger_by_self": False,  # whether the bot may be triggered by its own messages
     "text_to_image": "dall-e-2",  # image generation model, dall-e-2 or dall-e-3
@@ -121,6 +122,14 @@ available_setting = {
     "wechatcomapp_secret": "",  # WeCom app secret
     "wechatcomapp_agent_id": "",  # WeCom app agent_id
     "wechatcomapp_aes_key": "",  # WeCom app aes_key
+
+    # Feishu configuration
+    "feishu_port": 80,  # port the Feishu bot listens on
+    "feishu_app_id": "",  # Feishu bot app id
+    "feishu_app_secret": "",  # Feishu bot app secret
+    "feishu_token": "",  # Feishu verification token
+    "feishu_bot_name": "",  # name of the Feishu bot
+
     # custom trigger words for chatgpt commands
     "clear_memory_commands": ["#清除记忆"],  # command to reset the conversation; must start with #
     # channel configuration
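Putting the new settings together, a minimal configuration for running the Feishu channel might look like the following, shown as a Python dict mirroring the config.json keys above; the id, secret, and token values are placeholders taken from the Feishu developer console, not real credentials:

```python
# Hypothetical values; only the keys come from available_setting above.
feishu_config = {
    "channel_type": "feishu",       # const.FEISHU, routes startup to FeiShuChanel
    "feishu_port": 9891,            # must match the event-callback URL you register
    "feishu_app_id": "cli_xxxxxxxxxxxx",
    "feishu_app_secret": "your-app-secret",
    "feishu_token": "your-verification-token",
    "feishu_bot_name": "ChatBot",   # the @-name checked for group messages
    "model": "gpt-3.5-turbo",
}
```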
@@ -26,7 +26,7 @@
         "enabled": true,                        # switch for the document summary & chat feature
         "group_enabled": true,                  # whether the feature can be enabled in group chats
         "max_file_size": 5000,                  # file size limit in KB, 5MB by default; larger files are ignored
-        "type": ["FILE", "SHARING", "IMAGE"]    # content types that can be summarized: file, shared link, image
+        "type": ["FILE", "SHARING", "IMAGE"]    # content types that can be summarized: file, shared link, image; file and link are on by default, image is off by default
     }
 }
 ```
@@ -15,6 +15,6 @@
         "enabled": true,
         "group_enabled": true,
         "max_file_size": 5000,
-        "type": ["FILE", "SHARING", "IMAGE"]
+        "type": ["FILE", "SHARING"]
     }
 }
plugins/linkai/linkai.py
@@ -192,9 +192,7 @@ class LinkAI(Plugin):
             return False
         if context.kwargs.get("isgroup") and not self.sum_config.get("group_enabled"):
             return False
-        support_type = self.sum_config.get("type")
-        if not support_type:
-            return True
+        support_type = self.sum_config.get("type") or ["FILE", "SHARING"]
         if context.type.name not in support_type:
             return False
         return True
plugins/linkai/summary.py
@@ -91,5 +91,4 @@ class LinkSummary:
         for support_url in support_list:
             if url.strip().startswith(support_url):
                 return True
-        logger.debug(f"[LinkSum] unsupported url, no need to process, url={url}")
         return False