Add support for ModelScope API
@@ -68,5 +68,9 @@ def create_bot(bot_type):
         from bot.minimax.minimax_bot import MinimaxBot
         return MinimaxBot()
 
+    elif bot_type == const.MODELSCOPE:
+        from bot.modelscope.modelscope_bot import ModelScopeBot
+        return ModelScopeBot()
+
 
     raise RuntimeError
152  bot/modelscope/modelscope_bot.py  Normal file
@@ -0,0 +1,152 @@
# encoding:utf-8

import time
import json

import requests

from bot.bot import Bot
from bot.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .modelscope_session import ModelScopeSession


# ModelScope chat-completion API
class ModelScopeBot(Bot):
    def __init__(self):
        super().__init__()
        self.sessions = SessionManager(ModelScopeSession, model=conf().get("model") or "Qwen/Qwen2.5-7B-Instruct")
        model = conf().get("model") or "Qwen/Qwen2.5-7B-Instruct"
        if model == "modelscope":
            # allow "modelscope" as an alias for the default model
            model = "Qwen/Qwen2.5-7B-Instruct"
        self.args = {
            "model": model,  # name of the chat model
            "temperature": conf().get("temperature", 0.3),  # if set, must be in [0, 1]; 0.3 is recommended
            "top_p": conf().get("top_p", 1.0),  # use the default value
        }
        self.api_key = conf().get("modelscope_api_key")
        self.base_url = conf().get("modelscope_base_url", "https://api-inference.modelscope.cn/v1/chat/completions")
        # For the list of models that support API-Inference, browse the ModelScope model hub:
        # https://modelscope.cn/models?filter=inference_type&page=1
        # or fetch model names and IDs with: curl https://api-inference.modelscope.cn/v1/models
        # To obtain a free ModelScope API key, see:
        # https://modelscope.cn/docs/model-service/API-Inference/intro

    def reply(self, query, context=None):
        # acquire reply content
        if context.type == ContextType.TEXT:
            logger.info("[MODELSCOPE_AI] query={}".format(query))

            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply
            session = self.sessions.session_query(query, session_id)
            logger.debug("[MODELSCOPE_AI] session query={}".format(session.messages))

            model = context.get("modelscope_model")
            new_args = self.args.copy()
            if model:
                new_args["model"] = model
            # if context.get('stream'):
            #     # reply in stream
            #     return self.reply_text_stream(query, new_query, session_id)

            reply_content = self.reply_text(session, args=new_args)
            logger.debug(
                "[MODELSCOPE_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages,
                    session_id,
                    reply_content["content"],
                    reply_content["completion_tokens"],
                )
            )
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[MODELSCOPE_AI] reply {} used 0 tokens.".format(reply_content))
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
        """
        Call the ModelScope chat-completions API to get the answer.
        :param session: a conversation session
        :param args: request arguments (model, temperature, top_p, ...)
        :param retry_count: retry count
        :return: {}
        """
        try:
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.api_key
            }

            body = args
            body["messages"] = session.messages
            res = requests.post(
                self.base_url,
                headers=headers,
                data=json.dumps(body)
            )

            if res.status_code == 200:
                response = res.json()
                return {
                    "total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response["choices"][0]["message"]["content"]
                }
            else:
                response = res.json()
                error = response.get("error") or {}
                logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
                             f"msg={error.get('message')}, type={error.get('type')}")

                result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
                need_retry = False
                if res.status_code >= 500:
                    # server error, retry
                    logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
                    need_retry = retry_count < 2
                elif res.status_code == 401:
                    result["content"] = "授权失败,请检查API Key是否正确"
                elif res.status_code == 429:
                    result["content"] = "请求过于频繁,请稍后再试"
                    need_retry = retry_count < 2
                else:
                    need_retry = False

                if need_retry:
                    time.sleep(3)
                    return self.reply_text(session, args, retry_count + 1)
                else:
                    return result
        except Exception as e:
            logger.exception(e)
            need_retry = retry_count < 2
            result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
            if need_retry:
                return self.reply_text(session, args, retry_count + 1)
            else:
                return result
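For reference, the request that reply_text sends is a standard OpenAI-compatible chat-completions payload, so the endpoint can be sanity-checked outside the bot. A minimal sketch, assuming a valid key exported under the hypothetical environment variable name MODELSCOPE_API_KEY (model name and URL mirror the defaults above):

    import json
    import os

    import requests

    # Same payload shape that ModelScopeBot.reply_text builds from self.args
    body = {
        "model": "Qwen/Qwen2.5-7B-Instruct",
        "temperature": 0.3,
        "top_p": 1.0,
        "messages": [{"role": "user", "content": "你好"}],
    }
    res = requests.post(
        "https://api-inference.modelscope.cn/v1/chat/completions",
        headers={
            "Content-Type": "application/json",
            # MODELSCOPE_API_KEY is a placeholder variable name for this sketch
            "Authorization": "Bearer " + os.environ["MODELSCOPE_API_KEY"],
        },
        data=json.dumps(body),
    )
    res.raise_for_status()
    response = res.json()
    print(response["choices"][0]["message"]["content"])
    print(response["usage"]["completion_tokens"], response["usage"]["total_tokens"])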
51  bot/modelscope/modelscope_session.py  Normal file
@@ -0,0 +1,51 @@
from bot.session_manager import Session
from common.log import logger


class ModelScopeSession(Session):
    def __init__(self, session_id, system_prompt=None, model="Qwen/Qwen2.5-7B-Instruct"):
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # drop the oldest non-system message
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        return num_tokens_from_messages(self.messages, self.model)


def num_tokens_from_messages(messages, model):
    # rough estimate: use character count as a stand-in for token count
    tokens = 0
    for msg in messages:
        tokens += len(msg["content"])
    return tokens
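Note that num_tokens_from_messages is a character-count heuristic, not a real tokenizer, so it tends to overestimate token usage for English text; it is cheap and dependency-free, which is presumably why it was chosen here. A quick illustration (assumes the function defined above):

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},  # 28 characters
        {"role": "user", "content": "你好"},                            # 2 characters
    ]
    # Character counts stand in for tokens, so this prints 30
    print(num_tokens_from_messages(messages, "Qwen/Qwen2.5-7B-Instruct"))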
@@ -49,6 +49,9 @@ class Bridge(object):
         if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
             self.btype["chat"] = const.MOONSHOT
 
+        if model_type in [const.MODELSCOPE]:
+            self.btype["chat"] = const.MODELSCOPE
+
         if model_type in ["abab6.5-chat"]:
             self.btype["chat"] = const.MiniMax
 
@@ -15,7 +15,8 @@ GEMINI = "gemini" # gemini-1.0-pro
 ZHIPU_AI = "glm-4"
 MOONSHOT = "moonshot"
 MiniMax = "minimax"
 
+MODELSCOPE = "Qwen/Qwen2.5-7B-Instruct"
 
 # model
 CLAUDE3 = "claude-3-opus-20240229"
@@ -97,7 +97,8 @@ MODEL_LIST = [
     "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
     QWEN, QWEN_TURBO, QWEN_PLUS, QWEN_MAX,
     LINKAI_35, LINKAI_4_TURBO, LINKAI_4o,
-    DEEPSEEK_CHAT, DEEPSEEK_REASONER
+    DEEPSEEK_CHAT, DEEPSEEK_REASONER,
+    MODELSCOPE
 ]
 
 MODEL_LIST = MODEL_LIST + GITEE_AI_MODEL_LIST
@@ -171,6 +171,9 @@ available_setting = {
     "zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
     "moonshot_api_key": "",
     "moonshot_base_url": "https://api.moonshot.cn/v1/chat/completions",
+    # ModelScope (魔搭社区) platform config
+    "modelscope_api_key": "",
+    "modelscope_base_url": "https://api-inference.modelscope.cn/v1/chat/completions",
     # LinkAI platform config
     "use_linkai": False,
     "linkai_api_key": "",
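With these settings registered, and MODELSCOPE defined in const.py as the literal model name "Qwen/Qwen2.5-7B-Instruct", enabling the new bot should come down to pointing "model" at that value and filling in a key. A hypothetical config.json fragment, with a placeholder key:

    {
        "model": "Qwen/Qwen2.5-7B-Instruct",
        "modelscope_api_key": "<your-modelscope-api-key>",
        "modelscope_base_url": "https://api-inference.modelscope.cn/v1/chat/completions"
    }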
@@ -339,7 +339,8 @@ class Godcmd(Plugin):
                 ok, result = True, "配置已重载"
             elif cmd == "resetall":
                 if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI,
-                               const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT]:
+                               const.BAIDU, const.XUNFEI, const.QWEN, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT,
+                               const.MODELSCOPE]:
                     channel.cancel_all_session()
                     bot.sessions.clear_all_session()
                     ok, result = True, "重置所有会话成功"
@@ -99,7 +99,7 @@ class Role(Plugin):
         if e_context["context"].type != ContextType.TEXT:
             return
         btype = Bridge().get_bot_type("chat")
-        if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI]:
+        if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI, const.MODELSCOPE]:
             logger.debug(f'不支持的bot: {btype}')
             return
         bot = Bridge().get_bot("chat")