From 01c794112d3710d5a47f5a07a81849a20f7b88e9 Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 28 Mar 2023 14:19:36 +0800 Subject: [PATCH 01/25] Update 1.js --- channel/http/static/1.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/channel/http/static/1.js b/channel/http/static/1.js index 4a34289..9ace828 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -12,7 +12,7 @@ function ConvState(wrapper, form, params) { $(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600); }.bind(this); }; -ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。') { +ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。') { setTimeout(function () { var messageObj = $(this.wrapper).find('.message.typing'); answer = marked.parse(answer); @@ -131,4 +131,4 @@ ConvState.prototype.sendMessage = function (msg) { return state; } -})(jQuery); \ No newline at end of file +})(jQuery); From f692a4d0aed02269d5cf7bdf2861495a5b095941 Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 28 Mar 2023 14:21:19 +0800 Subject: [PATCH 02/25] Update config-template.json --- config-template.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config-template.json b/config-template.json index 58fa42a..b941704 100644 --- a/config-template.json +++ b/config-template.json @@ -6,7 +6,7 @@ "model": "gpt-3.5-turbo", "proxy": "", "conversation_max_tokens": 1000, - "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。" + "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉对方,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。" }, "baidu": { "acs_token": "YOUR ACS TOKEN", From f79caa466b1c68d475fd5fcc3c3428b5330e16a4 Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 28 Mar 2023 14:21:41 +0800 Subject: [PATCH 03/25] Update config-template.json --- config-template.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config-template.json b/config-template.json index b941704..153e5e0 100644 --- a/config-template.json +++ b/config-template.json @@ -6,7 +6,7 @@ "model": "gpt-3.5-turbo", "proxy": "", "conversation_max_tokens": 1000, - "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉对方,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。" + "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉提问人,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。" }, "baidu": { "acs_token": "YOUR ACS TOKEN", From 8f9afcd78afcd7d5e76f419c5116603ca610e508 Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 28 Mar 2023 15:39:33 +0800 Subject: [PATCH 04/25] Update 1.js --- channel/http/static/1.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/channel/http/static/1.js b/channel/http/static/1.js index 9ace828..a797415 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -12,7 +12,7 @@ function ConvState(wrapper, form, params) { $(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600); }.bind(this); }; -ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。') { +ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 
我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对实时性与事实性问题提供准确答复,请慎重对待回答。') { setTimeout(function () { var messageObj = $(this.wrapper).find('.message.typing'); answer = marked.parse(answer); From 9e7524ee31dac9718a989333a075a1848a7fadec Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 28 Mar 2023 15:41:53 +0800 Subject: [PATCH 05/25] Update 1.js --- channel/http/static/1.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/channel/http/static/1.js b/channel/http/static/1.js index a797415..ae39d4d 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -12,7 +12,7 @@ function ConvState(wrapper, form, params) { $(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600); }.bind(this); }; -ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对实时性与事实性问题提供准确答复,请慎重对待回答。') { +ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对事实性与实时性问题提供准确答复,请慎重对待回答。') { setTimeout(function () { var messageObj = $(this.wrapper).find('.message.typing'); answer = marked.parse(answer); From 7bfbbe885416d0acb9cf3140d8d35c09e3f2dc95 Mon Sep 17 00:00:00 2001 From: wujiyu115 Date: Wed, 29 Mar 2023 10:06:24 +0800 Subject: [PATCH 06/25] remove the template config --- config-template.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config-template.json b/config-template.json index e494383..7a8fb09 100644 --- a/config-template.json +++ b/config-template.json @@ -3,7 +3,7 @@ "type" : "chatgpt", "openai": { "api_key": "YOUR API KEY", - "api_base": "https://api.xxx.com/v1", + "api_base": "", "model": "gpt-3.5-turbo", "proxy": "", "conversation_max_tokens": 1000, From 9133965614968017c9757b42cdc31a9cdda3cedb Mon Sep 17 00:00:00 2001 From: wujiyu115 Date: Wed, 29 Mar 2023 10:05:08 +0800 Subject: [PATCH 07/25] add dingding channel --- README.md | 29 +++++++++++ channel/channel_factory.py | 4 ++ channel/dd/dd_channel.py | 102 +++++++++++++++++++++++++++++++++++++ common/const.py | 1 + config-template.json | 8 +++ 5 files changed, 144 insertions(+) create mode 100644 channel/dd/dd_channel.py diff --git a/README.md b/README.md index 8351f3a..f310419 100644 --- a/README.md +++ b/README.md @@ -492,6 +492,35 @@ pip3 install PyJWT flask 服务器运行:部署后访问 `http://公网域名或IP:端口` +### 10.钉钉 + +**依赖** + +```bash +pip3 install requests flask +``` +**配置** + +```bash +"channel": { + "type": "dingding", + "dingding": { + "image_create_prefix": ["画", "draw", "Draw"], + "port": "8081", //对外端口 + "dd_token": "xx", //webhook地址的access_token + "dd_post_token": "xx", //钉钉post回消息时header中带的检验token + "dd_secret": "xx"// 安全加密加签串,群机器人中 + } + } +``` +钉钉开放平台说明: https://open.dingtalk.com/document/robots/customize-robot-security-settings + +https://open.dingtalk.com/document/orgapp/custom-robot-access + +**生成机器人** + +地址: https://open-dev.dingtalk.com/fe/app#/corp/robot +添加机器人,在开发管理中设置服务器出口ip(在部署机执行curl ifconfig.me就可以得到)和消息接收地址(配置中的对外地址如 https://xx.xx.com:8081) ### 通用配置 diff --git a/channel/channel_factory.py b/channel/channel_factory.py index 7b53e80..2c0bde3 100644 --- a/channel/channel_factory.py +++ b/channel/channel_factory.py @@ -45,5 +45,9 @@ def create_channel(channel_type): from channel.http.http_channel import
HttpChannel return HttpChannel() + elif channel_type == const.DINGDING: + from channel.dd.dd_channel import DDChannel + return DDChannel() + else: raise RuntimeError("unknown channel_type in config.json: " + channel_type) diff --git a/channel/dd/dd_channel.py b/channel/dd/dd_channel.py new file mode 100644 index 0000000..6664daa --- /dev/null +++ b/channel/dd/dd_channel.py @@ -0,0 +1,102 @@ +# encoding:utf-8 +import json +import hmac +import hashlib +import base64 +import time +import requests +from urllib.parse import quote_plus +from common import log +from flask import Flask, request, render_template, make_response +from common import const +from common import functions +from config import channel_conf +from config import channel_conf_val +from channel.channel import Channel + +class DDChannel(Channel): + def __init__(self): + self.dd_token = channel_conf(const.DINGDING).get('dd_token') + self.dd_post_token = channel_conf(const.DINGDING).get('dd_post_token') + self.dd_secret = channel_conf(const.DINGDING).get('dd_secret') + log.info("[DingDing] dd_secret={}, dd_token={} dd_post_token={}".format(self.dd_secret, self.dd_token, self.dd_post_token)) + + def startup(self): + + http_app.run(host='0.0.0.0', port=channel_conf(const.DINGDING).get('port')) + + def notify_dingding(self, answer): + data = { + "msgtype": "text", + "text": { + "content": answer + }, + + "at": { + "atMobiles": [ + "" + ], + "isAtAll": False + } + } + + timestamp = round(time.time() * 1000) + secret_enc = bytes(self.dd_secret, encoding='utf-8') + string_to_sign = '{}\n{}'.format(timestamp, self.dd_secret) + string_to_sign_enc = bytes(string_to_sign, encoding='utf-8') + hmac_code = hmac.new(secret_enc, string_to_sign_enc, + digestmod=hashlib.sha256).digest() + sign = quote_plus(base64.b64encode(hmac_code)) + + notify_url = f"https://oapi.dingtalk.com/robot/send?access_token={self.dd_token}&timestamp={timestamp}&sign={sign}" + try: + r = requests.post(notify_url, json=data) + reply = r.json() + # log.info("[DingDing] reply={}".format(str(reply))) + except Exception as e: + log.error(e) + + def handle(self, data): + prompt = data['text']['content'] + conversation_id = data['conversationId'] + sender_id = data['senderId'] + context = dict() + img_match_prefix = functions.check_prefix( + prompt, channel_conf_val(const.DINGDING, 'image_create_prefix')) + if img_match_prefix: + prompt = prompt.split(img_match_prefix, 1)[1].strip() + context['type'] = 'IMAGE_CREATE' + id = sender_id + context['from_user_id'] = str(id) + reply = super().build_reply_content(prompt, context) + if img_match_prefix: + if not isinstance(reply, list): + return reply + images = "" + for url in reply: + images += f"[!['IMAGE_CREATE']({url})]({url})\n" + reply = images + return reply + + +dd = DDChannel() +http_app = Flask(__name__,) + + +@http_app.route("/", methods=['POST']) +def chat(): + # log.info("[DingDing] chat_headers={}".format(str(request.headers))) + log.info("[DingDing] chat={}".format(str(request.data))) + token = request.headers.get('token') + if dd.dd_post_token and token != dd.dd_post_token: + return {'ret': 203} + #TODO: Verify identity + data = json.loads(request.data) + if data: + content = data['text']['content'] + if not content: + return + reply_text = dd.handle(data=data) + dd.notify_dingding(reply_text) + return {'ret': 200} + return {'ret': 201} diff --git a/common/const.py b/common/const.py index ad2a061..95e10bc 100644 --- a/common/const.py +++ b/common/const.py @@ -8,6 +8,7 @@ GMAIL = "gmail" TELEGRAM = "telegram" SLACK = "slack"
HTTP = "http" +DINGDING = "dingding" # model OPEN_AI = "openai" diff --git a/config-template.json b/config-template.json index 58fa42a..4cb73cd 100644 --- a/config-template.json +++ b/config-template.json @@ -58,6 +58,14 @@ "http_auth_secret_key": "6d25a684-9558-11e9-aa94-efccd7a0659b", "http_auth_password": "6.67428e-11", "port": "80" + }, + + "dingding": { + "image_create_prefix": ["画", "draw", "Draw"], + "port": "8081", + "dd_token": "xx", + "dd_post_token": "xx", + "dd_secret": "xx" } }, "common": { From 127983688f4adb5ef63674e62d274e962020cfce Mon Sep 17 00:00:00 2001 From: CroiseurAurora <47616680+unkoe@users.noreply.github.com> Date: Wed, 29 Mar 2023 13:46:56 +0800 Subject: [PATCH 08/25] fix: log of null values --- model/bing/new_bing_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model/bing/new_bing_model.py b/model/bing/new_bing_model.py index e72d083..f64602e 100644 --- a/model/bing/new_bing_model.py +++ b/model/bing/new_bing_model.py @@ -55,7 +55,7 @@ class BingModel(Model): answer = asyncio.run(task) except Exception as e: bot.pop_last_conversation() - log.exception(answer) + log.exception(e) return f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请使用{clear_memory_commands[0]}命令清除全部记忆" # 最新一条回复 try: From 6c21ddea5b04823c62abe061d3cfcebf1ac5dcec Mon Sep 17 00:00:00 2001 From: shehuiqiang <39358003@qq.com> Date: Thu, 30 Mar 2023 01:29:54 +0800 Subject: [PATCH 09/25] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=AF=B9=E8=AF=9D=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- bridge/bridge.py | 5 + channel/channel.py | 4 + channel/http/http_channel.py | 55 +++++ channel/http/static/1.css | 362 ++++++++++++++++++++++++++---- channel/http/static/1.js | 107 ++++++--- channel/http/templates/index.html | 152 +++++++++++-- model/bing/jailbroken_sydney.py | 31 ++- model/bing/new_bing_model.py | 184 +++++++++------ model/openai/chatgpt_model.py | 57 ++--- model/openai/open_ai_model.py | 43 +++- 11 files changed, 788 insertions(+), 214 deletions(-) diff --git a/README.md b/README.md index 8351f3a..30bfe42 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ pip3 install --upgrade openai ### 2.GPT-3.0 -使用的模型是 `text-davinci-003`,详情参考[官方文档]([https://platform.openai.com/docs/guides/chat](https://platform.openai.com/docs/guides/completion/introduction))。 +使用的模型是 `text-davinci-003`,详情参考[官方文档]([https://platform.openai.com/docs/guides/chat](https://platform.openai.com/docs/guides/completion/introduction))。注意,gpt3.0模型使用流式输出对话会出现提前中断的问题,请尽量使用一次性输出对话。 使用步骤和上述GPT-3.5基本相同: diff --git a/bridge/bridge.py b/bridge/bridge.py index 9e19251..f58198a 100644 --- a/bridge/bridge.py +++ b/bridge/bridge.py @@ -7,3 +7,8 @@ class Bridge(object): def fetch_reply_content(self, query, context): return model_factory.create_bot(config.conf().get("model").get("type")).reply(query, context) + + async def fetch_reply_stream(self, query, context): + bot=model_factory.create_bot(config.conf().get("model").get("type")) + async for final,response in bot.reply_text_stream(query, context): + yield final,response diff --git a/channel/channel.py b/channel/channel.py index e2617d1..5c589dd 100644 --- a/channel/channel.py +++ b/channel/channel.py @@ -29,3 +29,7 @@ class Channel(object): def build_reply_content(self, query, context=None): return Bridge().fetch_reply_content(query, context) + + async def build_reply_stream(self, query, context=None): + async for final,response in 
Bridge().fetch_reply_stream(query, context): + yield final,response diff --git a/channel/http/http_channel.py b/channel/http/http_channel.py index 2464f61..1c619b5 100644 --- a/channel/http/http_channel.py +++ b/channel/http/http_channel.py @@ -1,5 +1,6 @@ # encoding:utf-8 +import asyncio import json from channel.http import auth from flask import Flask, request, render_template, make_response @@ -9,8 +10,11 @@ from common import functions from config import channel_conf from config import channel_conf_val from channel.channel import Channel +from flask_socketio import SocketIO +from common import log http_app = Flask(__name__,) +socketio = SocketIO(http_app) # 自动重载模板文件 http_app.jinja_env.auto_reload = True http_app.config['TEMPLATES_AUTO_RELOAD'] = True @@ -19,6 +23,50 @@ http_app.config['TEMPLATES_AUTO_RELOAD'] = True http_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1) +async def return_stream(data): + async for final, response in HttpChannel().handle_stream(data=data): + try: + if(final): + socketio.server.emit( + 'disconnect', {'result': response, 'final': final}, request.sid, namespace="/chat") + socketio.server.disconnect(request.sid) + else: + socketio.server.emit( + 'message', {'result': response, 'final': final}, request.sid, namespace="/chat") + except Exception as e: + socketio.server.disconnect(request.sid) + log.error("[http]emit:", e) + break + + +@socketio.on('message', namespace='/chat') +def stream(data): + log.info('message:', data) + if (auth.identify(request) == False): + client_sid = request.sid + socketio.server.disconnect(client_sid) + return + data = json.loads(data["data"]) + if (data): + img_match_prefix = functions.check_prefix( + data["msg"], channel_conf_val(const.HTTP, 'image_create_prefix')) + if img_match_prefix: + reply_text = HttpChannel().handle(data=data) + socketio.emit('message', {'result': reply_text}, namespace='/chat') + asyncio.run(return_stream(data)) + + +@socketio.on('connect', namespace='/chat') +def connect(): + log.info('connected') + socketio.emit('message', {'info': "connected"}, namespace='/chat') + + +@socketio.on('disconnect', namespace='/chat') +def disconnect(): + log.info('disconnect') + + @http_app.route("/chat", methods=['POST']) def chat(): if (auth.identify(request) == False): @@ -80,3 +128,10 @@ class HttpChannel(Channel): images += f"[!['IMAGE_CREATE']({url})]({url})\n" reply = images return reply + + async def handle_stream(self, data): + context = dict() + id = data["id"] + context['from_user_id'] = str(id) + async for final, reply in super().build_reply_stream(data["msg"], context): + yield final, reply diff --git a/channel/http/static/1.css b/channel/http/static/1.css index a53c284..7f1fe3b 100644 --- a/channel/http/static/1.css +++ b/channel/http/static/1.css @@ -1,4 +1,3 @@ - .typing_loader { width: 6px; height: 6px; @@ -11,7 +10,9 @@ left: -12px; margin: 7px 15px 6px; } -ol,pre { + +ol, +pre { background-color: #b1e3b1c4; border: 1px solid #c285e3ab; padding: 0.5rem 1.5rem 0.5rem; @@ -20,50 +21,52 @@ ol,pre { overflow-y: auto; } -pre::-webkit-scrollbar{ +pre::-webkit-scrollbar { width: 0px; - height:5px; + height: 5px; } -pre::-webkit-scrollbar-thumb{ + +pre::-webkit-scrollbar-thumb { border-right: 10px #ffffff00 solid; border-left: 10px #ffffff00 solid; - -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3); + -webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3); } + .to .typing_loader { animation: typing-black 1s linear infinite alternate; } @-webkit-keyframes typing { 0% { - background-color: 
rgba(255,255,255, 1); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2); + background-color: rgba(255, 255, 255, 1); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2); } 50% { - background-color: rgba(255,255,255, 0.4); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4); + background-color: rgba(255, 255, 255, 0.4); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4); } 100% { - background-color: rgba(255,255,255, 0.2); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1); + background-color: rgba(255, 255, 255, 0.2); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1); } } @-moz-keyframes typing { 0% { - background-color: rgba(255,255,255, 1); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2); + background-color: rgba(255, 255, 255, 1); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2); } 50% { - background-color: rgba(255,255,255, 0.4); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4); + background-color: rgba(255, 255, 255, 0.4); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4); } 100% { - background-color: rgba(255,255,255, 0.2); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1); + background-color: rgba(255, 255, 255, 0.2); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1); } } @@ -75,29 +78,29 @@ pre::-webkit-scrollbar-thumb{ 50% { background-color: rgba(74, 74, 74, 0.4); - box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74,0.4); + box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74, 0.4); } 100% { background-color: rgba(74, 74, 74, 0.2); - box-shadow: 12px 0px 0px 0px rgba(74, 74, 74,0.4), 24px 0px 0px 0px rgba(74, 74, 74,1); + box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 0.4), 24px 0px 0px 0px rgba(74, 74, 74, 1); } } @keyframes typing { 0% { - background-color: rgba(255,255,255, 1); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2); + background-color: rgba(255, 255, 255, 1); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2); } 50% { - background-color: rgba(255,255,255, 0.4); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4); + background-color: rgba(255, 255, 255, 0.4); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4); } 100% { - background-color: rgba(255,255,255, 0.2); - box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1); + background-color: rgba(255, 255, 255, 0.2); + box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1); } } @@ -112,27 +115,30 @@ pre::-webkit-scrollbar-thumb{ .convFormDynamic textarea.userInputDynamic { border: none; padding: 7px 10px; - overflow-x: hidden!important; + overflow-x: hidden !important; outline: none; font-size: 0.905rem; float: left; width: calc(100% - 70px); line-height: 1.3em; - min-height: 1.7em; + min-height: 2em; max-height: 10rem; display: block; max-width: 89vw; margin-right: -1vw; resize: none; } 
-.convFormDynamic textarea::-webkit-scrollbar{ + +.convFormDynamic textarea::-webkit-scrollbar { width: 2px; background-color: lawngreen; } -.convFormDynamic textarea::-webkit-scrollbar-thumb{ - -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3); - background-color: dodgerblue; + +.convFormDynamic textarea::-webkit-scrollbar-thumb { + -webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3); + background-color: dodgerblue; } + .convFormDynamic input.userInputDynamic { border: none; padding: 7px 10px; @@ -180,16 +186,20 @@ div.conv-form-wrapper:before { top: 0; left: 0; z-index: 2; - background: linear-gradient(#fff, transparent); + background: linear-gradient(#ffffff3b, transparent); } @media (max-width: 767px) { - div.conv-form-wrapper div.wrapper-messages, div.conv-form-wrapper div#messages { + + div.conv-form-wrapper div.wrapper-messages, + div.conv-form-wrapper div#messages { max-height: 71vh; } } -div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar, div.conv-form-wrapper div.options::-webkit-scrollbar { +div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, +div#feed ul::-webkit-scrollbar, +div.conv-form-wrapper div.options::-webkit-scrollbar { width: 0px; height: 0px; /* remove scrollbar space */ @@ -261,12 +271,13 @@ div.conv-form-wrapper div#messages div.message.to { } div.conv-form-wrapper div#messages div.message.from { - background: dodgerblue; + background: dodgerblue; color: #fff; border-top-right-radius: 0; } -.message.to+.message.from, .message.from+.message.to { +.message.to+.message.from, +.message.from+.message.to { margin-top: 15px; } @@ -294,7 +305,7 @@ div.conv-form-wrapper div#messages div.message.from { position: absolute; bottom: 0px; border: none; - left:95%; + left: 95%; margin: 5px; color: #fff; cursor: pointer; @@ -315,10 +326,11 @@ div.conv-form-wrapper div#messages div.message.from { } button.submit.glow { - border: 1px solid dodgerblue !important; - background: dodgerblue !important; - box-shadow: 0 0 5px 2px rgba(14, 144, 255,0.4); + border: 1px solid dodgerblue !important; + background: dodgerblue !important; + box-shadow: 0 0 5px 2px rgba(14, 144, 255, 0.4); } + .no-border { border: none !important; } @@ -327,7 +339,8 @@ button.submit.glow { cursor: grab; } -div.conv-form-wrapper div#messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar { +div.conv-form-wrapper div#messages::-webkit-scrollbar, +div#feed ul::-webkit-scrollbar { width: 0px; /* remove scrollbar space */ background: transparent; @@ -338,3 +351,268 @@ span.clear { display: block; clear: both; } + +.drawer-icon-container { + position: fixed; + top: calc(50% - 24px); + right: -30px; + z-index: 1000; + transition: right 0.5s ease; +} + +.drawer-icon { + width: 30px; + height: 30px; + cursor: pointer; + box-shadow: 0 0 10px rgba(0, 0, 0, 0.3); + background-color: #b1cee350; + padding-left: 22px; + border-radius: 50%; +} +.drawer-icon:hover{ + background-color: #005eff96; +} +.wrenchFilled.icon { + margin-left: -13px; + margin-top: 5px; + width: 10px; + height: 10px; + border-radius: 50%; + background-color: #333333; + transform-origin: center 10.5px; + transform: rotate(-45deg); +} + +.wrenchFilled.icon:after { + width: 0; + height: 0; + border-radius: 0 0 1px 1px; + background-color: #333333; + border-left: solid 1px transparent; + border-right: solid 1px transparent; + border-top: solid 1px white; + border-bottom: solid 1px transparent; + left: 4px; + top: 4px; +} + +.wrenchFilled.icon:before { + width: 2px; + height: 5px; + background-color: 
white; + left: 4px; + border-radius: 0 0 1px 1px; + box-shadow: 0 15px 0px 1px #333333, 0 11px 0px 1px #333333, 0 8px 0px 1px #333333; +} + +.icon { + position: absolute; +} + +.icon:before, +.icon:after { + content: ''; + position: absolute; + display: block; +} + +.icon i { + position: absolute; +} + +.icon i:before, +.icon i:after { + content: ''; + position: absolute; + display: block; +} + +.drawer-icon i { + margin-left: -15px; + line-height: 30px; + font-weight: bolder; +} + +.drawer { + position: fixed; + top: 0; + right: -300px; + width: 300px; + height: 100%; + background-color: #fff; + z-index: 999; + transition: right 0.5s ease; + display: flex; + flex-direction: column; +} + +.drawer.open { + right: 0; +} + +.drawer-header { + display: flex; + justify-content: space-between; + align-items: center; + background-color: #b1cee350; + border-bottom: 1px solid #ddd; + padding: 16px; +} + +.drawer-header h2 { + margin: 0 0 0 16px; +} + +.drawer-header button { + background-color: transparent; + border: none; + cursor: pointer; +} + +.drawer-content { + flex: 1 1 auto; + height: 100%; + overflow: auto; + padding: 16px; +} + +.drawer-overlay { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background-color: rgba(0, 0, 0, 0.5); + z-index: 998; + display: none; +} + +@-webkit-keyframes click-wave { + 0% { + width: 40px; + height: 40px; + opacity: 0.35; + position: relative; + } + + 100% { + width: 60px; + height: 60px; + margin-left: 80px; + margin-top: 80px; + opacity: 0.0; + } +} + +@-moz-keyframes click-wave { + 0% { + width: 30px; + height: 30px; + opacity: 0.35; + position: relative; + } + + 100% { + width: 80px; + height: 80px; + margin-left: -23px; + margin-top: -23px; + opacity: 0.0; + } +} + +@-o-keyframes click-wave { + 0% { + width: 30px; + height: 30px; + opacity: 0.35; + position: relative; + } + + 100% { + width: 80px; + height: 80px; + margin-left: -23px; + margin-top: -23px; + opacity: 0.0; + } +} + +@keyframes click-wave { + 0% { + width: 30px; + height: 30px; + opacity: 0.35; + position: relative; + } + + 100% { + width: 80px; + height: 80px; + margin-left: -23px; + margin-top: -23px; + opacity: 0.0; + } +} + +.option-input { + -webkit-appearance: none; + -moz-appearance: none; + -ms-appearance: none; + -o-appearance: none; + appearance: none; + position: relative; + top: 10px; + width: 30px; + height: 30px; + -webkit-transition: all 0.15s ease-out 0; + -moz-transition: all 0.15s ease-out 0; + transition: all 0.15s ease-out 0; + background: #cbd1d8; + border: none; + color: #fff; + cursor: pointer; + display: inline-block; + outline: none; + position: relative; + margin-right: 0.5rem; + z-index: 1000; +} + +.option-input:hover { + background: #9faab7; +} + +.option-input:checked { + background: #1e90ffaa; +} + +.option-input:checked::before { + width: 30px; + height: 30px; + position: absolute; + content: '☻'; + display: inline-block; + font-size: 29px; + text-align: center; + line-height: 26px; +} + +.option-input:checked::after { + -webkit-animation: click-wave 0.65s; + -moz-animation: click-wave 0.65s; + animation: click-wave 0.65s; + background: #40e0d0; + content: ''; + display: block; + position: relative; + z-index: 100; +} + +.option-input.radio { + border-radius: 50%; +} + +.option-input.radio::after { + border-radius: 50%; +} \ No newline at end of file diff --git a/channel/http/static/1.js b/channel/http/static/1.js index 4a34289..991f918 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -1,20 +1,29 @@ -function 
ConvState(wrapper, form, params) { - this.id='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) { +function generateUUID () { + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) { var r = Math.random() * 16 | 0, v = c == 'x' ? r : (r & 0x3 | 0x8); return v.toString(16); - }); + }) +} + +const conversationType = { + DISPOSABLE: 1, + STREAM: 1 << 1 +} +function ConvState (wrapper, form, params) { + this.id = generateUUID() this.form = form; this.wrapper = wrapper; + this.backgroundColor = '#ffffff'; this.parameters = params; this.scrollDown = function () { $(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600); }.bind(this); }; -ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。') { +ConvState.prototype.printAnswer = function (uuid, answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。') { setTimeout(function () { - var messageObj = $(this.wrapper).find('.message.typing'); + var messageObj = $(this.wrapper).find(`#${uuid}`); answer = marked.parse(answer); messageObj.html(answer); messageObj.removeClass('typing').addClass('ready'); @@ -22,39 +31,66 @@ ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由Op $(this.wrapper).find(this.parameters.inputIdHashTagName).focus(); }.bind(this), 500); }; +ConvState.prototype.updateAnswer = function (question, uuid) { + setTimeout(function () { + var socket = io('/chat'); + socket.connect('/chat'); + var messageObj = $(this.wrapper).find(`#${uuid}`); + this.scrollDown(); + socket.on('message', msg => { + console.log("message:", msg) + if (msg.result) + messageObj.html(msg.result + `
<span class="typing_loader"></span>`); + }); + socket.on('connect', msg => { + socket.emit('message', { data: JSON.stringify(question) }); + }); + socket.on('disconnect', msg => { + if (msg.result) { + answer = marked.parse(msg.result); + messageObj.html(answer); + } + messageObj.removeClass('typing').addClass('ready'); + this.scrollDown(); + $(this.wrapper).find(this.parameters.inputIdHashTagName).focus(); + console.log("disconnect", msg) + }); + }.bind(this), 1000); +}; ConvState.prototype.sendMessage = function (msg) { var message = $('<div class="message from">
' + msg + '</div>
'); - $('button.submit').removeClass('glow'); $(this.wrapper).find(this.parameters.inputIdHashTagName).focus(); setTimeout(function () { $(this.wrapper).find("#messages").append(message); this.scrollDown(); }.bind(this), 100); - - var messageObj = $('<div class="message to typing"><span class="typing_loader"></span></div>
'); + var uuid = generateUUID().toLowerCase(); + var messageObj = $(`<div class="message to typing" id="${uuid}"><span class="typing_loader"></span></div>
`); setTimeout(function () { $(this.wrapper).find('#messages').append(messageObj); this.scrollDown(); }.bind(this), 150); var _this = this - $.ajax({ - url: "./chat", - type: "POST", - timeout:180000, - data: JSON.stringify({ - "id": _this.id, - "msg": msg - }), - contentType: "application/json; charset=utf-8", - dataType: "json", - success: function (data) { - _this.printAnswer(data.result) - }, - error:function () { - _this.printAnswer("网络故障,对话未送达") - }, - }) + var question = { "id": _this.id, "msg": msg } + if (localConfig.conversationType == conversationType.STREAM) + this.updateAnswer(question, uuid) + else + $.ajax({ + url: "./chat", + type: "POST", + timeout: 180000, + data: JSON.stringify(question), + contentType: "application/json; charset=utf-8", + dataType: "json", + success: function (data) { + _this.printAnswer(uuid, data.result) + }, + error: function (data) { + console.log(data) + _this.printAnswer(uuid, "网络故障,对话未送达") + }, + }) }; (function ($) { $.fn.convform = function () { @@ -81,13 +117,30 @@ ConvState.prototype.sendMessage = function (msg) { $(wrapper).append(inputForm); var state = new ConvState(wrapper, form, parameters); + // Bind checkbox values to ConvState object + $('input[type="checkbox"]').change(function () { + var key = $(this).attr('name'); + state[key] = $(this).is(':checked'); + }); + + // Bind radio button values to ConvState object + $('input[type="radio"]').change(function () { + var key = $(this).attr('name'); + state[key] = $(this).val(); + }); + + // Bind color input value to ConvState object + $('#backgroundColor').change(function () { + state["backgroundColor"] = $(this).val(); + }); //prints first contact $.when($('div.spinLoader').addClass('hidden')).done(function () { - var messageObj = $('<div class="message to typing"><span class="typing_loader"></span></div>
'); + var uuid = generateUUID() + var messageObj = $(`<div class="message to typing" id="${uuid}"><span class="typing_loader"></span></div>
`); $(state.wrapper).find('#messages').append(messageObj); state.scrollDown(); - state.printAnswer(); + state.printAnswer(uuid = uuid); }); //binds enter to send message diff --git a/channel/http/templates/index.html b/channel/http/templates/index.html index f05311f..798965b 100644 --- a/channel/http/templates/index.html +++ b/channel/http/templates/index.html @@ -19,33 +19,137 @@
- - - - - + + + + + + + var ConvStateMap = { + bold: false, + italic: false, + backgroundColor: '#ffffff', + conversationType: conversationType.DISPOSABLE + }; + + // Create a Proxy object to watch all properties of the "ConvStateMap" object + var localConfig = new Proxy(ConvStateMap, { + set: function (target, prop, val) { + target[prop] = val; + // Call your function here + localStorage.setItem('botOnAnyThingConfig', JSON.stringify(localConfig)) + switch (prop) { + case 'backgroundColor': + $('body').css('background-color', val); + $(`#backgroundColor`)?.val(val); + break; + case 'conversationType': + if (val) + $(`#option${val}`)?.prop("checked", true); + } + } + }); + $(document).ready(function () { + let config = localStorage.getItem('botOnAnyThingConfig') + if (config) { + config = JSON.parse(config) + Object.keys(config).forEach(item => localConfig[item] = config[item]) + } + // Open drawer + $('.drawer-icon').click(function () { + if (!$('.drawer').hasClass('open')) { + $('.drawer').toggleClass('open'); + $('.drawer-overlay').fadeIn(); + $('.drawer-icon-container').toggleClass('open').css('right', '270px'); + } else + closeDrawer() + }); + + // Close drawer + $('#close-drawer, .drawer-overlay').click(closeDrawer); + + function closeDrawer() { + $('.drawer').removeClass('open'); + $('.drawer-overlay').fadeOut(); + $('.drawer-icon-container').removeClass('open').css('right', '-30px'); + } + }); + // Bind checkbox values to ConvStateMap object + $('input[type="checkbox"]').change(function () { + var key = $(this).attr('name'); + if (key) + localConfig[key] = $(this).is(':checked'); + }); + + // Bind radio button values to ConvStateMap object + $('input[type="radio"]').change(function () { + var key = $(this).attr('name'); + if (key) + localConfig[key] = $(this).val(); + }); + + // Bind color input value to ConvStateMap object + $('#backgroundColor').on("input", function (e) { + localConfig.backgroundColor = $(this).val(); + }); + + jQuery(function (a) { + var b = a("#chat").convform() + }); + \ No newline at end of file diff --git a/model/bing/jailbroken_sydney.py b/model/bing/jailbroken_sydney.py index cfc6cbb..c10b06e 100644 --- a/model/bing/jailbroken_sydney.py +++ b/model/bing/jailbroken_sydney.py @@ -41,17 +41,13 @@ class SydneyBot(Chatbot): break ordered_messages.insert(0, message) current_message_id = message.get('parentMessageId') - return ordered_messages - def pop_last_conversation(self): - self.conversations_cache[self.conversation_key]["messages"].pop() - - async def ask( + async def ask_stream( self, prompt: str, conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None, - message_id: str = None, + message_id: str = None ) -> dict: # 开启新对话 self.chat_hub = SydneyHub(Conversation( @@ -88,11 +84,32 @@ class SydneyBot(Chatbot): async for final, response in self.chat_hub.ask_stream( prompt=prompt, conversation_style=conversation_style + ): + if final: + try: + self.update_reply_cache(response["item"]["messages"][-1]) + except Exception as e: + self.conversations_cache[self.conversation_key]["messages"].pop() + yield True, f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请清除全部记忆后再次尝试" + yield final, response + + async def ask( + self, + prompt: str, + conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None, + message_id: str = None + ) -> dict: + if self.chat_hub.wss: + if not self.chat_hub.wss.closed: + await self.chat_hub.wss.close() + async for final, response in self.ask_stream( + prompt=prompt, + conversation_style=conversation_style, + message_id=message_id 
): if final: self.update_reply_cache(response["item"]["messages"][-1]) return response - self.chat_hub.wss.close() def update_reply_cache( self, diff --git a/model/bing/new_bing_model.py b/model/bing/new_bing_model.py index e72d083..e20b81f 100644 --- a/model/bing/new_bing_model.py +++ b/model/bing/new_bing_model.py @@ -1,7 +1,7 @@ # encoding:utf-8 import asyncio from model.model import Model -from config import model_conf_val,common_conf_val +from config import model_conf_val, common_conf_val from common import log from EdgeGPT import Chatbot, ConversationStyle from ImageGen import ImageGen @@ -23,87 +23,85 @@ class BingModel(Model): try: self.cookies = model_conf_val("bing", "cookies") self.jailbreak = model_conf_val("bing", "jailbreak") - self.bot = SydneyBot(cookies=self.cookies,options={}) if(self.jailbreak) else Chatbot(cookies=self.cookies) + self.bot = SydneyBot(cookies=self.cookies, options={}) if ( + self.jailbreak) else Chatbot(cookies=self.cookies) except Exception as e: - log.exception(e) + log.warn(e) + + async def reply_text_stream(self, query: str, context=None) -> dict: + async def handle_answer(final, answer): + if final: + try: + reply = self.build_source_attributions(answer, context) + log.info("[NewBing] reply:{}",reply) + yield True, reply + except Exception as e: + log.warn(answer) + log.warn(e) + await user_session.get(context['from_user_id'], None).reset() + yield True, answer + else: + try: + yield False, answer + except Exception as e: + log.warn(answer) + log.warn(e) + await user_session.get(context['from_user_id'], None).reset() + yield True, answer + + if not context or not context.get('type') or context.get('type') == 'TEXT': + clear_memory_commands = common_conf_val( + 'clear_memory_commands', ['#清除记忆']) + if query in clear_memory_commands: + user_session[context['from_user_id']] = None + yield True, '记忆已清除' + bot = user_session.get(context['from_user_id'], None) + if not bot: + bot = self.bot + else: + query = self.get_quick_ask_query(query, context) + user_session[context['from_user_id']] = bot + log.info("[NewBing] query={}".format(query)) + if self.jailbreak: + async for final, answer in bot.ask_stream(query, conversation_style=self.style, message_id=bot.user_message_id): + async for result in handle_answer(final, answer): + yield result + else: + async for final, answer in bot.ask_stream(query, conversation_style=self.style): + async for result in handle_answer(final, answer): + yield result def reply(self, query: str, context=None) -> tuple[str, dict]: if not context or not context.get('type') or context.get('type') == 'TEXT': - clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆']) + clear_memory_commands = common_conf_val( + 'clear_memory_commands', ['#清除记忆']) if query in clear_memory_commands: - user_session[context['from_user_id']]=None + user_session[context['from_user_id']] = None return '记忆已清除' bot = user_session.get(context['from_user_id'], None) if (bot == None): bot = self.bot else: - if (len(query) == 1 and query.isdigit() and query != "0"): - suggestion_dict = suggestion_session[context['from_user_id']] - if (suggestion_dict != None): - query = suggestion_dict[int(query)-1] - if (query == None): - return "输入的序号不在建议列表范围中" - else: - query = "在上面的基础上,"+query + query = self.get_quick_ask_query(query, context) + + user_session[context['from_user_id']] = bot log.info("[NewBing] query={}".format(query)) - if(self.jailbreak): - task = bot.ask(query, conversation_style=self.style,message_id=bot.user_message_id) + if 
(self.jailbreak): + task = bot.ask(query, conversation_style=self.style, + message_id=bot.user_message_id) else: task = bot.ask(query, conversation_style=self.style) - - try: - answer = asyncio.run(task) - except Exception as e: - bot.pop_last_conversation() - log.exception(answer) - return f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请使用{clear_memory_commands[0]}命令清除全部记忆" - # 最新一条回复 + + answer = asyncio.run(task) + if isinstance(answer, str): + return answer try: reply = answer["item"]["messages"][-1] except Exception as e: - self.reset_chat(context['from_user_id']) - log.exception(answer) + user_session.get(context['from_user_id'], None).reset() + log.warn(answer) return "本轮对话已超时,已开启新的一轮对话,请重新提问。" - reply_text = reply["text"] - reference = "" - if "sourceAttributions" in reply: - for i, attribution in enumerate(reply["sourceAttributions"]): - display_name = attribution["providerDisplayName"] - url = attribution["seeMoreUrl"] - reference += f"{i+1}、[{display_name}]({url})\n\n" - - if len(reference) > 0: - reference = "***\n"+reference - - suggestion = "" - if "suggestedResponses" in reply: - suggestion_dict = dict() - for i, attribution in enumerate(reply["suggestedResponses"]): - suggestion_dict[i] = attribution["text"] - suggestion += f">{i+1}、{attribution['text']}\n\n" - suggestion_session[context['from_user_id'] - ] = suggestion_dict - - if len(suggestion) > 0: - suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion - - throttling = answer["item"]["throttling"] - throttling_str = "" - - if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]: - self.reset_chat(context['from_user_id']) - throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)" - else: - throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n" - - response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}" - log.info("[NewBing] reply={}", response) - user_session[context['from_user_id']] = bot - return response - else: - self.reset_chat(context['from_user_id']) - log.warn("[NewBing] reply={}", answer) - return "对话被接口拒绝,已开启新的一轮对话。" + return self.build_source_attributions(answer, context) elif context.get('type', None) == 'IMAGE_CREATE': if functions.contain_chinese(query): return "ImageGen目前仅支持使用英文关键词生成图片" @@ -118,8 +116,58 @@ class BingModel(Model): log.info("[NewBing] image_list={}".format(img_list)) return img_list except Exception as e: - log.exception(e) + log.warn(e) return "输入的内容可能违反微软的图片生成内容策略。过多的策略冲突可能会导致你被暂停访问。" - def reset_chat(self, from_user_id): - asyncio.run(user_session.get(from_user_id, None).reset()) + def get_quick_ask_query(self, query, context): + if (len(query) == 1 and query.isdigit() and query != "0"): + suggestion_dict = suggestion_session[context['from_user_id']] + if (suggestion_dict != None): + query = suggestion_dict[int(query)-1] + if (query == None): + return "输入的序号不在建议列表范围中" + else: + query = "在上面的基础上,"+query + return query + + def build_source_attributions(self, answer, context): + reference = "" + reply = answer["item"]["messages"][-1] + reply_text = reply["text"] + if "sourceAttributions" in reply: + for i, attribution in enumerate(reply["sourceAttributions"]): + display_name = attribution["providerDisplayName"] + url = attribution["seeMoreUrl"] + reference += f"{i+1}、[{display_name}]({url})\n\n" + + if len(reference) > 0: + reference = "***\n"+reference + + suggestion = "" + if "suggestedResponses" in reply: + suggestion_dict = dict() + for i, attribution in 
enumerate(reply["suggestedResponses"]): + suggestion_dict[i] = attribution["text"] + suggestion += f">{i+1}、{attribution['text']}\n\n" + suggestion_session[context['from_user_id'] + ] = suggestion_dict + + if len(suggestion) > 0: + suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion + + throttling = answer["item"]["throttling"] + throttling_str = "" + + if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]: + user_session.get(context['from_user_id'], None).reset() + throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)" + else: + throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n" + + response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}" + log.info("[NewBing] reply={}", response) + return response + else: + user_session.get(context['from_user_id'], None).reset() + log.warn("[NewBing] reply={}", answer) + return "对话被接口拒绝,已开启新的一轮对话。" diff --git a/model/openai/chatgpt_model.py b/model/openai/chatgpt_model.py index 9880888..a2f122f 100644 --- a/model/openai/chatgpt_model.py +++ b/model/openai/chatgpt_model.py @@ -83,20 +83,32 @@ class ChatGPTModel(Model): return "请再问我一次吧" - def reply_text_stream(self, query, new_query, user_id, retry_count=0): + async def reply_text_stream(self, query, context, retry_count=0): try: + user_id=context['from_user_id'] + new_query = Session.build_session_query(query, user_id) res = openai.ChatCompletion.create( model= model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # 对话模型的名称 messages=new_query, temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性 #max_tokens=4096, # 回复最大的字符数 top_p=1, frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容 presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容 - stop=["\n\n\n"], stream=True ) - return self._process_reply_stream(query, res, user_id) + full_response = "" + for chunk in res: + log.debug(chunk) + if (chunk["choices"][0]["finish_reason"]=="stop"): + break + chunk_message = chunk['choices'][0]['delta'].get("content") + if(chunk_message): + full_response+=chunk_message + yield False,full_response + Session.save_session(query, full_response, user_id) + log.info("[chatgpt]: reply={}", full_response) + yield True,full_response except openai.error.RateLimitError as e: # rate limit exception log.warn(e) if retry_count < 1: time.sleep(5) log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1)) - return self.reply_text_stream(query, user_id, retry_count+1) + async for final, text in self.reply_text_stream(query, context, retry_count+1): + yield final, text else: - return "提问太快啦,请休息一下再问我吧" + yield True, "提问太快啦,请休息一下再问我吧" except openai.error.APIConnectionError as e: log.warn(e) log.warn("[CHATGPT] APIConnection failed") - return "我连接不到网络,请稍后重试" + yield True, "我连接不到网络,请稍后重试" except openai.error.Timeout as e: log.warn(e) log.warn("[CHATGPT] Timeout") - return "我没有收到消息,请稍后重试" + yield True, "我没有收到消息,请稍后重试" except Exception as e: # unknown exception log.exception(e) Session.clear_session(user_id) - return "请再问我一次吧" - - - def _process_reply_stream( - self, - query: str, - reply: dict, - user_id: str - ) -> str: - full_response = "" - for response in reply: - if response.get("choices") is None or len(response["choices"]) == 0: - raise Exception("OpenAI API returned no choices") - if response["choices"][0].get("finish_details") is not None: - break - if response["choices"][0].get("text") is None: - raise
Exception("OpenAI API returned no text") - if response["choices"][0]["text"] == "<|endoftext|>": - break - yield response["choices"][0]["text"] - full_response += response["choices"][0]["text"] - if query and full_response: - Session.save_session(query, full_response, user_id) - def create_img(self, query, retry_count=0): try: diff --git a/model/openai/open_ai_model.py b/model/openai/open_ai_model.py index 21a77c9..a32e32b 100644 --- a/model/openai/open_ai_model.py +++ b/model/openai/open_ai_model.py @@ -13,7 +13,9 @@ user_session = dict() class OpenAIModel(Model): def __init__(self): openai.api_key = model_conf(const.OPEN_AI).get('api_key') - + proxy = model_conf(const.OPEN_AI).get('proxy') + if proxy: + openai.proxy = proxy def reply(self, query, context=None): # acquire reply content @@ -72,36 +74,55 @@ class OpenAIModel(Model): return "请再问我一次吧" - def reply_text_stream(self, query, new_query, user_id, retry_count=0): + async def reply_text_stream(self, query, context, retry_count=0): try: + user_id=context['from_user_id'] + new_query = Session.build_session_query(query, user_id) res = openai.Completion.create( - model="text-davinci-003", # 对话模型的名称 + model= "text-davinci-003", # 对话模型的名称 prompt=new_query, temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性 - max_tokens=1200, # 回复最大的字符数 + #max_tokens=4096, # 回复最大的字符数 top_p=1, frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容 presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容 - stop=["\n\n\n"], stream=True ) - return self._process_reply_stream(query, res, user_id) + full_response = "" + for chunk in res: + log.debug(chunk) + if (chunk["choices"][0]["finish_reason"]=="stop"): + break + chunk_message = chunk['choices'][0].get("text") + if(chunk_message): + full_response+=chunk_message + yield False,full_response + Session.save_session(query, full_response, user_id) + log.info("[chatgpt]: reply={}", full_response) + yield True,full_response except openai.error.RateLimitError as e: # rate limit exception log.warn(e) if retry_count < 1: time.sleep(5) - log.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1)) - return self.reply_text(query, user_id, retry_count+1) + log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1)) + async for final, text in self.reply_text_stream(query, context, retry_count+1): + yield final, text else: - return "提问太快啦,请休息一下再问我吧" + yield True, "提问太快啦,请休息一下再问我吧" + except openai.error.APIConnectionError as e: + log.warn(e) + log.warn("[CHATGPT] APIConnection failed") + yield True, "我连接不到网络,请稍后重试" + except openai.error.Timeout as e: + log.warn(e) + log.warn("[CHATGPT] Timeout") + yield True, "我没有收到消息,请稍后重试" except Exception as e: # unknown exception log.exception(e) Session.clear_session(user_id) - return "请再问我一次吧" - + yield True, "请再问我一次吧" def _process_reply_stream( From 10242a4f9afc005ea1f413ea58190ed9afe1e3dd Mon Sep 17 00:00:00 2001 From: shehuiqiang <39358003@qq.com> Date: Thu, 30 Mar 2023 01:57:46 +0800 Subject: [PATCH 10/25] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=AF=B9=E8=AF=9D=E7=94=9F=E6=88=90=E5=9B=BE=E7=89=87=E4=BC=9A?= =?UTF-8?q?=E5=9C=A8=E5=9B=9E=E5=A4=8D=E4=B8=80=E6=AC=A1=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- channel/http/http_channel.py | 4 ++-- channel/http/static/1.js | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/channel/http/http_channel.py b/channel/http/http_channel.py index 1c619b5..deb5787 100644 --- a/channel/http/http_channel.py +++ b/channel/http/http_channel.py @@ -41,7
+41,6 @@ async def return_stream(data): @socketio.on('message', namespace='/chat') def stream(data): - log.info('message:', data) if (auth.identify(request) == False): client_sid = request.sid socketio.server.disconnect(client_sid) @@ -52,7 +51,8 @@ def stream(data): data["msg"], channel_conf_val(const.HTTP, 'image_create_prefix')) if img_match_prefix: reply_text = HttpChannel().handle(data=data) - socketio.emit('message', {'result': reply_text}, namespace='/chat') + socketio.emit('disconnect', {'result': reply_text}, namespace='/chat') + return asyncio.run(return_stream(data)) diff --git a/channel/http/static/1.js b/channel/http/static/1.js index 0bac982..45e03a5 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -38,7 +38,6 @@ ConvState.prototype.updateAnswer = function (question, uuid) { var messageObj = $(this.wrapper).find(`#${uuid}`); this.scrollDown(); socket.on('message', msg => { - console.log("message:", msg) if (msg.result) messageObj.html(msg.result + `<span class="typing_loader"></span>
`); }); From 8fc976a40778704141f09931223fc1d6dba1247a Mon Sep 17 00:00:00 2001 From: shehuiqiang <39358003@qq.com> Date: Thu, 30 Mar 2023 02:02:31 +0800 Subject: [PATCH 11/25] =?UTF-8?q?=E4=BF=AE=E6=94=B9web=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 30bfe42..c3deb64 100644 --- a/README.md +++ b/README.md @@ -472,7 +472,7 @@ https://slack.dev/bolt-python/tutorial/getting-started **依赖** ```bash -pip3 install PyJWT flask +pip3 install PyJWT flask flask_socketio ``` **配置** From c89f8aac2016a80a68a2d210a9fd24e6c4fdd780 Mon Sep 17 00:00:00 2001 From: wujiyu115 Date: Thu, 30 Mar 2023 11:39:12 +0800 Subject: [PATCH 12/25] rename dingding to dingtalk --- README.md | 12 +++---- channel/channel_factory.py | 6 ++-- .../dingtalk_channel.py} | 35 +++++++++---------- common/const.py | 2 +- config-template.json | 8 ++--- 5 files changed, 31 insertions(+), 32 deletions(-) rename channel/{dd/dd_channel.py => dingtalk/dingtalk_channel.py} (69%) diff --git a/README.md b/README.md index f310419..247ba7a 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ - [ ] 企业微信 - [x] [Telegram](https://github.com/zhayujie/bot-on-anything#6telegram) - [x] [QQ](https://github.com/zhayujie/bot-on-anything#5qq) - - [ ] 钉钉 + - [x] 钉钉 - [ ] 飞书 - [x] [Gmail](https://github.com/zhayujie/bot-on-anything#7gmail) - [x] [Slack](https://github.com/zhayujie/bot-on-anything#8slack) @@ -503,13 +503,13 @@ pip3 install requests flask ```bash "channel": { - "type": "dingding", - "dingding": { + "type": "dingtalk", + "dingtalk": { "image_create_prefix": ["画", "draw", "Draw"], "port": "8081", //对外端口 - "dd_token": "xx", //webhook地址的access_token - "dd_post_token": "xx", //钉钉post回消息时header中带的检验token - "dd_secret": "xx"// 安全加密加签串,群机器人中 + "dingtalk_token": "xx", //webhook地址的access_token + "dingtalk_post_token": "xx", //钉钉post回消息时header中带的检验token + "dingtalk_secret": "xx"// 安全加密加签串,群机器人中 } } ``` diff --git a/channel/channel_factory.py b/channel/channel_factory.py index 2c0bde3..78a1a49 100644 --- a/channel/channel_factory.py +++ b/channel/channel_factory.py @@ -45,9 +45,9 @@ def create_channel(channel_type): from channel.http.http_channel import HttpChannel return HttpChannel() - elif channel_type == const.DINGDING: - from channel.dd.dd_channel import DDChannel - return DDChannel() + elif channel_type == const.DINGTALK: + from channel.dingtalk.dingtalk_channel import DingTalkChannel + return DingTalkChannel() else: raise RuntimeError("unknown channel_type in config.json: " + channel_type) diff --git a/channel/dd/dd_channel.py b/channel/dingtalk/dingtalk_channel.py similarity index 69% rename from channel/dd/dd_channel.py rename to channel/dingtalk/dingtalk_channel.py index 6664daa..47c5d7f 100644 --- a/channel/dd/dd_channel.py +++ b/channel/dingtalk/dingtalk_channel.py @@ -14,18 +14,18 @@ from config import channel_conf from config import channel_conf_val from channel.channel import Channel -class DDChannel(Channel): +class DingTalkChannel(Channel): def __init__(self): - self.dd_token = channel_conf(const.DINGDING).get('dd_token') - self.dd_post_token = channel_conf(const.DINGDING).get('dd_post_token') - self.dd_secret = channel_conf(const.DINGDING).get('dd_secret') - log.info("[DingDing] dd_secret={}, dd_token={} dd_post_token={}".format(self.dd_secret, self.dd_token, self.dd_post_token)) + self.dingtalk_token = channel_conf(const.DINGTALK).get('dingtalk_token') + 
self.dingtalk_post_token = channel_conf(const.DINGTALK).get('dingtalk_post_token') + self.dingtalk_secret = channel_conf(const.DINGTALK).get('dingtalk_secret') + log.info("[DingTalk] dingtalk_secret={}, dingtalk_token={} dingtalk_post_token={}".format(self.dingtalk_secret, self.dingtalk_token, self.dingtalk_post_token)) def startup(self): - http_app.run(host='0.0.0.0', port=channel_conf(const.DINGDING).get('port')) + http_app.run(host='0.0.0.0', port=channel_conf(const.DINGTALK).get('port')) - def notify_dingding(self, answer): + def notify_dingtalk(self, answer): data = { "msgtype": "text", "text": { @@ -41,18 +41,18 @@ class DDChannel(Channel): } timestamp = round(time.time() * 1000) - secret_enc = bytes(self.dd_secret, encoding='utf-8') - string_to_sign = '{}\n{}'.format(timestamp, self.dd_secret) + secret_enc = bytes(self.dingtalk_secret, encoding='utf-8') + string_to_sign = '{}\n{}'.format(timestamp, self.dingtalk_secret) string_to_sign_enc = bytes(string_to_sign, encoding='utf-8') hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest() sign = quote_plus(base64.b64encode(hmac_code)) - notify_url = f"https://oapi.dingtalk.com/robot/send?access_token={self.dd_token}&timestamp={timestamp}&sign={sign}" + notify_url = f"https://oapi.dingtalk.com/robot/send?access_token={self.dingtalk_token}&timestamp={timestamp}&sign={sign}" try: r = requests.post(notify_url, json=data) reply = r.json() - # log.info("[DingDing] reply={}".format(str(reply))) + # log.info("[DingTalk] reply={}".format(str(reply))) except Exception as e: log.error(e) @@ -62,7 +62,7 @@ class DDChannel(Channel): sender_id = data['senderId'] context = dict() img_match_prefix = functions.check_prefix( - prompt, channel_conf_val(const.DINGDING, 'image_create_prefix')) + prompt, channel_conf_val(const.DINGTALK, 'image_create_prefix')) if img_match_prefix: prompt = prompt.split(img_match_prefix, 1)[1].strip() context['type'] = 'IMAGE_CREATE' @@ -79,24 +79,23 @@ class DDChannel(Channel): return reply -dd = DDChannel() +dd = DingTalkChannel() http_app = Flask(__name__,) @http_app.route("/", methods=['POST']) def chat(): - # log.info("[DingDing] chat_headers={}".format(str(request.headers))) - log.info("[DingDing] chat={}".format(str(request.data))) + # log.info("[DingTalk] chat_headers={}".format(str(request.headers))) + log.info("[DingTalk] chat={}".format(str(request.data))) token = request.headers.get('token') - if dd.dd_post_token and token != dd.dd_post_token: + if dd.dingtalk_post_token and token != dd.dingtalk_post_token: return {'ret': 203} - #TODO: Verify identity data = json.loads(request.data) if data: content = data['text']['content'] if not content: return reply_text = dd.handle(data=data) - dd.notify_dingding(reply_text) + dd.notify_dingtalk(reply_text) return {'ret': 200} return {'ret': 201} diff --git a/common/const.py b/common/const.py index 95e10bc..6324c82 100644 --- a/common/const.py +++ b/common/const.py @@ -8,7 +8,7 @@ GMAIL = "gmail" TELEGRAM = "telegram" SLACK = "slack" HTTP = "http" -DINGDING = "dingding" +DINGTALK = "dingtalk" # model OPEN_AI = "openai" diff --git a/config-template.json b/config-template.json index 4cb73cd..c0aea2f 100644 --- a/config-template.json +++ b/config-template.json @@ -60,12 +60,12 @@ "port": "80" }, - "dingding": { + "dingtalk": { "image_create_prefix": ["画", "draw", "Draw"], "port": "8081", - "dd_token": "xx", - "dd_post_token": "xx", - "dd_secret": "xx" + "dingtalk_token": "xx", + "dingtalk_post_token": "xx", + "dingtalk_secret": "xx" } }, "common": { From
5da836e0e8be354f650824c397abfea66186b986 Mon Sep 17 00:00:00 2001 From: shehuiqiang <39358003@qq.com> Date: Thu, 30 Mar 2023 22:26:42 +0800 Subject: [PATCH 13/25] =?UTF-8?q?bug=E5=A4=84=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- channel/http/http_channel.py | 10 ++++++---- channel/http/templates/index.html | 4 ++++ model/bing/new_bing_model.py | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/channel/http/http_channel.py b/channel/http/http_channel.py index deb5787..a82fbbb 100644 --- a/channel/http/http_channel.py +++ b/channel/http/http_channel.py @@ -14,7 +14,7 @@ from flask_socketio import SocketIO from common import log http_app = Flask(__name__,) -socketio = SocketIO(http_app) +socketio = SocketIO(http_app, close_timeout=5) # 自动重载模板文件 http_app.jinja_env.auto_reload = True http_app.config['TEMPLATES_AUTO_RELOAD'] = True @@ -29,13 +29,13 @@ async def return_stream(data): if(final): socketio.server.emit( 'disconnect', {'result': response, 'final': final}, request.sid, namespace="/chat") - socketio.server.disconnect(request.sid) + disconnect() else: socketio.server.emit( 'message', {'result': response, 'final': final}, request.sid, namespace="/chat") except Exception as e: - socketio.server.disconnect(request.sid) - log.error("[http]emit:", e) + disconnect() + log.warn("[http]emit:{}", e) break @@ -52,6 +52,7 @@ def stream(data): if img_match_prefix: reply_text = HttpChannel().handle(data=data) socketio.emit('disconnect', {'result': reply_text}, namespace='/chat') + disconnect() return asyncio.run(return_stream(data)) @@ -65,6 +66,7 @@ def connect(): @socketio.on('disconnect', namespace='/chat') def disconnect(): log.info('disconnect') + socketio.server.disconnect(request.sid,namespace="/chat") @http_app.route("/chat", methods=['POST']) diff --git a/channel/http/templates/index.html b/channel/http/templates/index.html index 798965b..1b0f385 100644 --- a/channel/http/templates/index.html +++ b/channel/http/templates/index.html @@ -146,6 +146,10 @@ localConfig.backgroundColor = $(this).val(); }); + $(window).on('unload', function () { + socket.disconnect(); + }); + jQuery(function (a) { var b = a("#chat").convform() }); diff --git a/model/bing/new_bing_model.py b/model/bing/new_bing_model.py index e20b81f..933866e 100644 --- a/model/bing/new_bing_model.py +++ b/model/bing/new_bing_model.py @@ -33,7 +33,7 @@ class BingModel(Model): if final: try: reply = self.build_source_attributions(answer, context) - log.info("[NewBing] reply:{}",reply) + log.info("[NewBing] reply:{}", reply) yield True, reply except Exception as e: log.warn(answer) From e2d0cbada4e263a0a0614f4368aa3698677a09ba Mon Sep 17 00:00:00 2001 From: shehuiqiang <39358003@qq.com> Date: Fri, 31 Mar 2023 02:14:53 +0800 Subject: [PATCH 14/25] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dgpt3.0=E6=B5=81?= =?UTF-8?q?=E5=BC=8F=E5=AF=B9=E8=AF=9D=E6=96=AD=E6=B5=81=EF=BC=8C=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E6=A8=A1=E5=9E=8B=E5=8F=82=E6=95=B0=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 9 +++++++-- channel/http/static/1.js | 1 + config-template.json | 4 ++++ model/openai/chatgpt_model.py | 21 +++++++++++---------- model/openai/open_ai_model.py | 20 ++++++++++---------- 5 files changed, 33 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index c3deb64..9dc7169 100644 --- a/README.md +++ b/README.md @@ -104,8 +104,13 @@ pip3 install --upgrade openai "openai": { "api_key": "YOUR API 
KEY", "model": "gpt-3.5-turbo", # 模型名称 - "proxy": "http://127.0.0.1:7890", - "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。" + "proxy": "http://127.0.0.1:7890", # 代理地址 + "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉提问人,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。", + "conversation_max_tokens": 1000, # 回复最大的字符数,为输入和输出的总数 + "temperature":0.75, # 熵值,在[0,1]之间,越大表示选取的候选词越随机,回复越具有不确定性,建议和top_p参数二选一使用,创意性任务越大越好,精确性任务越小越好 + "top_p":0.7, #候选词列表。0.7 意味着只考虑前70%候选词的标记,建议和temperature参数二选一使用 + "frequency_penalty":0.0, # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容 + "presence_penalty":1.0, # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容 } } ``` diff --git a/channel/http/static/1.js b/channel/http/static/1.js index 45e03a5..7466104 100644 --- a/channel/http/static/1.js +++ b/channel/http/static/1.js @@ -40,6 +40,7 @@ ConvState.prototype.updateAnswer = function (question, uuid) { socket.on('message', msg => { if (msg.result) messageObj.html(msg.result + `
`);
+                this.scrollDown();
             });
             socket.on('connect', msg => {
                 socket.emit('message', { data: JSON.stringify(question) });
diff --git a/config-template.json b/config-template.json
index 5c6fe9e..a7682eb 100644
--- a/config-template.json
+++ b/config-template.json
@@ -7,6 +7,10 @@
         "model": "gpt-3.5-turbo",
         "proxy": "",
         "conversation_max_tokens": 1000,
+        "temperature":0.75,
+        "top_p":0.7,
+        "frequency_penalty":0.0,
+        "presence_penalty":1.0,
         "character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉提问人,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。"
     },
     "baidu": {
diff --git a/model/openai/chatgpt_model.py b/model/openai/chatgpt_model.py
index 38270e1..33561fe 100644
--- a/model/openai/chatgpt_model.py
+++ b/model/openai/chatgpt_model.py
@@ -50,11 +50,12 @@ class ChatGPTModel(Model):
             response = openai.ChatCompletion.create(
                 model= model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",  # 对话模型的名称
                 messages=query,
-                temperature=0.9,  # 值在[0,1]之间,越大表示回复越具有不确定性
-                top_p=1,
-                frequency_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
-                presence_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
-            )
+                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # 熵值,在[0,1]之间,越大表示选取的候选词越随机,回复越具有不确定性,建议和top_p参数二选一使用,创意性任务越大越好,精确性任务越小越好
+                #max_tokens=4096,  # 回复最大的字符数,为输入和输出的总数
+                #top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),  #候选词列表。0.7 意味着只考虑前70%候选词的标记,建议和temperature参数二选一使用
+                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
+                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0)  # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
+            )
             reply_content = response.choices[0]['message']['content']
             used_token = response['usage']['total_tokens']
             log.debug(response)
@@ -94,11 +95,11 @@ class ChatGPTModel(Model):
             res = openai.ChatCompletion.create(
                 model= model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo",  # 对话模型的名称
                 messages=new_query,
-                temperature=0.9,  # 值在[0,1]之间,越大表示回复越具有不确定性
-                #max_tokens=4096,  # 回复最大的字符数
-                top_p=1,
-                frequency_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
-                presence_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
+                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # 熵值,在[0,1]之间,越大表示选取的候选词越随机,回复越具有不确定性,建议和top_p参数二选一使用,创意性任务越大越好,精确性任务越小越好
+                #max_tokens=4096,  # 回复最大的字符数,为输入和输出的总数
+                #top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),  #候选词列表。0.7 意味着只考虑前70%候选词的标记,建议和temperature参数二选一使用
+                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
+                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),  # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
                 stream=True
             )
             full_response = ""
diff --git a/model/openai/open_ai_model.py b/model/openai/open_ai_model.py
index 299a882..9f17fe7 100644
--- a/model/openai/open_ai_model.py
+++ b/model/openai/open_ai_model.py
@@ -53,11 +53,11 @@ class OpenAIModel(Model):
             response = openai.Completion.create(
                 model="text-davinci-003",  # 对话模型的名称
                 prompt=query,
-                temperature=0.9,  # 值在[0,1]之间,越大表示回复越具有不确定性
-                max_tokens=1200,  # 回复最大的字符数
-                top_p=1,
-                frequency_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
-                presence_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
+                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # 熵值,在[0,1]之间,越大表示选取的候选词越随机,回复越具有不确定性,建议和top_p参数二选一使用,创意性任务越大越好,精确性任务越小越好
+                #max_tokens=4096,  # 回复最大的字符数,为输入和输出的总数
+                #top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),  #候选词列表。0.7 意味着只考虑前70%候选词的标记,建议和temperature参数二选一使用
+                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
+                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),  # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
                 stop=["\n\n\n"]
             )
             res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
@@ -86,11 +86,11 @@
             res = openai.Completion.create(
                 model= "text-davinci-003",  # 对话模型的名称
                 prompt=new_query,
-                temperature=0.9,  # 值在[0,1]之间,越大表示回复越具有不确定性
-                #max_tokens=4096,  # 回复最大的字符数
-                top_p=1,
-                frequency_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
-                presence_penalty=0.0,  # [-2,2]之间,该值越大则更倾向于产生不同的内容
+                temperature=model_conf(const.OPEN_AI).get("temperature", 0.75),  # 熵值,在[0,1]之间,越大表示选取的候选词越随机,回复越具有不确定性,建议和top_p参数二选一使用,创意性任务越大越好,精确性任务越小越好
+                max_tokens=model_conf(const.OPEN_AI).get("conversation_max_tokens", 3000),  # 回复最大的字符数,为输入和输出的总数,davinci的流式对话需要启用这属性,不然对话会断流
+                #top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),  #候选词列表。0.7 意味着只考虑前70%候选词的标记,建议和temperature参数二选一使用
+                frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0),  # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
+                presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0),  # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
                 stream=True
             )
             full_response = ""
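
The README text added in this patch recommends setting either `temperature` or `top_p`, not both. Two illustrative `openai` blocks may make that guidance concrete; the values below are assumptions chosen for illustration only, not the defaults shipped in config-template.json:

```bash
// 精确性任务 (precision-leaning, e.g. Q&A, translation): low temperature, top_p omitted
"openai": {
    "model": "gpt-3.5-turbo",
    "temperature": 0.2,
    "frequency_penalty": 0.0,
    "presence_penalty": 1.0
}

// 创意性任务 (creativity-leaning, e.g. writing, brainstorming): high temperature, top_p omitted
"openai": {
    "model": "gpt-3.5-turbo",
    "temperature": 0.9,
    "frequency_penalty": 0.0,
    "presence_penalty": 1.0
}
```
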
From d99c986533e0386974d9128fff7f840d18f68e62 Mon Sep 17 00:00:00 2001
From: shehuiqiang <39358003@qq.com>
Date: Fri, 31 Mar 2023 02:15:40 +0800
Subject: [PATCH 15/25] edit README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9dc7169..6556332 100644
--- a/README.md
+++ b/README.md
@@ -122,7 +122,7 @@ pip3 install --upgrade openai
 
 ### 2.GPT-3.0
 
-使用的模型是 `text-davinci-003`,详情参考[官方文档]([https://platform.openai.com/docs/guides/chat](https://platform.openai.com/docs/guides/completion/introduction))。注意,gpt3.0模型使用流式输出对话会出现提前中断的问题,请尽量使用一次性输出对话。
+使用的模型是 `text-davinci-003`,详情参考[官方文档](https://platform.openai.com/docs/guides/completion/introduction)。
 
 使用步骤和上述GPT-3.5基本相同:
 
From c4873f26691984dbca7472644d8ac2ed86cd0765 Mon Sep 17 00:00:00 2001
From: RA
Date: Fri, 31 Mar 2023 22:05:47 +0800
Subject: [PATCH 16/25] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=90=8E=E7=AB=AF?=
 =?UTF-8?q?=E6=8A=A5=E9=94=99=E5=90=8E=E5=89=8D=E5=8F=B0http=E9=A1=B5?=
 =?UTF-8?q?=E9=9D=A2=E6=B2=A1=E6=9C=89=E5=85=B3=E9=97=ADwss=E8=BF=9E?=
 =?UTF-8?q?=E6=8E=A5=E7=9A=84=E9=97=AE=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 channel/http/static/1.js | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/channel/http/static/1.js b/channel/http/static/1.js
index 7466104..241ff2e 100644
--- a/channel/http/static/1.js
+++ b/channel/http/static/1.js
@@ -1,5 +1,5 @@
 
-function generateUUID () {
+function generateUUID() {
     return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
         var r = Math.random() * 16 | 0, v = c == 'x' ? r : (r & 0x3 | 0x8);
@@ -11,7 +11,7 @@ const conversationType = {
     DISPOSABLE: 1,
     STREAM: 1 << 1
 }
-function ConvState (wrapper, form, params) {
+function ConvState(wrapper, form, params) {
     this.id = generateUUID()
     this.form = form;
     this.wrapper = wrapper;
@@ -31,13 +31,37 @@ ConvState.prototype.printAnswer = function (uuid, answer = '我是ChatGPT, 一
         $(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
     }.bind(this), 500);
 };
+
 ConvState.prototype.updateAnswer = function (question, uuid) {
     setTimeout(function () {
         var socket = io('/chat');
         socket.connect('/chat');
+        let timerId;
+        var _this = this
+        // 设置计时器,如果在规定的时间内没有接收到消息,则手动断开连接
+        function setTimer() {
+            timerId = setTimeout(() => {
+                if (socket.connected) {
+                    socket.disconnect();
+                    handle_disconnect();
+                }
+            }, 60000);
+        }
+        function resetTimer() {
+            clearTimeout(timerId);
+            setTimer();
+        }
+        setTimer();
         var messageObj = $(this.wrapper).find(`#${uuid}`);
+        function handle_disconnect() {
+            messageObj.removeClass('typing').addClass('ready');
+            _this.scrollDown();
+            $(_this.wrapper).find(_this.parameters.inputIdHashTagName).focus();
+        }
         this.scrollDown();
         socket.on('message', msg => {
+            // 接收到消息时重置计时器
+            resetTimer();
             if (msg.result) messageObj.html(msg.result + `
`); this.scrollDown(); @@ -50,10 +74,7 @@ ConvState.prototype.updateAnswer = function (question, uuid) { answer = marked.parse(msg.result); messageObj.html(answer); } - messageObj.removeClass('typing').addClass('ready'); - this.scrollDown(); - $(this.wrapper).find(this.parameters.inputIdHashTagName).focus(); - console.log("disconnect", msg) + handle_disconnect() }); }.bind(this), 1000); }; From 9aa331a3d3ee327f0f226eb43a61b1484c969a13 Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sat, 1 Apr 2023 13:57:47 +0800 Subject: [PATCH 17/25] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=90=AF=E5=8A=A8?= =?UTF-8?q?=E5=8F=82=E6=95=B0,=E6=96=B9=E4=BE=BFdocker=E5=AE=B9=E5=99=A8?= =?UTF-8?q?=E8=AF=BB=E5=8F=96=E5=A4=96=E9=83=A8=E9=85=8D=E7=BD=AE=E6=96=87?= =?UTF-8?q?=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app.py | 13 ++++++++++--- config.py | 3 +-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/app.py b/app.py index 072fc7c..236d246 100644 --- a/app.py +++ b/app.py @@ -1,5 +1,6 @@ # encoding:utf-8 +import argparse import config from channel import channel_factory from common import log, const @@ -9,7 +10,7 @@ from multiprocessing import Pool # 启动通道 def start_process(channel_type): # 若为多进程启动,子进程无法直接访问主进程的内存空间,重新创建config类 - config.load_config() + config.load_config(args.config) model_type = config.conf().get("model").get("type") log.info("[INIT] Start up: {} on {}", model_type, channel_type) @@ -19,10 +20,10 @@ def start_process(channel_type): # startup channel channel.startup() -if __name__ == '__main__': +def main(): try: # load config - config.load_config() + config.load_config(args.config) model_type = config.conf().get("model").get("type") channel_type = config.conf().get("channel").get("type") @@ -60,3 +61,9 @@ if __name__ == '__main__': except Exception as e: log.error("App startup failed!") log.exception(e) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--config", help="config.json path(e.g: ./config.json or /usr/local/bot-on-anything/config.json)",type=str,default="./config.json") + args = parser.parse_args() + main() diff --git a/config.py b/config.py index a1e6a62..28812fc 100644 --- a/config.py +++ b/config.py @@ -6,9 +6,8 @@ import os config = {} -def load_config(): +def load_config(config_path = "./config.json"): global config - config_path = "config.json" if not os.path.exists(config_path): raise Exception('配置文件不存在,请根据config-template.json模板创建config.json文件') From 4f09b576ec3b9e8ac2297c3fc469e56a80131575 Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sat, 1 Apr 2023 14:08:37 +0800 Subject: [PATCH 18/25] =?UTF-8?q?=E6=B7=BB=E5=8A=A0wss=E4=BE=9D=E8=B5=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 57a56a2..92b0b1d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ PyJWT flask +flask_socketio itchat-uos==1.5.0.dev0 openai \ No newline at end of file From ba3866df217a2507789bbae3db16ecabe44d534e Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sat, 1 Apr 2023 16:12:15 +0800 Subject: [PATCH 19/25] use python3.10 as base image --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2b2cc6b..b6b034d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.7-alpine +FROM python:3.10-alpine WORKDIR /app From 
7c817d967dec47c31b549b3b6bba35d0368e7e3f Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sat, 1 Apr 2023 16:53:39 +0800 Subject: [PATCH 20/25] bing bug fixed --- model/bing/jailbroken_sydney.py | 5 ++--- requirements.txt | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/model/bing/jailbroken_sydney.py b/model/bing/jailbroken_sydney.py index c10b06e..198e46f 100644 --- a/model/bing/jailbroken_sydney.py +++ b/model/bing/jailbroken_sydney.py @@ -87,6 +87,8 @@ class SydneyBot(Chatbot): ): if final: try: + if self.chat_hub.wss and not self.chat_hub.wss.closed: + await self.chat_hub.wss.close() self.update_reply_cache(response["item"]["messages"][-1]) except Exception as e: self.conversations_cache[self.conversation_key]["messages"].pop() @@ -99,9 +101,6 @@ class SydneyBot(Chatbot): conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None, message_id: str = None ) -> dict: - if self.chat_hub.wss: - if not self.chat_hub.wss.closed: - await self.chat_hub.wss.close() async for final, response in self.ask_stream( prompt=prompt, conversation_style=conversation_style, diff --git a/requirements.txt b/requirements.txt index 92b0b1d..db2fa6b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,5 @@ PyJWT flask flask_socketio itchat-uos==1.5.0.dev0 -openai \ No newline at end of file +openai +EdgeGPT \ No newline at end of file From 0e446b3aca001981ef56cd0cafd981c921a10165 Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sun, 2 Apr 2023 18:42:21 +0800 Subject: [PATCH 21/25] [bug fixed]multiple channel start failed --- app.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/app.py b/app.py index 236d246..573d078 100644 --- a/app.py +++ b/app.py @@ -8,17 +8,17 @@ from multiprocessing import Pool # 启动通道 -def start_process(channel_type): - # 若为多进程启动,子进程无法直接访问主进程的内存空间,重新创建config类 - config.load_config(args.config) - model_type = config.conf().get("model").get("type") - log.info("[INIT] Start up: {} on {}", model_type, channel_type) +def start_process(channel_type, config_path): + try: + # 若为多进程启动,子进程无法直接访问主进程的内存空间,重新创建config类 + config.load_config(config_path) + model_type = config.conf().get("model").get("type") + log.info("[MultiChannel] Start up {} on {}", model_type, channel_type) + channel = channel_factory.create_channel(channel_type) + channel.startup() + except Exception as e: + log.error("[MultiChannel] Start up failed on {}: {}", channel_type, str(e)) - # create channel - channel = channel_factory.create_channel(channel_type) - - # startup channel - channel.startup() def main(): try: @@ -50,10 +50,10 @@ def main(): pool = Pool(len(channel_type)) for type_item in channel_type: log.info("[INIT] Start up: {} on {}", model_type, type_item) - pool.apply_async(start_process, args=[type_item]) + pool.apply_async(start_process, args=[type_item, args.config]) if terminal: - start_process(terminal) + start_process(terminal, args.config) # 等待池中所有进程执行完毕 pool.close() From 8ee6787af7a6be2184388371c8c62d106209e317 Mon Sep 17 00:00:00 2001 From: RegimenArsenic Date: Sun, 2 Apr 2023 18:45:09 +0800 Subject: [PATCH 22/25] [bug fixed]multiple channel start failed --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 573d078..3502d86 100644 --- a/app.py +++ b/app.py @@ -30,12 +30,12 @@ def main(): # 1.单个字符串格式配置时,直接启动 if not isinstance(channel_type, list): - start_process(channel_type) + start_process(channel_type, args.config) exit(0) # 2.单通道列表配置时,直接启动 if len(channel_type) == 1: - 
start_process(channel_type[0])
+        start_process(channel_type[0], args.config)
         exit(0)
 
     # 3.多通道配置时,进程池启动
From 1e587de78e7d88469309a62fd990ec01a71b092e Mon Sep 17 00:00:00 2001
From: fanyufeng
Date: Thu, 6 Apr 2023 10:36:28 +0800
Subject: [PATCH 23/25] =?UTF-8?q?chore:=20=E4=BC=98=E5=8C=96=E8=89=BE?=
 =?UTF-8?q?=E7=89=B9=E6=9C=BA=E5=99=A8=E4=BA=BA=E6=9C=AA=E8=BE=93=E5=85=A5?=
 =?UTF-8?q?=E9=97=AE=E9=A2=98=E6=97=B6=E4=B8=8D=E8=AF=B7=E6=B1=82=20openAI?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 channel/dingtalk/dingtalk_channel.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/channel/dingtalk/dingtalk_channel.py b/channel/dingtalk/dingtalk_channel.py
index 47c5d7f..d85ac31 100644
--- a/channel/dingtalk/dingtalk_channel.py
+++ b/channel/dingtalk/dingtalk_channel.py
@@ -85,7 +85,7 @@ http_app = Flask(__name__,)
 
 @http_app.route("/", methods=['POST'])
 def chat():
-    # log.info("[DingTalk] chat_headers={}".format(str(request.headers)))
+    log.info("[DingTalk] chat_headers={}".format(str(request.headers)))
     log.info("[DingTalk] chat={}".format(str(request.data)))
     token = request.headers.get('token')
     if dd.dingtalk_post_token and token != dd.dingtalk_post_token:
@@ -95,7 +95,9 @@ def chat():
         content = data['text']['content']
         if not content:
             return
-        reply_text = dd.handle(data=data)
+        reply_text = "您好,有什么我可以帮助您解答的问题吗?"
+        if content and content.strip():
+            reply_text = dd.handle(data=data)
         dd.notify_dingtalk(reply_text)
         return {'ret': 200}
     return {'ret': 201}
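
The launch scripts added in the next patch start `app.py` with no arguments, so the default `./config.json` is read; the `--config` flag introduced in PATCH 17 remains available for pointing at an external file, for example from a mounted Docker volume. A minimal usage sketch (the path is the same illustrative one used in the argparse help text):

```bash
# Default: reads ./config.json from the project root
python3 app.py

# Explicit external config (illustrative path, mirrors the argparse help example)
python3 app.py --config /usr/local/bot-on-anything/config.json
```
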
From c42651e57a5ba0806d701ea37228ec64f553ed57 Mon Sep 17 00:00:00 2001
From: fanyufeng
Date: Thu, 6 Apr 2023 10:38:04 +0800
Subject: [PATCH 24/25] =?UTF-8?q?feat:=20=E6=96=B0=E5=A2=9E=E5=90=AF?=
 =?UTF-8?q?=E5=8A=A8=E8=BF=90=E8=A1=8C=E8=84=9A=E6=9C=AC=EF=BC=8C=E5=B9=B6?=
 =?UTF-8?q?=E5=B0=86=E6=97=A5=E5=BF=97=E8=BE=93=E5=87=BA=E8=87=B3=20logs/l?=
 =?UTF-8?q?og=5Finfo.log?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 scripts/shutdown.sh | 19 +++++++++++++++++++
 scripts/start.sh    | 18 ++++++++++++++++++
 2 files changed, 37 insertions(+)
 create mode 100755 scripts/shutdown.sh
 create mode 100755 scripts/start.sh

diff --git a/scripts/shutdown.sh b/scripts/shutdown.sh
new file mode 100755
index 0000000..6afde41
--- /dev/null
+++ b/scripts/shutdown.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# 关闭服务
+cd `dirname $0`/..
+export BASE_DIR=`pwd`
+pid=`ps ax | grep -i app.py | grep "${BASE_DIR}" | grep python3 | grep -v grep | awk '{print $1}'`
+if [ -z "$pid" ] ; then
+    echo "No bot-on-anything running."
+    exit -1;
+fi
+
+echo "The bot-on-anything(${pid}) is running..."
+
+kill ${pid}
+
+echo "Send shutdown request to bot-on-anything(${pid}) OK"
+
+
+
diff --git a/scripts/start.sh b/scripts/start.sh
new file mode 100755
index 0000000..610a95a
--- /dev/null
+++ b/scripts/start.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# 后台运行bot-on-anything执行脚本
+
+cd `dirname $0`/..
+export BASE_DIR=`pwd`
+echo $BASE_DIR
+
+# check the nohup.out log output file
+if [ ! -f "${BASE_DIR}/logs/log_info.log" ]; then
+    mkdir -p "${BASE_DIR}/logs"
+    touch "${BASE_DIR}/logs/log_info.log"
+    echo "${BASE_DIR}/logs/log_info.log"
+fi
+
+nohup python3 "${BASE_DIR}/app.py" >> "${BASE_DIR}/logs/log_info.log" & tail -f "${BASE_DIR}/logs/log_info.log"
+
+echo "bot-on-anything is starting, you can check the ${BASE_DIR}/logs/log_info.log"
+
From ef0a037e296be825cb0f54bca2d89c96c7f786c4 Mon Sep 17 00:00:00 2001
From: fanyufeng <1905145967@qq.com>
Date: Wed, 5 Apr 2023 22:02:55 +0800
Subject: [PATCH 25/25] =?UTF-8?q?docs:=20=E5=AE=8C=E5=96=84=E9=92=89?=
 =?UTF-8?q?=E9=92=89=E6=96=87=E6=A1=A3=E8=AF=B4=E6=98=8E?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 README.md | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 247ba7a..8913d8d 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@
   - [ ] 企业微信
   - [x] [Telegram](https://github.com/zhayujie/bot-on-anything#6telegram)
   - [x] [QQ](https://github.com/zhayujie/bot-on-anything#5qq)
-  - [x] 钉钉
+  - [x] [钉钉](https://github.com/zhayujie/bot-on-anything#10%E9%92%89%E9%92%89)
   - [ ] 飞书
   - [x] [Gmail](https://github.com/zhayujie/bot-on-anything#7gmail)
   - [x] [Slack](https://github.com/zhayujie/bot-on-anything#8slack)
@@ -494,6 +494,10 @@ pip3 install PyJWT flask
 
 ### 10.钉钉
 
+**需要:**
+
+- 企业内部开发机器人
+
 **依赖**
 
 ```bash
@@ -513,14 +517,16 @@ pip3 install requests flask
     }
   }
 ```
-钉钉开放平台说明: https://open.dingtalk.com/document/robots/customize-robot-security-settin.dingtalk.com/robot/send?access_token=906dadcbc7750fef5ff60d3445b66d5bbca32804f40fbdb59039a29b20b9a3f0gs
+**参考文档**:
 
-https://open.dingtalk.com/document/orgapp/custom-robot-access
+- [钉钉内部机器人教程](https://open.dingtalk.com/document/tutorial/create-a-robot#title-ufs-4gh-poh)
+- [自定义机器人接入文档](https://open.dingtalk.com/document/orgapp/custom-robot-access)
+- [企业内部开发机器人教程文档](https://open.dingtalk.com/document/robots/enterprise-created-chatbot)
 
 **生成机器人**
 
 地址: https://open-dev.dingtalk.com/fe/app#/corp/robot
-添加机器人,在开发管理中设置服务器出口ip(在部署机执行curl ifconfig.me就可以得到)和消息接收地址(配置中的对外地址如 https://xx.xx.com:8081)
+添加机器人,在开发管理中设置服务器出口 ip (在部署机执行`curl ifconfig.me`就可以得到)和消息接收地址(配置中的对外地址如 https://xx.xx.com:8081)
 
 ### 通用配置
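
Once the DingTalk channel is running, the callback endpoint can be smoke-tested locally before registering the public address on the open platform. A minimal sketch, assuming the channel listens on port 8081 and `dingtalk_post_token` is the `xx` placeholder from the template config; the JSON body carries the two fields `dingtalk_channel.py` reads, `text.content` and `senderId`:

```bash
# Local smoke test for the DingTalk callback (port and token are the
# template placeholders; replace them with your deployed values).
curl -X POST http://127.0.0.1:8081/ \
  -H 'Content-Type: application/json' \
  -H 'token: xx' \
  -d '{"text": {"content": "你好"}, "senderId": "test-user"}'
# A {"ret": 200} response means the token check passed and the message was handled.
```
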