Merge pull request #233 from RegimenArsenic/master

Add streaming chat feature
zhayujie
2023-04-05 01:11:51 +08:00
committed by GitHub
16 changed files with 873 additions and 255 deletions

View File

@@ -1,4 +1,4 @@
FROM python:3.7-alpine
FROM python:3.10-alpine
WORKDIR /app

View File

@@ -104,8 +104,13 @@ pip3 install --upgrade openai
"openai": {
"api_key": "YOUR API KEY",
"model": "gpt-3.5-turbo", # 模型名称
"proxy": "http://127.0.0.1:7890",
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。"
"proxy": "http://127.0.0.1:7890", # 代理地址
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉提问人,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。",
"conversation_max_tokens": 1000, # 回复最大的字符数,为输入和输出的总数
"temperature":0.75, # 熵值,在[0,1]之间越大表示选取的候选词越随机回复越具有不确定性建议和top_p参数二选一使用创意性任务越大越好精确性任务越小越好
"top_p":0.7, #候选词列表。0.7 意味着只考虑前70%候选词的标记建议和temperature参数二选一使用
"frequency_penalty":0.0, # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
"presence_penalty":1.0, # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
}
}
```
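For reference, a minimal sketch of how these keys map onto the underlying openai-python 0.x call this project makes (the kwargs match the `chatgpt.py` code in this commit; the sketch itself is illustrative, not repo code):

```python
# Hedged sketch, assuming the openai-python 0.x SDK used by this project.
import openai

openai.api_key = "YOUR API KEY"
openai.proxy = "http://127.0.0.1:7890"  # optional; only if a proxy is configured

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
    temperature=0.75,        # pick either temperature or top_p, not both
    frequency_penalty=0.0,
    presence_penalty=1.0,
)
print(response.choices[0]["message"]["content"])
```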
@@ -472,7 +477,7 @@ https://slack.dev/bolt-python/tutorial/getting-started
**Dependencies**
```bash
pip3 install PyJWT flask
pip3 install PyJWT flask flask_socketio
```
**Configuration**

app.py
View File

@@ -1,5 +1,6 @@
# encoding:utf-8
import argparse
import config
from channel import channel_factory
from common import log, const
@@ -7,34 +8,34 @@ from multiprocessing import Pool
# start the channel
def start_process(channel_type):
# when started via multiprocessing, a child process cannot access the parent's memory, so recreate the config object
config.load_config()
model_type = config.conf().get("model").get("type")
log.info("[INIT] Start up: {} on {}", model_type, channel_type)
def start_process(channel_type, config_path):
try:
# when started via multiprocessing, a child process cannot access the parent's memory, so recreate the config object
config.load_config(config_path)
model_type = config.conf().get("model").get("type")
log.info("[MultiChannel] Start up {} on {}", model_type, channel_type)
channel = channel_factory.create_channel(channel_type)
channel.startup()
except Exception as e:
log.error("[MultiChannel] Start up failed on {}: {}", channel_type, str(e))
# create channel
channel = channel_factory.create_channel(channel_type)
# startup channel
channel.startup()
if __name__ == '__main__':
def main():
try:
# load config
config.load_config()
config.load_config(args.config)
model_type = config.conf().get("model").get("type")
channel_type = config.conf().get("channel").get("type")
# 1. channel configured as a single string: start it directly
if not isinstance(channel_type, list):
start_process(channel_type)
start_process(channel_type, args.config)
exit(0)
# 2. list containing a single channel: start it directly
if len(channel_type) == 1:
start_process(channel_type[0])
start_process(channel_type[0], args.config)
exit(0)
# 3. multiple channels: start them through a process pool
@@ -49,10 +50,10 @@ if __name__ == '__main__':
pool = Pool(len(channel_type))
for type_item in channel_type:
log.info("[INIT] Start up: {} on {}", model_type, type_item)
pool.apply_async(start_process, args=[type_item])
pool.apply_async(start_process, args=[type_item, args.config])
if terminal:
start_process(terminal)
start_process(terminal, args.config)
# wait for all processes in the pool to finish
pool.close()
@@ -60,3 +61,9 @@ if __name__ == '__main__':
except Exception as e:
log.error("App startup failed!")
log.exception(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="config.json path (e.g. ./config.json or /usr/local/bot-on-anything/config.json)", type=str, default="./config.json")
args = parser.parse_args()
main()
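With the new flag the app can be launched as, e.g., `python3 app.py --config ./config.json`. Below is a minimal sketch (hypothetical names, not repo code) of the pool pattern used above — each worker reloads the config from the given path because child processes do not share the parent's memory:

```python
# Hypothetical illustration of the multiprocessing pattern in app.py.
from multiprocessing import Pool

def worker(channel_type, config_path):
    # a real worker would call config.load_config(config_path) here,
    # then create and start the channel
    print(f"starting {channel_type} with {config_path}")

if __name__ == '__main__':
    channels = ["http", "terminal"]
    pool = Pool(len(channels))
    for c in channels:
        pool.apply_async(worker, args=[c, "./config.json"])
    pool.close()
    pool.join()
```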

View File

@@ -7,3 +7,8 @@ class Bridge(object):
def fetch_reply_content(self, query, context):
return model_factory.create_bot(config.conf().get("model").get("type")).reply(query, context)
async def fetch_reply_stream(self, query, context):
bot = model_factory.create_bot(config.conf().get("model").get("type"))
async for final, response in bot.reply_text_stream(query, context):
yield final, response

View File

@@ -29,3 +29,7 @@ class Channel(object):
def build_reply_content(self, query, context=None):
return Bridge().fetch_reply_content(query, context)
async def build_reply_stream(self, query, context=None):
async for final, response in Bridge().fetch_reply_stream(query, context):
yield final, response
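Both methods are async generators that yield `(final, response)` pairs until `final` is `True`. A hypothetical driver (not part of the repo) showing how such a stream is consumed:

```python
# Hypothetical consumer of the async-generator chain above.
import asyncio

async def consume(stream):
    async for final, response in stream:
        prefix = "final" if final else "partial"
        print(f"{prefix}: {response}")

# With a configured bot this would drive the whole chain, e.g.:
# asyncio.run(consume(Channel().build_reply_stream("hello", {"from_user_id": "1"})))
```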

View File

@@ -1,5 +1,6 @@
# encoding:utf-8
import asyncio
import json
from channel.http import auth
from flask import Flask, request, render_template, make_response
@@ -9,8 +10,11 @@ from common import functions
from config import channel_conf
from config import channel_conf_val
from channel.channel import Channel
from flask_socketio import SocketIO
from common import log
http_app = Flask(__name__,)
socketio = SocketIO(http_app, close_timeout=5)
# auto-reload template files
http_app.jinja_env.auto_reload = True
http_app.config['TEMPLATES_AUTO_RELOAD'] = True
@@ -19,6 +23,52 @@ http_app.config['TEMPLATES_AUTO_RELOAD'] = True
http_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
async def return_stream(data):
async for final, response in HttpChannel().handle_stream(data=data):
try:
if(final):
socketio.server.emit(
'disconnect', {'result': response, 'final': final}, request.sid, namespace="/chat")
disconnect()
else:
socketio.server.emit(
'message', {'result': response, 'final': final}, request.sid, namespace="/chat")
except Exception as e:
disconnect()
log.warn("[http]emit:{}", e)
break
@socketio.on('message', namespace='/chat')
def stream(data):
if (auth.identify(request) == False):
client_sid = request.sid
socketio.server.disconnect(client_sid)
return
data = json.loads(data["data"])
if (data):
img_match_prefix = functions.check_prefix(
data["msg"], channel_conf_val(const.HTTP, 'image_create_prefix'))
if img_match_prefix:
reply_text = HttpChannel().handle(data=data)
socketio.emit('disconnect', {'result': reply_text}, namespace='/chat')
disconnect()
return
asyncio.run(return_stream(data))
@socketio.on('connect', namespace='/chat')
def connect():
log.info('connected')
socketio.emit('message', {'info': "connected"}, namespace='/chat')
@socketio.on('disconnect', namespace='/chat')
def disconnect():
log.info('disconnect')
socketio.server.disconnect(request.sid,namespace="/chat")
@http_app.route("/chat", methods=['POST'])
def chat():
if (auth.identify(request) == False):
@@ -80,3 +130,10 @@ class HttpChannel(Channel):
images += f"[!['IMAGE_CREATE']({url})]({url})\n"
reply = images
return reply
async def handle_stream(self, data):
context = dict()
id = data["id"]
context['from_user_id'] = str(id)
async for final, reply in super().build_reply_stream(data["msg"], context):
yield final, reply
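For exercising this stream outside the browser, here is a hedged sketch using the python-socketio client package (an assumption — the shipped page uses the JS client; the `auth.identify()` cookie check and the actual host/port are glossed over):

```python
# Hypothetical client for the /chat Socket.IO namespace above.
import json
import socketio

sio = socketio.Client()

@sio.on('message', namespace='/chat')
def on_message(msg):
    print('partial:', msg.get('result'))

@sio.on('disconnect', namespace='/chat')
def on_disconnect(*args):
    # the server reuses the 'disconnect' event name to carry the final text
    if args and isinstance(args[0], dict):
        print('final:', args[0].get('result'))

sio.connect('http://127.0.0.1:80', namespaces=['/chat'])  # adjust URL to your deployment
sio.emit('message', {'data': json.dumps({'id': 'client-1', 'msg': 'hello'})},
         namespace='/chat')
sio.wait()
```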

View File

@@ -1,4 +1,3 @@
.typing_loader {
width: 6px;
height: 6px;
@@ -11,7 +10,9 @@
left: -12px;
margin: 7px 15px 6px;
}
ol,pre {
ol,
pre {
background-color: #b1e3b1c4;
border: 1px solid #c285e3ab;
padding: 0.5rem 1.5rem 0.5rem;
@@ -20,50 +21,52 @@ ol,pre {
overflow-y: auto;
}
pre::-webkit-scrollbar{
pre::-webkit-scrollbar {
width: 0px;
height:5px;
height: 5px;
}
pre::-webkit-scrollbar-thumb{
pre::-webkit-scrollbar-thumb {
border-right: 10px #ffffff00 solid;
border-left: 10px #ffffff00 solid;
-webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3);
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
}
.to .typing_loader {
animation: typing-black 1s linear infinite alternate;
}
@-webkit-keyframes typing {
0% {
background-color: rgba(255,255,255, 1);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255,255,255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255,255,255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
@-moz-keyframes typing {
0% {
background-color: rgba(255,255,255, 1);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255,255,255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255,255,255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
@@ -75,29 +78,29 @@ pre::-webkit-scrollbar-thumb{
50% {
background-color: rgba(74, 74, 74, 0.4);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74,0.4);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74, 0.4);
}
100% {
background-color: rgba(74, 74, 74, 0.2);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74,0.4), 24px 0px 0px 0px rgba(74, 74, 74,1);
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 0.4), 24px 0px 0px 0px rgba(74, 74, 74, 1);
}
}
@keyframes typing {
0% {
background-color: rgba(255,255,255, 1);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
background-color: rgba(255, 255, 255, 1);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
}
50% {
background-color: rgba(255,255,255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
background-color: rgba(255, 255, 255, 0.4);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
}
100% {
background-color: rgba(255,255,255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
background-color: rgba(255, 255, 255, 0.2);
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
}
}
@@ -112,27 +115,30 @@ pre::-webkit-scrollbar-thumb{
.convFormDynamic textarea.userInputDynamic {
border: none;
padding: 7px 10px;
overflow-x: hidden!important;
overflow-x: hidden !important;
outline: none;
font-size: 0.905rem;
float: left;
width: calc(100% - 70px);
line-height: 1.3em;
min-height: 1.7em;
min-height: 2em;
max-height: 10rem;
display: block;
max-width: 89vw;
margin-right: -1vw;
resize: none;
}
.convFormDynamic textarea::-webkit-scrollbar{
.convFormDynamic textarea::-webkit-scrollbar {
width: 2px;
background-color: lawngreen;
}
.convFormDynamic textarea::-webkit-scrollbar-thumb{
-webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3);
background-color: dodgerblue;
.convFormDynamic textarea::-webkit-scrollbar-thumb {
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
background-color: dodgerblue;
}
.convFormDynamic input.userInputDynamic {
border: none;
padding: 7px 10px;
@@ -180,16 +186,20 @@ div.conv-form-wrapper:before {
top: 0;
left: 0;
z-index: 2;
background: linear-gradient(#fff, transparent);
background: linear-gradient(#ffffff3b, transparent);
}
@media (max-width: 767px) {
div.conv-form-wrapper div.wrapper-messages, div.conv-form-wrapper div#messages {
div.conv-form-wrapper div.wrapper-messages,
div.conv-form-wrapper div#messages {
max-height: 71vh;
}
}
div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar, div.conv-form-wrapper div.options::-webkit-scrollbar {
div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar,
div#feed ul::-webkit-scrollbar,
div.conv-form-wrapper div.options::-webkit-scrollbar {
width: 0px;
height: 0px;
/* remove scrollbar space */
@@ -261,12 +271,13 @@ div.conv-form-wrapper div#messages div.message.to {
}
div.conv-form-wrapper div#messages div.message.from {
background: dodgerblue;
background: dodgerblue;
color: #fff;
border-top-right-radius: 0;
}
.message.to+.message.from, .message.from+.message.to {
.message.to+.message.from,
.message.from+.message.to {
margin-top: 15px;
}
@@ -294,7 +305,7 @@ div.conv-form-wrapper div#messages div.message.from {
position: absolute;
bottom: 0px;
border: none;
left:95%;
left: 95%;
margin: 5px;
color: #fff;
cursor: pointer;
@@ -315,10 +326,11 @@ div.conv-form-wrapper div#messages div.message.from {
}
button.submit.glow {
border: 1px solid dodgerblue !important;
background: dodgerblue !important;
box-shadow: 0 0 5px 2px rgba(14, 144, 255,0.4);
border: 1px solid dodgerblue !important;
background: dodgerblue !important;
box-shadow: 0 0 5px 2px rgba(14, 144, 255, 0.4);
}
.no-border {
border: none !important;
}
@@ -327,7 +339,8 @@ button.submit.glow {
cursor: grab;
}
div.conv-form-wrapper div#messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar {
div.conv-form-wrapper div#messages::-webkit-scrollbar,
div#feed ul::-webkit-scrollbar {
width: 0px;
/* remove scrollbar space */
background: transparent;
@@ -338,3 +351,268 @@ span.clear {
display: block;
clear: both;
}
.drawer-icon-container {
position: fixed;
top: calc(50% - 24px);
right: -30px;
z-index: 1000;
transition: right 0.5s ease;
}
.drawer-icon {
width: 30px;
height: 30px;
cursor: pointer;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
background-color: #b1cee350;
padding-left: 22px;
border-radius: 50%;
}
.drawer-icon:hover{
background-color: #005eff96;
}
.wrenchFilled.icon {
margin-left: -13px;
margin-top: 5px;
width: 10px;
height: 10px;
border-radius: 50%;
background-color: #333333;
transform-origin: center 10.5px;
transform: rotate(-45deg);
}
.wrenchFilled.icon:after {
width: 0;
height: 0;
border-radius: 0 0 1px 1px;
background-color: #333333;
border-left: solid 1px transparent;
border-right: solid 1px transparent;
border-top: solid 1px white;
border-bottom: solid 1px transparent;
left: 4px;
top: 4px;
}
.wrenchFilled.icon:before {
width: 2px;
height: 5px;
background-color: white;
left: 4px;
border-radius: 0 0 1px 1px;
box-shadow: 0 15px 0px 1px #333333, 0 11px 0px 1px #333333, 0 8px 0px 1px #333333;
}
.icon {
position: absolute;
}
.icon:before,
.icon:after {
content: '';
position: absolute;
display: block;
}
.icon i {
position: absolute;
}
.icon i:before,
.icon i:after {
content: '';
position: absolute;
display: block;
}
.drawer-icon i {
margin-left: -15px;
line-height: 30px;
font-weight: bolder;
}
.drawer {
position: fixed;
top: 0;
right: -300px;
width: 300px;
height: 100%;
background-color: #fff;
z-index: 999;
transition: right 0.5s ease;
display: flex;
flex-direction: column;
}
.drawer.open {
right: 0;
}
.drawer-header {
display: flex;
justify-content: space-between;
align-items: center;
background-color: #b1cee350;
border-bottom: 1px solid #ddd;
padding: 16px;
}
.drawer-header h2 {
margin: 0 0 0 16px;
}
.drawer-header button {
background-color: transparent;
border: none;
cursor: pointer;
}
.drawer-content {
flex: 1 1 auto;
height: 100%;
overflow: auto;
padding: 16px;
}
.drawer-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background-color: rgba(0, 0, 0, 0.5);
z-index: 998;
display: none;
}
@-webkit-keyframes click-wave {
0% {
width: 40px;
height: 40px;
opacity: 0.35;
position: relative;
}
100% {
width: 60px;
height: 60px;
margin-left: 80px;
margin-top: 80px;
opacity: 0.0;
}
}
@-moz-keyframes click-wave {
0% {
width: 30px;
height: 30px;
opacity: 0.35;
position: relative;
}
100% {
width: 80px;
height: 80px;
margin-left: -23px;
margin-top: -23px;
opacity: 0.0;
}
}
@-o-keyframes click-wave {
0% {
width: 30px;
height: 30px;
opacity: 0.35;
position: relative;
}
100% {
width: 80px;
height: 80px;
margin-left: -23px;
margin-top: -23px;
opacity: 0.0;
}
}
@keyframes click-wave {
0% {
width: 30px;
height: 30px;
opacity: 0.35;
position: relative;
}
100% {
width: 80px;
height: 80px;
margin-left: -23px;
margin-top: -23px;
opacity: 0.0;
}
}
.option-input {
-webkit-appearance: none;
-moz-appearance: none;
-ms-appearance: none;
-o-appearance: none;
appearance: none;
position: relative;
top: 10px;
width: 30px;
height: 30px;
-webkit-transition: all 0.15s ease-out 0;
-moz-transition: all 0.15s ease-out 0;
transition: all 0.15s ease-out 0;
background: #cbd1d8;
border: none;
color: #fff;
cursor: pointer;
display: inline-block;
outline: none;
position: relative;
margin-right: 0.5rem;
z-index: 1000;
}
.option-input:hover {
background: #9faab7;
}
.option-input:checked {
background: #1e90ffaa;
}
.option-input:checked::before {
width: 30px;
height: 30px;
position: absolute;
content: '☻';
display: inline-block;
font-size: 29px;
text-align: center;
line-height: 26px;
}
.option-input:checked::after {
-webkit-animation: click-wave 0.65s;
-moz-animation: click-wave 0.65s;
animation: click-wave 0.65s;
background: #40e0d0;
content: '';
display: block;
position: relative;
z-index: 100;
}
.option-input.radio {
border-radius: 50%;
}
.option-input.radio::after {
border-radius: 50%;
}

View File

@@ -1,20 +1,29 @@
function ConvState(wrapper, form, params) {
this.id='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
function generateUUID() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
var r = Math.random() * 16 | 0,
v = c == 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
})
}
const conversationType = {
DISPOSABLE: 1,
STREAM: 1 << 1
}
function ConvState(wrapper, form, params) {
this.id = generateUUID()
this.form = form;
this.wrapper = wrapper;
this.backgroundColor = '#ffffff';
this.parameters = params;
this.scrollDown = function () {
$(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600);
}.bind(this);
};
ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对事实性与实时性问题提供准确答复,请慎重对待回答。') {
ConvState.prototype.printAnswer = function (uuid, answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。我无法对事实性与实时性问题提供准确答复,请慎重对待回答。') {
setTimeout(function () {
var messageObj = $(this.wrapper).find('.message.typing');
var messageObj = $(this.wrapper).find(`#${uuid}`);
answer = marked.parse(answer);
messageObj.html(answer);
messageObj.removeClass('typing').addClass('ready');
@@ -22,39 +31,87 @@ ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由Op
$(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
}.bind(this), 500);
};
ConvState.prototype.updateAnswer = function (question, uuid) {
setTimeout(function () {
var socket = io('/chat');
socket.connect('/chat');
let timerId;
var _this = this
// set a timer: if no message arrives within the window, disconnect manually
function setTimer() {
timerId = setTimeout(() => {
if (socket.connected) {
socket.disconnect();
handle_disconnect();
}
}, 60000);
}
function resetTimer() {
clearTimeout(timerId);
setTimer();
}
setTimer();
var messageObj = $(this.wrapper).find(`#${uuid}`);
function handle_disconnect() {
messageObj.removeClass('typing').addClass('ready');
_this.scrollDown();
$(_this.wrapper).find(_this.parameters.inputIdHashTagName).focus();
}
this.scrollDown();
socket.on('message', msg => {
// reset the timer whenever a message arrives
resetTimer();
if (msg.result)
messageObj.html(msg.result + `<div class="typing_loader"></div>`);
this.scrollDown();
});
socket.on('connect', msg => {
socket.emit('message', { data: JSON.stringify(question) });
});
socket.on('disconnect', msg => {
if (msg.result) {
answer = marked.parse(msg.result);
messageObj.html(answer);
}
handle_disconnect()
});
}.bind(this), 1000);
};
ConvState.prototype.sendMessage = function (msg) {
var message = $('<div class="message from">' + msg + '</div>');
$('button.submit').removeClass('glow');
$(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
setTimeout(function () {
$(this.wrapper).find("#messages").append(message);
this.scrollDown();
}.bind(this), 100);
var messageObj = $('<div class="message to typing"><div class="typing_loader"></div></div>');
var uuid = generateUUID().toLowerCase();
var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
setTimeout(function () {
$(this.wrapper).find('#messages').append(messageObj);
this.scrollDown();
}.bind(this), 150);
var _this = this
$.ajax({
url: "./chat",
type: "POST",
timeout:180000,
data: JSON.stringify({
"id": _this.id,
"msg": msg
}),
contentType: "application/json; charset=utf-8",
dataType: "json",
success: function (data) {
_this.printAnswer(data.result)
},
error:function () {
_this.printAnswer("网络故障,对话未送达")
},
})
var question = { "id": _this.id, "msg": msg }
if (localConfig.conversationType == conversationType.STREAM)
this.updateAnswer(question, uuid)
else
$.ajax({
url: "./chat",
type: "POST",
timeout: 180000,
data: JSON.stringify(question),
contentType: "application/json; charset=utf-8",
dataType: "json",
success: function (data) {
_this.printAnswer(uuid, data.result)
},
error: function (data) {
console.log(data)
_this.printAnswer(uuid, "网络故障,对话未送达")
},
})
};
(function ($) {
$.fn.convform = function () {
@@ -81,13 +138,30 @@ ConvState.prototype.sendMessage = function (msg) {
$(wrapper).append(inputForm);
var state = new ConvState(wrapper, form, parameters);
// Bind checkbox values to ConvState object
$('input[type="checkbox"]').change(function () {
var key = $(this).attr('name');
state[key] = $(this).is(':checked');
});
// Bind radio button values to ConvState object
$('input[type="radio"]').change(function () {
var key = $(this).attr('name');
state[key] = $(this).val();
});
// Bind color input value to ConvState object
$('#backgroundColor').change(function () {
state["backgroundColor"] = $(this).val();
});
//prints first contact
$.when($('div.spinLoader').addClass('hidden')).done(function () {
var messageObj = $('<div class="message to typing"><div class="typing_loader"></div></div>');
var uuid = generateUUID()
var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
$(state.wrapper).find('#messages').append(messageObj);
state.scrollDown();
state.printAnswer();
state.printAnswer(uuid);
});
//binds enter to send message

View File

@@ -19,33 +19,141 @@
<div id="chat" class="conv-form-wrapper">
</div>
</div>
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/marked/4.2.12/marked.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/autosize.js/6.0.1/autosize.min.js"></script>
<script src="./static/1.js"></script>
<script>
var rollbackTo = false;
var originalState = false;
function storeState(a) {
rollbackTo = a.current
}
function rollback(a) {
if (rollbackTo != false) {
if (originalState == false) {
originalState = a.current.next
<div class="drawer-icon-container">
<div class="drawer-icon">
<div class="wrenchFilled icon"></div>
</div>
<div class="drawer">
<div class="drawer-header">
<h2>设置</h2>
<button id="close-drawer">X</button>
</div>
<div class="drawer-content">
<div hidden="true">
<input type="checkbox" id="bold" name="bold">
<label for="bold">Bold</label>
<input type="checkbox" id="italic" name="italic">
<label for="italic">Italic</label>
</div>
<div>
<label for="backgroundColor">背景颜色:</label>
<input type="color" id="backgroundColor" name="backgroundColor" value="#ffffff">
</div>
<div>
<p>AI回复方式</p>
<input type="radio" id="option1" name="conversationType" class="option-input radio" value=1 checked>
<label for="option1">一次性发送</label>
<input type="radio" id="option2" name="conversationType" class="option-input radio" value=2>
<label for="option2">逐段发送</label>
</div>
</div>
</div>
</div>
<div class="drawer-overlay"></div>
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/marked/4.2.12/marked.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/autosize.js/6.0.1/autosize.min.js"></script>
<script src="https://cdn.bootcdn.net/ajax/libs/socket.io/4.6.1/socket.io.js"></script>
<script src="./static/1.js"></script>
<script>
var rollbackTo = false;
var originalState = false;
function storeState(a) {
rollbackTo = a.current
}
function rollback(a) {
if (rollbackTo != false) {
if (originalState == false) {
originalState = a.current.next
}
a.current.next = rollbackTo
}
a.current.next = rollbackTo
}
}
function restore(a) {
if (originalState != false) {
a.current.next = originalState
function restore(a) {
if (originalState != false) {
a.current.next = originalState
}
}
}
jQuery(function (a) {
var b = a("#chat").convform()
});
</script>
var ConvStateMap = {
bold: false,
italic: false,
backgroundColor: '#ffffff',
conversationType: conversationType.DISPOSABLE
};
// Create a Proxy object to watch all properties of the "ConvStateMap" object
var localConfig = new Proxy(ConvStateMap, {
set: function (target, prop, val) {
target[prop] = val;
// persist the change, then mirror it into the UI below
localStorage.setItem('botOnAnyThingConfig', JSON.stringify(localConfig))
switch (prop) {
case 'backgroundColor':
$('body').css('background-color', val);
$(`#backgroundColor`)?.val(val);
break;
case 'conversationType':
if (val)
$(`#option${val}`)?.prop("checked", true);
}
}
});
$(document).ready(function () {
let config = localStorage.getItem('botOnAnyThingConfig')
if (config) {
config = JSON.parse(config)
Object.keys(config).forEach(item => localConfig[item] = config[item])
}
// Open drawer
$('.drawer-icon').click(function () {
if (!$('.drawer').hasClass('open')) {
$('.drawer').toggleClass('open');
$('.drawer-overlay').fadeIn();
$('.drawer-icon-container').toggleClass('open').css('right', '270px');
} else
closeDrawer()
});
// Close drawer
$('#close-drawer, .drawer-overlay').click(closeDrawer);
function closeDrawer() {
$('.drawer').removeClass('open');
$('.drawer-overlay').fadeOut();
$('.drawer-icon-container').removeClass('open').css('right', '-30px');
}
});
// Bind checkbox values to ConvStateMap object
$('input[type="checkbox"]').change(function () {
var key = $(this).attr('name');
if (key)
localConfig[key] = $(this).is(':checked');
});
// Bind radio button values to ConvStateMap object
$('input[type="radio"]').change(function () {
var key = $(this).attr('name');
if (key)
localConfig[key] = $(this).val();
});
// Bind color input value to ConvStateMap object
$('#backgroundColor').on("input", function (e) {
localConfig.backgroundColor = $(this).val();
});
$(window).on('unload', function () {
if (typeof socket !== 'undefined') socket.disconnect();
});
jQuery(function (a) {
var b = a("#chat").convform()
});
</script>
</body>
</html>

View File

@@ -7,6 +7,10 @@
"model": "gpt-3.5-turbo",
"proxy": "",
"conversation_max_tokens": 1000,
"temperature":0.75,
"top_p":0.7,
"frequency_penalty":0.0,
"presence_penalty":1.0,
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。当问起你是谁的时候,要附加告诉提问人,输入 #清除记忆 可以开始新的话题探索。输入 画xx 可以为你画一张图片。"
},
"baidu": {

View File

@@ -6,9 +6,8 @@ import os
config = {}
def load_config():
def load_config(config_path="./config.json"):
global config
config_path = "config.json"
if not os.path.exists(config_path):
raise Exception('配置文件不存在请根据config-template.json模板创建config.json文件')
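Usage sketch for the parameterized loader (paths assumed): each process can now point at its own config file explicitly.

```python
# Hedged usage example; config.conf() is the accessor used throughout the repo.
import config

config.load_config("./config.json")  # or any absolute path passed via --config
model_type = config.conf().get("model").get("type")
print(model_type)
```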

View File

@@ -41,17 +41,13 @@ class SydneyBot(Chatbot):
break
ordered_messages.insert(0, message)
current_message_id = message.get('parentMessageId')
return ordered_messages
def pop_last_conversation(self):
self.conversations_cache[self.conversation_key]["messages"].pop()
async def ask(
async def ask_stream(
self,
prompt: str,
conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
message_id: str = None,
message_id: str = None
) -> dict:
# start a new conversation
self.chat_hub = SydneyHub(Conversation(
@@ -88,11 +84,31 @@ class SydneyBot(Chatbot):
async for final, response in self.chat_hub.ask_stream(
prompt=prompt,
conversation_style=conversation_style
):
if final:
try:
if self.chat_hub.wss and not self.chat_hub.wss.closed:
await self.chat_hub.wss.close()
self.update_reply_cache(response["item"]["messages"][-1])
except Exception as e:
self.conversations_cache[self.conversation_key]["messages"].pop()
yield True, f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请清除全部记忆后再次尝试"
yield final, response
async def ask(
self,
prompt: str,
conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
message_id: str = None
) -> dict:
async for final, response in self.ask_stream(
prompt=prompt,
conversation_style=conversation_style,
message_id=message_id
):
if final:
self.update_reply_cache(response["item"]["messages"][-1])
return response
self.chat_hub.wss.close()
def update_reply_cache(
self,

View File

@@ -1,7 +1,7 @@
# encoding:utf-8
import asyncio
from model.model import Model
from config import model_conf_val,common_conf_val
from config import model_conf_val, common_conf_val
from common import log
from EdgeGPT import Chatbot, ConversationStyle
from ImageGen import ImageGen
@@ -23,87 +23,85 @@ class BingModel(Model):
try:
self.cookies = model_conf_val("bing", "cookies")
self.jailbreak = model_conf_val("bing", "jailbreak")
self.bot = SydneyBot(cookies=self.cookies,options={}) if(self.jailbreak) else Chatbot(cookies=self.cookies)
self.bot = SydneyBot(cookies=self.cookies, options={}) if (
self.jailbreak) else Chatbot(cookies=self.cookies)
except Exception as e:
log.exception(e)
log.warn(e)
async def reply_text_stream(self, query: str, context=None) -> dict:
async def handle_answer(final, answer):
if final:
try:
reply = self.build_source_attributions(answer, context)
log.info("[NewBing] reply:{}", reply)
yield True, reply
except Exception as e:
log.warn(answer)
log.warn(e)
await user_session.get(context['from_user_id'], None).reset()
yield True, answer
else:
try:
yield False, answer
except Exception as e:
log.warn(answer)
log.warn(e)
await user_session.get(context['from_user_id'], None).reset()
yield True, answer
if not context or not context.get('type') or context.get('type') == 'TEXT':
clear_memory_commands = common_conf_val(
'clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
user_session[context['from_user_id']] = None
yield True, '记忆已清除'
bot = user_session.get(context['from_user_id'], None)
if not bot:
bot = self.bot
else:
query = self.get_quick_ask_query(query, context)
user_session[context['from_user_id']] = bot
log.info("[NewBing] query={}".format(query))
if self.jailbreak:
async for final, answer in bot.ask_stream(query, conversation_style=self.style, message_id=bot.user_message_id):
async for result in handle_answer(final, answer):
yield result
else:
async for final, answer in bot.ask_stream(query, conversation_style=self.style):
async for result in handle_answer(final, answer):
yield result
def reply(self, query: str, context=None) -> tuple[str, dict]:
if not context or not context.get('type') or context.get('type') == 'TEXT':
clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
clear_memory_commands = common_conf_val(
'clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
user_session[context['from_user_id']]=None
user_session[context['from_user_id']] = None
return '记忆已清除'
bot = user_session.get(context['from_user_id'], None)
if (bot == None):
bot = self.bot
else:
if (len(query) == 1 and query.isdigit() and query != "0"):
suggestion_dict = suggestion_session[context['from_user_id']]
if (suggestion_dict != None):
query = suggestion_dict[int(query)-1]
if (query == None):
return "输入的序号不在建议列表范围中"
else:
query = "在上面的基础上,"+query
query = self.get_quick_ask_query(query, context)
user_session[context['from_user_id']] = bot
log.info("[NewBing] query={}".format(query))
if(self.jailbreak):
task = bot.ask(query, conversation_style=self.style,message_id=bot.user_message_id)
if (self.jailbreak):
task = bot.ask(query, conversation_style=self.style,
message_id=bot.user_message_id)
else:
task = bot.ask(query, conversation_style=self.style)
try:
answer = asyncio.run(task)
except Exception as e:
bot.pop_last_conversation()
log.exception(e)
return f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请使用{clear_memory_commands[0]}命令清除全部记忆"
# latest reply
answer = asyncio.run(task)
if isinstance(answer, str):
return answer
try:
reply = answer["item"]["messages"][-1]
except Exception as e:
self.reset_chat(context['from_user_id'])
log.exception(answer)
user_session.get(context['from_user_id'], None).reset()
log.warn(answer)
return "本轮对话已超时,已开启新的一轮对话,请重新提问。"
reply_text = reply["text"]
reference = ""
if "sourceAttributions" in reply:
for i, attribution in enumerate(reply["sourceAttributions"]):
display_name = attribution["providerDisplayName"]
url = attribution["seeMoreUrl"]
reference += f"{i+1}、[{display_name}]({url})\n\n"
if len(reference) > 0:
reference = "***\n"+reference
suggestion = ""
if "suggestedResponses" in reply:
suggestion_dict = dict()
for i, attribution in enumerate(reply["suggestedResponses"]):
suggestion_dict[i] = attribution["text"]
suggestion += f">{i+1}{attribution['text']}\n\n"
suggestion_session[context['from_user_id']
] = suggestion_dict
if len(suggestion) > 0:
suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion
throttling = answer["item"]["throttling"]
throttling_str = ""
if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]:
self.reset_chat(context['from_user_id'])
throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)"
else:
throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n"
response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}"
log.info("[NewBing] reply={}", response)
user_session[context['from_user_id']] = bot
return response
else:
self.reset_chat(context['from_user_id'])
log.warn("[NewBing] reply={}", answer)
return "对话被接口拒绝,已开启新的一轮对话。"
return self.build_source_attributions(answer, context)
elif context.get('type', None) == 'IMAGE_CREATE':
if functions.contain_chinese(query):
return "ImageGen目前仅支持使用英文关键词生成图片"
@@ -118,8 +116,58 @@ class BingModel(Model):
log.info("[NewBing] image_list={}".format(img_list))
return img_list
except Exception as e:
log.exception(e)
log.warn(e)
return "输入的内容可能违反微软的图片生成内容策略。过多的策略冲突可能会导致你被暂停访问。"
def reset_chat(self, from_user_id):
asyncio.run(user_session.get(from_user_id, None).reset())
def get_quick_ask_query(self, query, context):
if (len(query) == 1 and query.isdigit() and query != "0"):
suggestion_dict = suggestion_session[context['from_user_id']]
if (suggestion_dict != None):
query = suggestion_dict[int(query)-1]
if (query == None):
return "输入的序号不在建议列表范围中"
else:
query = "在上面的基础上,"+query
return query
def build_source_attributions(self, answer, context):
reference = ""
reply = answer["item"]["messages"][-1]
reply_text = reply["text"]
if "sourceAttributions" in reply:
for i, attribution in enumerate(reply["sourceAttributions"]):
display_name = attribution["providerDisplayName"]
url = attribution["seeMoreUrl"]
reference += f"{i+1}、[{display_name}]({url})\n\n"
if len(reference) > 0:
reference = "***\n"+reference
suggestion = ""
if "suggestedResponses" in reply:
suggestion_dict = dict()
for i, attribution in enumerate(reply["suggestedResponses"]):
suggestion_dict[i] = attribution["text"]
suggestion += f">{i+1}{attribution['text']}\n\n"
suggestion_session[context['from_user_id']
] = suggestion_dict
if len(suggestion) > 0:
suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion
throttling = answer["item"]["throttling"]
throttling_str = ""
if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]:
user_session.get(context['from_user_id'], None).reset()
throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)"
else:
throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n"
response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}"
log.info("[NewBing] reply={}", response)
return response
else:
user_session.get(context['from_user_id'], None).reset()
log.warn("[NewBing] reply={}", answer)
return "对话被接口拒绝,已开启新的一轮对话。"

View File

@@ -50,11 +50,12 @@ class ChatGPTModel(Model):
response = openai.ChatCompletion.create(
model=model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # chat model name
messages=query,
temperature=0.9, # in [0,1]; higher values make replies less deterministic
top_p=1,
frequency_penalty=0.0, # in [-2,2]; higher values favor different content
presence_penalty=0.0, # in [-2,2]; higher values favor different content
)
temperature=model_conf(const.OPEN_AI).get("temperature", 0.75), # sampling temperature in [0,1]; higher values make token choices more random and replies less deterministic. Use either this or top_p, not both: higher suits creative tasks, lower suits precision tasks
#max_tokens=4096, # maximum number of tokens per reply, counting input and output together
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7), # nucleus sampling: 0.7 means only tokens within the top 70% of probability mass are considered. Use either this or temperature, not both
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # in [-2,2]; higher values penalize wording already used, favoring different content
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0) # in [-2,2]; higher values encourage tokens not present in the input, favoring novel content
)
reply_content = response.choices[0]['message']['content']
used_token = response['usage']['total_tokens']
log.debug(response)
@@ -87,20 +88,32 @@ class ChatGPTModel(Model):
return "请再问我一次吧"
def reply_text_stream(self, query, new_query, user_id, retry_count=0):
async def reply_text_stream(self, query, context, retry_count=0):
try:
res = openai.Completion.create(
model="text-davinci-003", # 对话模型的名称
prompt=new_query,
temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
#max_tokens=4096, # 回复最大的字符数
top_p=1,
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
stop=["\n\n\n"],
user_id=context['from_user_id']
new_query = Session.build_session_query(query, user_id)
res = openai.ChatCompletion.create(
model=model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # chat model name
messages=new_query,
temperature=model_conf(const.OPEN_AI).get("temperature", 0.75), # sampling temperature in [0,1]; higher values make token choices more random and replies less deterministic. Use either this or top_p, not both: higher suits creative tasks, lower suits precision tasks
#max_tokens=4096, # maximum number of tokens per reply, counting input and output together
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7), # nucleus sampling: 0.7 means only tokens within the top 70% of probability mass are considered. Use either this or temperature, not both
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # in [-2,2]; higher values penalize wording already used, favoring different content
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0), # in [-2,2]; higher values encourage tokens not present in the input, favoring novel content
stream=True
)
return self._process_reply_stream(query, res, user_id)
full_response = ""
for chunk in res:
log.debug(chunk)
if chunk["choices"][0]["finish_reason"] == "stop":
break
chunk_message = chunk['choices'][0]['delta'].get("content")
if chunk_message:
full_response += chunk_message
yield False, full_response
Session.save_session(query, full_response, user_id)
log.info("[chatgpt]: reply={}", full_response)
yield True, full_response
except openai.error.RateLimitError as e:
# rate limit exception
@@ -108,45 +121,22 @@ class ChatGPTModel(Model):
if retry_count < 1:
time.sleep(5)
log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
return self.reply_text_stream(query, user_id, retry_count+1)
async for result in self.reply_text_stream(query, context, retry_count+1):
yield result
else:
return "提问太快啦,请休息一下再问我吧"
yield True, "提问太快啦,请休息一下再问我吧"
except openai.error.APIConnectionError as e:
log.warn(e)
log.warn("[CHATGPT] APIConnection failed")
return "我连接不到网络,请稍后重试"
yield True, "我连接不到网络,请稍后重试"
except openai.error.Timeout as e:
log.warn(e)
log.warn("[CHATGPT] Timeout")
return "我没有收到消息,请稍后重试"
yield True, "我没有收到消息,请稍后重试"
except Exception as e:
# unknown exception
log.exception(e)
Session.clear_session(user_id)
return "请再问我一次吧"
def _process_reply_stream(
self,
query: str,
reply: dict,
user_id: str
) -> str:
full_response = ""
for response in reply:
if response.get("choices") is None or len(response["choices"]) == 0:
raise Exception("OpenAI API returned no choices")
if response["choices"][0].get("finish_details") is not None:
break
if response["choices"][0].get("text") is None:
raise Exception("OpenAI API returned no text")
if response["choices"][0]["text"] == "<|endoftext|>":
break
yield response["choices"][0]["text"]
full_response += response["choices"][0]["text"]
if query and full_response:
Session.save_session(query, full_response, user_id)
yield True, "请再问我一次吧"
def create_img(self, query, retry_count=0):
try:

View File

@@ -18,7 +18,9 @@ class OpenAIModel(Model):
openai.api_base = api_base
proxy = model_conf(const.OPEN_AI).get('proxy')
log.info("[OPEN_AI] api_base={}".format(openai.api_base))
proxy = model_conf(const.OPEN_AI).get('proxy')
if proxy:
openai.proxy = proxy
def reply(self, query, context=None):
# acquire reply content
@@ -51,11 +53,11 @@ class OpenAIModel(Model):
response = openai.Completion.create(
model="text-davinci-003", # 对话模型的名称
prompt=query,
temperature=0.9, # 在[0,1]之间,越大表示回复越具有不确定性
max_tokens=1200, # 回复最大的字符数
top_p=1,
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
temperature=model_conf(const.OPEN_AI).get("temperature", 0.75), # 熵值,在[0,1]之间,越大表示选取的候选词越随机回复越具有不确定性建议和top_p参数二选一使用创意性任务越大越好精确性任务越小越好
#max_tokens=4096, # 回复最大的字符数,为输入和输出的总数
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),, #候选词列表。0.7 意味着只考虑前70%候选词的标记建议和temperature参数二选一使用
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0), # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
stop=["\n\n\n"]
)
res_content = response.choices[0]['text'].strip().replace('<|endoftext|>', '')
@@ -77,36 +79,55 @@ class OpenAIModel(Model):
return "请再问我一次吧"
def reply_text_stream(self, query, new_query, user_id, retry_count=0):
async def reply_text_stream(self, query, context, retry_count=0):
try:
user_id=context['from_user_id']
new_query = Session.build_session_query(query, user_id)
res = openai.Completion.create(
model="text-davinci-003", # 对话模型的名称
model= "text-davinci-003", # 对话模型的名称
prompt=new_query,
temperature=0.9, # 在[0,1]之间,越大表示回复越具有不确定性
max_tokens=1200, # 回复最大的字符数
top_p=1,
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
stop=["\n\n\n"],
temperature=model_conf(const.OPEN_AI).get("temperature", 0.75), # 熵值,在[0,1]之间,越大表示选取的候选词越随机回复越具有不确定性建议和top_p参数二选一使用创意性任务越大越好精确性任务越小越好
max_tokens=model_conf(const.OPEN_AI).get("conversation_max_tokens", 3000), # 回复最大的字符数,为输入和输出的总数,davinci的流式对话需要启用这属性不然对话会断流
#top_p=model_conf(const.OPEN_AI).get("top_p", 0.7),, #候选词列表。0.7 意味着只考虑前70%候选词的标记建议和temperature参数二选一使用
frequency_penalty=model_conf(const.OPEN_AI).get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则越降低模型一行中的重复用词,更倾向于产生不同的内容
presence_penalty=model_conf(const.OPEN_AI).get("presence_penalty", 1.0), # [-2,2]之间,该值越大则越不受输入限制,将鼓励模型生成输入中不存在的新词,更倾向于产生不同的内容
stream=True
)
return self._process_reply_stream(query, res, user_id)
full_response = ""
for chunk in res:
log.debug(chunk)
if chunk["choices"][0]["finish_reason"] == "stop":
break
chunk_message = chunk['choices'][0].get("text")
if chunk_message:
full_response += chunk_message
yield False, full_response
Session.save_session(query, full_response, user_id)
log.info("[chatgpt]: reply={}", full_response)
yield True, full_response
except openai.error.RateLimitError as e:
# rate limit exception
log.warn(e)
if retry_count < 1:
time.sleep(5)
log.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
return self.reply_text(query, user_id, retry_count+1)
log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
async for result in self.reply_text_stream(query, context, retry_count+1):
yield result
else:
return "提问太快啦,请休息一下再问我吧"
yield True, "提问太快啦,请休息一下再问我吧"
except openai.error.APIConnectionError as e:
log.warn(e)
log.warn("[CHATGPT] APIConnection failed")
yield True, "我连接不到网络,请稍后重试"
except openai.error.Timeout as e:
log.warn(e)
log.warn("[CHATGPT] Timeout")
yield True, "我没有收到消息,请稍后重试"
except Exception as e:
# unknown exception
log.exception(e)
Session.clear_session(user_id)
return "请再问我一次吧"
yield True, "请再问我一次吧"
def _process_reply_stream(
self,

View File

@@ -1,4 +1,6 @@
PyJWT
flask
flask_socketio
itchat-uos==1.5.0.dev0
openai
openai
EdgeGPT