mirror of
https://github.com/zhayujie/bot-on-anything.git
synced 2026-01-19 01:21:06 +08:00
新增流式对话功能
This commit is contained in:
@@ -117,7 +117,7 @@ pip3 install --upgrade openai
|
||||
|
||||
### 2.GPT-3.0
|
||||
|
||||
使用的模型是 `text-davinci-003`,详情参考[官方文档]([https://platform.openai.com/docs/guides/chat](https://platform.openai.com/docs/guides/completion/introduction))。
|
||||
使用的模型是 `text-davinci-003`,详情参考[官方文档](https://platform.openai.com/docs/guides/completion/introduction)。注意,gpt3.0模型使用流式输出对话会出现提前中断的问题,请尽量使用一次性输出对话。
|
||||
|
||||
使用步骤和上述GPT-3.5基本相同:
|
||||
|
||||
|
||||
@@ -7,3 +7,8 @@ class Bridge(object):
|
||||
|
||||
def fetch_reply_content(self, query, context):
    """Synchronously fetch a complete reply for *query* from the configured bot.

    The bot implementation is selected by the ``model.type`` config entry.
    """
    model_type = config.conf().get("model").get("type")
    bot = model_factory.create_bot(model_type)
    return bot.reply(query, context)
|
||||
|
||||
async def fetch_reply_stream(self, query, context):
    """Asynchronously stream reply chunks for *query* from the configured bot.

    Yields ``(final, response)`` pairs exactly as produced by the bot's
    ``reply_text_stream``; ``final`` marks the last chunk of the stream.
    """
    model_type = config.conf().get("model").get("type")
    streaming_bot = model_factory.create_bot(model_type)
    async for is_final, chunk in streaming_bot.reply_text_stream(query, context):
        yield is_final, chunk
|
||||
|
||||
@@ -29,3 +29,7 @@ class Channel(object):
|
||||
|
||||
def build_reply_content(self, query, context=None):
    """Return a complete (non-streaming) reply for *query* via the model bridge."""
    bridge = Bridge()
    return bridge.fetch_reply_content(query, context)
|
||||
|
||||
async def build_reply_stream(self, query, context=None):
    """Stream ``(final, response)`` reply chunks for *query* via the model bridge."""
    bridge = Bridge()
    async for is_final, chunk in bridge.fetch_reply_stream(query, context):
        yield is_final, chunk
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# encoding:utf-8
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from channel.http import auth
|
||||
from flask import Flask, request, render_template, make_response
|
||||
@@ -9,8 +10,11 @@ from common import functions
|
||||
from config import channel_conf
|
||||
from config import channel_conf_val
|
||||
from channel.channel import Channel
|
||||
from flask_socketio import SocketIO
|
||||
from common import log
|
||||
|
||||
http_app = Flask(__name__,)
|
||||
socketio = SocketIO(http_app)
|
||||
# 自动重载模板文件
|
||||
http_app.jinja_env.auto_reload = True
|
||||
http_app.config['TEMPLATES_AUTO_RELOAD'] = True
|
||||
@@ -19,6 +23,50 @@ http_app.config['TEMPLATES_AUTO_RELOAD'] = True
|
||||
http_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
|
||||
|
||||
|
||||
async def return_stream(data):
    # Relay streamed reply chunks to the websocket client identified by
    # request.sid.
    # NOTE(review): relies on the Flask-SocketIO request context (request.sid)
    # still being readable inside this coroutine when driven by asyncio.run —
    # confirm against the caller in stream().
    async for final, response in HttpChannel().handle_stream(data=data):
        try:
            if(final):
                # Last chunk: deliver it on the 'disconnect' event, then close
                # the client's socket.
                socketio.server.emit(
                    'disconnect', {'result': response, 'final': final}, request.sid, namespace="/chat")
                socketio.server.disconnect(request.sid)
            else:
                # Intermediate chunk: push it as a regular 'message' event.
                socketio.server.emit(
                    'message', {'result': response, 'final': final}, request.sid, namespace="/chat")
        except Exception as e:
            # Emit failed (e.g. client already gone): drop the socket and stop
            # consuming the model stream.
            socketio.server.disconnect(request.sid)
            log.error("[http]emit:", e)
            break
|
||||
|
||||
|
||||
@socketio.on('message', namespace='/chat')
def stream(data):
    # Websocket entry point for one chat turn; streams the model reply back.
    log.info('message:', data)
    if (auth.identify(request) == False):
        # Unauthenticated client: drop the connection immediately.
        client_sid = request.sid
        socketio.server.disconnect(client_sid)
        return
    # The payload arrives as a JSON string under the 'data' key.
    data = json.loads(data["data"])
    if (data):
        img_match_prefix = functions.check_prefix(
            data["msg"], channel_conf_val(const.HTTP, 'image_create_prefix'))
        if img_match_prefix:
            # Image request: handled synchronously and sent as one message.
            reply_text = HttpChannel().handle(data=data)
            socketio.emit('message', {'result': reply_text}, namespace='/chat')
        # NOTE(review): this also runs for image requests handled above —
        # confirm an early return is not missing after the image branch.
        asyncio.run(return_stream(data))
|
||||
|
||||
|
||||
@socketio.on('connect', namespace='/chat')
def connect():
    # Acknowledge a new websocket client on the /chat namespace.
    log.info('connected')
    socketio.emit('message', {'info': "connected"}, namespace='/chat')
|
||||
|
||||
|
||||
@socketio.on('disconnect', namespace='/chat')
def disconnect():
    # Log client disconnects; no per-connection state needs cleanup here.
    log.info('disconnect')
|
||||
|
||||
|
||||
@http_app.route("/chat", methods=['POST'])
|
||||
def chat():
|
||||
if (auth.identify(request) == False):
|
||||
@@ -80,3 +128,10 @@ class HttpChannel(Channel):
|
||||
images += f"[]({url})\n"
|
||||
reply = images
|
||||
return reply
|
||||
|
||||
async def handle_stream(self, data):
    """Stream reply chunks for an incoming websocket chat payload.

    Args:
        data: dict carrying 'id' (per-user/conversation identifier) and
            'msg' (the user's message text).

    Yields:
        ``(final, reply)`` pairs from the underlying model stream.
    """
    # Build the per-user context directly; avoids shadowing the builtin ``id``
    # as the original local variable did.
    context = {'from_user_id': str(data["id"])}
    async for final, reply in super().build_reply_stream(data["msg"], context):
        yield final, reply
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
|
||||
.typing_loader {
|
||||
width: 6px;
|
||||
height: 6px;
|
||||
@@ -11,7 +10,9 @@
|
||||
left: -12px;
|
||||
margin: 7px 15px 6px;
|
||||
}
|
||||
ol,pre {
|
||||
|
||||
ol,
|
||||
pre {
|
||||
background-color: #b1e3b1c4;
|
||||
border: 1px solid #c285e3ab;
|
||||
padding: 0.5rem 1.5rem 0.5rem;
|
||||
@@ -20,50 +21,52 @@ ol,pre {
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
pre::-webkit-scrollbar{
|
||||
pre::-webkit-scrollbar {
|
||||
width: 0px;
|
||||
height:5px;
|
||||
height: 5px;
|
||||
}
|
||||
pre::-webkit-scrollbar-thumb{
|
||||
|
||||
pre::-webkit-scrollbar-thumb {
|
||||
border-right: 10px #ffffff00 solid;
|
||||
border-left: 10px #ffffff00 solid;
|
||||
-webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3);
|
||||
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
|
||||
}
|
||||
|
||||
.to .typing_loader {
|
||||
animation: typing-black 1s linear infinite alternate;
|
||||
}
|
||||
|
||||
@-webkit-keyframes typing {
|
||||
0% {
|
||||
background-color: rgba(255,255,255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
|
||||
background-color: rgba(255, 255, 255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
50% {
|
||||
background-color: rgba(255,255,255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
|
||||
background-color: rgba(255, 255, 255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
|
||||
}
|
||||
|
||||
100% {
|
||||
background-color: rgba(255,255,255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
|
||||
background-color: rgba(255, 255, 255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@-moz-keyframes typing {
|
||||
0% {
|
||||
background-color: rgba(255,255,255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
|
||||
background-color: rgba(255, 255, 255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
50% {
|
||||
background-color: rgba(255,255,255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
|
||||
background-color: rgba(255, 255, 255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
|
||||
}
|
||||
|
||||
100% {
|
||||
background-color: rgba(255,255,255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
|
||||
background-color: rgba(255, 255, 255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,29 +78,29 @@ pre::-webkit-scrollbar-thumb{
|
||||
|
||||
50% {
|
||||
background-color: rgba(74, 74, 74, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74,0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 1), 24px 0px 0px 0px rgba(74, 74, 74, 0.4);
|
||||
}
|
||||
|
||||
100% {
|
||||
background-color: rgba(74, 74, 74, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74,0.4), 24px 0px 0px 0px rgba(74, 74, 74,1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(74, 74, 74, 0.4), 24px 0px 0px 0px rgba(74, 74, 74, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes typing {
|
||||
0% {
|
||||
background-color: rgba(255,255,255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,0.2);
|
||||
background-color: rgba(255, 255, 255, 1);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
50% {
|
||||
background-color: rgba(255,255,255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,1), 24px 0px 0px 0px rgba(255,255,255,0.4);
|
||||
background-color: rgba(255, 255, 255, 0.4);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 1), 24px 0px 0px 0px rgba(255, 255, 255, 0.4);
|
||||
}
|
||||
|
||||
100% {
|
||||
background-color: rgba(255,255,255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255,255,255,0.4), 24px 0px 0px 0px rgba(255,255,255,1);
|
||||
background-color: rgba(255, 255, 255, 0.2);
|
||||
box-shadow: 12px 0px 0px 0px rgba(255, 255, 255, 0.4), 24px 0px 0px 0px rgba(255, 255, 255, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,27 +115,30 @@ pre::-webkit-scrollbar-thumb{
|
||||
.convFormDynamic textarea.userInputDynamic {
|
||||
border: none;
|
||||
padding: 7px 10px;
|
||||
overflow-x: hidden!important;
|
||||
overflow-x: hidden !important;
|
||||
outline: none;
|
||||
font-size: 0.905rem;
|
||||
float: left;
|
||||
width: calc(100% - 70px);
|
||||
line-height: 1.3em;
|
||||
min-height: 1.7em;
|
||||
min-height: 2em;
|
||||
max-height: 10rem;
|
||||
display: block;
|
||||
max-width: 89vw;
|
||||
margin-right: -1vw;
|
||||
resize: none;
|
||||
}
|
||||
.convFormDynamic textarea::-webkit-scrollbar{
|
||||
|
||||
.convFormDynamic textarea::-webkit-scrollbar {
|
||||
width: 2px;
|
||||
background-color: lawngreen;
|
||||
}
|
||||
.convFormDynamic textarea::-webkit-scrollbar-thumb{
|
||||
-webkit-box-shadow: inset 0 0 6px rgba(0,0,0,.3);
|
||||
background-color: dodgerblue;
|
||||
|
||||
.convFormDynamic textarea::-webkit-scrollbar-thumb {
|
||||
-webkit-box-shadow: inset 0 0 6px rgba(0, 0, 0, .3);
|
||||
background-color: dodgerblue;
|
||||
}
|
||||
|
||||
.convFormDynamic input.userInputDynamic {
|
||||
border: none;
|
||||
padding: 7px 10px;
|
||||
@@ -180,16 +186,20 @@ div.conv-form-wrapper:before {
|
||||
top: 0;
|
||||
left: 0;
|
||||
z-index: 2;
|
||||
background: linear-gradient(#fff, transparent);
|
||||
background: linear-gradient(#ffffff3b, transparent);
|
||||
}
|
||||
|
||||
@media (max-width: 767px) {
|
||||
div.conv-form-wrapper div.wrapper-messages, div.conv-form-wrapper div#messages {
|
||||
|
||||
div.conv-form-wrapper div.wrapper-messages,
|
||||
div.conv-form-wrapper div#messages {
|
||||
max-height: 71vh;
|
||||
}
|
||||
}
|
||||
|
||||
div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar, div.conv-form-wrapper div.options::-webkit-scrollbar {
|
||||
div.conv-form-wrapper div.wrapper-messages::-webkit-scrollbar,
|
||||
div#feed ul::-webkit-scrollbar,
|
||||
div.conv-form-wrapper div.options::-webkit-scrollbar {
|
||||
width: 0px;
|
||||
height: 0px;
|
||||
/* remove scrollbar space */
|
||||
@@ -261,12 +271,13 @@ div.conv-form-wrapper div#messages div.message.to {
|
||||
}
|
||||
|
||||
div.conv-form-wrapper div#messages div.message.from {
|
||||
background: dodgerblue;
|
||||
background: dodgerblue;
|
||||
color: #fff;
|
||||
border-top-right-radius: 0;
|
||||
}
|
||||
|
||||
.message.to+.message.from, .message.from+.message.to {
|
||||
.message.to+.message.from,
|
||||
.message.from+.message.to {
|
||||
margin-top: 15px;
|
||||
}
|
||||
|
||||
@@ -294,7 +305,7 @@ div.conv-form-wrapper div#messages div.message.from {
|
||||
position: absolute;
|
||||
bottom: 0px;
|
||||
border: none;
|
||||
left:95%;
|
||||
left: 95%;
|
||||
margin: 5px;
|
||||
color: #fff;
|
||||
cursor: pointer;
|
||||
@@ -315,10 +326,11 @@ div.conv-form-wrapper div#messages div.message.from {
|
||||
}
|
||||
|
||||
button.submit.glow {
|
||||
border: 1px solid dodgerblue !important;
|
||||
background: dodgerblue !important;
|
||||
box-shadow: 0 0 5px 2px rgba(14, 144, 255,0.4);
|
||||
border: 1px solid dodgerblue !important;
|
||||
background: dodgerblue !important;
|
||||
box-shadow: 0 0 5px 2px rgba(14, 144, 255, 0.4);
|
||||
}
|
||||
|
||||
.no-border {
|
||||
border: none !important;
|
||||
}
|
||||
@@ -327,7 +339,8 @@ button.submit.glow {
|
||||
cursor: grab;
|
||||
}
|
||||
|
||||
div.conv-form-wrapper div#messages::-webkit-scrollbar, div#feed ul::-webkit-scrollbar {
|
||||
div.conv-form-wrapper div#messages::-webkit-scrollbar,
|
||||
div#feed ul::-webkit-scrollbar {
|
||||
width: 0px;
|
||||
/* remove scrollbar space */
|
||||
background: transparent;
|
||||
@@ -338,3 +351,268 @@ span.clear {
|
||||
display: block;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.drawer-icon-container {
|
||||
position: fixed;
|
||||
top: calc(50% - 24px);
|
||||
right: -30px;
|
||||
z-index: 1000;
|
||||
transition: right 0.5s ease;
|
||||
}
|
||||
|
||||
.drawer-icon {
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
cursor: pointer;
|
||||
box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
|
||||
background-color: #b1cee350;
|
||||
padding-left: 22px;
|
||||
border-radius: 50%;
|
||||
}
|
||||
.drawer-icon:hover{
|
||||
background-color: #005eff96;
|
||||
}
|
||||
.wrenchFilled.icon {
|
||||
margin-left: -13px;
|
||||
margin-top: 5px;
|
||||
width: 10px;
|
||||
height: 10px;
|
||||
border-radius: 50%;
|
||||
background-color: #333333;
|
||||
transform-origin: center 10.5px;
|
||||
transform: rotate(-45deg);
|
||||
}
|
||||
|
||||
.wrenchFilled.icon:after {
|
||||
width: 0;
|
||||
height: 0;
|
||||
border-radius: 0 0 1px 1px;
|
||||
background-color: #333333;
|
||||
border-left: solid 1px transparent;
|
||||
border-right: solid 1px transparent;
|
||||
border-top: solid 1px white;
|
||||
border-bottom: solid 1px transparent;
|
||||
left: 4px;
|
||||
top: 4px;
|
||||
}
|
||||
|
||||
.wrenchFilled.icon:before {
|
||||
width: 2px;
|
||||
height: 5px;
|
||||
background-color: white;
|
||||
left: 4px;
|
||||
border-radius: 0 0 1px 1px;
|
||||
box-shadow: 0 15px 0px 1px #333333, 0 11px 0px 1px #333333, 0 8px 0px 1px #333333;
|
||||
}
|
||||
|
||||
.icon {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.icon:before,
|
||||
.icon:after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.icon i {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.icon i:before,
|
||||
.icon i:after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.drawer-icon i {
|
||||
margin-left: -15px;
|
||||
line-height: 30px;
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
.drawer {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
right: -300px;
|
||||
width: 300px;
|
||||
height: 100%;
|
||||
background-color: #fff;
|
||||
z-index: 999;
|
||||
transition: right 0.5s ease;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.drawer.open {
|
||||
right: 0;
|
||||
}
|
||||
|
||||
.drawer-header {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
background-color: #b1cee350;
|
||||
border-bottom: 1px solid #ddd;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.drawer-header h2 {
|
||||
margin: 0 0 0 16px;
|
||||
}
|
||||
|
||||
.drawer-header button {
|
||||
background-color: transparent;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.drawer-content {
|
||||
flex: 1 1 auto;
|
||||
height: 100%;
|
||||
overflow: auto;
|
||||
padding: 16px;
|
||||
}
|
||||
|
||||
.drawer-overlay {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
background-color: rgba(0, 0, 0, 0.5);
|
||||
z-index: 998;
|
||||
display: none;
|
||||
}
|
||||
|
||||
@-webkit-keyframes click-wave {
|
||||
0% {
|
||||
width: 40px;
|
||||
height: 40px;
|
||||
opacity: 0.35;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
100% {
|
||||
width: 60px;
|
||||
height: 60px;
|
||||
margin-left: 80px;
|
||||
margin-top: 80px;
|
||||
opacity: 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
@-moz-keyframes click-wave {
|
||||
0% {
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
opacity: 0.35;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
100% {
|
||||
width: 80px;
|
||||
height: 80px;
|
||||
margin-left: -23px;
|
||||
margin-top: -23px;
|
||||
opacity: 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
@-o-keyframes click-wave {
|
||||
0% {
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
opacity: 0.35;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
100% {
|
||||
width: 80px;
|
||||
height: 80px;
|
||||
margin-left: -23px;
|
||||
margin-top: -23px;
|
||||
opacity: 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes click-wave {
|
||||
0% {
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
opacity: 0.35;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
100% {
|
||||
width: 80px;
|
||||
height: 80px;
|
||||
margin-left: -23px;
|
||||
margin-top: -23px;
|
||||
opacity: 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
.option-input {
|
||||
-webkit-appearance: none;
|
||||
-moz-appearance: none;
|
||||
-ms-appearance: none;
|
||||
-o-appearance: none;
|
||||
appearance: none;
|
||||
position: relative;
|
||||
top: 10px;
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
-webkit-transition: all 0.15s ease-out 0;
|
||||
-moz-transition: all 0.15s ease-out 0;
|
||||
transition: all 0.15s ease-out 0;
|
||||
background: #cbd1d8;
|
||||
border: none;
|
||||
color: #fff;
|
||||
cursor: pointer;
|
||||
display: inline-block;
|
||||
outline: none;
|
||||
position: relative;
|
||||
margin-right: 0.5rem;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
.option-input:hover {
|
||||
background: #9faab7;
|
||||
}
|
||||
|
||||
.option-input:checked {
|
||||
background: #1e90ffaa;
|
||||
}
|
||||
|
||||
.option-input:checked::before {
|
||||
width: 30px;
|
||||
height: 30px;
|
||||
position: absolute;
|
||||
content: '☻';
|
||||
display: inline-block;
|
||||
font-size: 29px;
|
||||
text-align: center;
|
||||
line-height: 26px;
|
||||
}
|
||||
|
||||
.option-input:checked::after {
|
||||
-webkit-animation: click-wave 0.65s;
|
||||
-moz-animation: click-wave 0.65s;
|
||||
animation: click-wave 0.65s;
|
||||
background: #40e0d0;
|
||||
content: '';
|
||||
display: block;
|
||||
position: relative;
|
||||
z-index: 100;
|
||||
}
|
||||
|
||||
.option-input.radio {
|
||||
border-radius: 50%;
|
||||
}
|
||||
|
||||
.option-input.radio::after {
|
||||
border-radius: 50%;
|
||||
}
|
||||
@@ -1,20 +1,29 @@
|
||||
|
||||
function ConvState(wrapper, form, params) {
|
||||
this.id='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
|
||||
function generateUUID () {
|
||||
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
|
||||
var r = Math.random() * 16 | 0,
|
||||
v = c == 'x' ? r : (r & 0x3 | 0x8);
|
||||
return v.toString(16);
|
||||
});
|
||||
})
|
||||
}
|
||||
|
||||
const conversationType = {
|
||||
DISPOSABLE: 1,
|
||||
STREAM: 1 << 1
|
||||
}
|
||||
function ConvState (wrapper, form, params) {
|
||||
this.id = generateUUID()
|
||||
this.form = form;
|
||||
this.wrapper = wrapper;
|
||||
this.backgroundColor = '#ffffff';
|
||||
this.parameters = params;
|
||||
this.scrollDown = function () {
|
||||
$(this.wrapper).find('#messages').stop().animate({ scrollTop: $(this.wrapper).find('#messages')[0].scrollHeight }, 600);
|
||||
}.bind(this);
|
||||
};
|
||||
ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。') {
|
||||
ConvState.prototype.printAnswer = function (uuid, answer = '我是ChatGPT, 一个由OpenAI训练的大型语言模型, 我旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。') {
|
||||
setTimeout(function () {
|
||||
var messageObj = $(this.wrapper).find('.message.typing');
|
||||
var messageObj = $(this.wrapper).find(`#${uuid}`);
|
||||
answer = marked.parse(answer);
|
||||
messageObj.html(answer);
|
||||
messageObj.removeClass('typing').addClass('ready');
|
||||
@@ -22,39 +31,66 @@ ConvState.prototype.printAnswer = function (answer = '我是ChatGPT, 一个由Op
|
||||
$(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
|
||||
}.bind(this), 500);
|
||||
};
|
||||
ConvState.prototype.updateAnswer = function (question, uuid) {
|
||||
setTimeout(function () {
|
||||
var socket = io('/chat');
|
||||
socket.connect('/chat');
|
||||
var messageObj = $(this.wrapper).find(`#${uuid}`);
|
||||
this.scrollDown();
|
||||
socket.on('message', msg => {
|
||||
console.log("message:", msg)
|
||||
if (msg.result)
|
||||
messageObj.html(msg.result + `<div class="typing_loader"></div></div>`);
|
||||
});
|
||||
socket.on('connect', msg => {
|
||||
socket.emit('message', { data: JSON.stringify(question) });
|
||||
});
|
||||
socket.on('disconnect', msg => {
|
||||
if (msg.result) {
|
||||
answer = marked.parse(msg.result);
|
||||
messageObj.html(answer);
|
||||
}
|
||||
messageObj.removeClass('typing').addClass('ready');
|
||||
this.scrollDown();
|
||||
$(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
|
||||
console.log("disconnect", msg)
|
||||
});
|
||||
}.bind(this), 1000);
|
||||
};
|
||||
ConvState.prototype.sendMessage = function (msg) {
|
||||
var message = $('<div class="message from">' + msg + '</div>');
|
||||
|
||||
$('button.submit').removeClass('glow');
|
||||
$(this.wrapper).find(this.parameters.inputIdHashTagName).focus();
|
||||
setTimeout(function () {
|
||||
$(this.wrapper).find("#messages").append(message);
|
||||
this.scrollDown();
|
||||
}.bind(this), 100);
|
||||
|
||||
var messageObj = $('<div class="message to typing"><div class="typing_loader"></div></div>');
|
||||
var uuid = generateUUID().toLowerCase();
|
||||
var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
|
||||
setTimeout(function () {
|
||||
$(this.wrapper).find('#messages').append(messageObj);
|
||||
this.scrollDown();
|
||||
}.bind(this), 150);
|
||||
var _this = this
|
||||
$.ajax({
|
||||
url: "./chat",
|
||||
type: "POST",
|
||||
timeout:180000,
|
||||
data: JSON.stringify({
|
||||
"id": _this.id,
|
||||
"msg": msg
|
||||
}),
|
||||
contentType: "application/json; charset=utf-8",
|
||||
dataType: "json",
|
||||
success: function (data) {
|
||||
_this.printAnswer(data.result)
|
||||
},
|
||||
error:function () {
|
||||
_this.printAnswer("网络故障,对话未送达")
|
||||
},
|
||||
})
|
||||
var question = { "id": _this.id, "msg": msg }
|
||||
if (localConfig.conversationType == conversationType.STREAM)
|
||||
this.updateAnswer(question, uuid)
|
||||
else
|
||||
$.ajax({
|
||||
url: "./chat",
|
||||
type: "POST",
|
||||
timeout: 180000,
|
||||
data: JSON.stringify(question),
|
||||
contentType: "application/json; charset=utf-8",
|
||||
dataType: "json",
|
||||
success: function (data) {
|
||||
_this.printAnswer(uuid, data.result)
|
||||
},
|
||||
error: function (data) {
|
||||
console.log(data)
|
||||
_this.printAnswer(uuid, "网络故障,对话未送达")
|
||||
},
|
||||
})
|
||||
};
|
||||
(function ($) {
|
||||
$.fn.convform = function () {
|
||||
@@ -81,13 +117,30 @@ ConvState.prototype.sendMessage = function (msg) {
|
||||
$(wrapper).append(inputForm);
|
||||
|
||||
var state = new ConvState(wrapper, form, parameters);
|
||||
// Bind checkbox values to ConvState object
|
||||
$('input[type="checkbox"]').change(function () {
|
||||
var key = $(this).attr('name');
|
||||
state[key] = $(this).is(':checked');
|
||||
});
|
||||
|
||||
// Bind radio button values to ConvState object
|
||||
$('input[type="radio"]').change(function () {
|
||||
var key = $(this).attr('name');
|
||||
state[key] = $(this).val();
|
||||
});
|
||||
|
||||
// Bind color input value to ConvState object
|
||||
$('#backgroundColor').change(function () {
|
||||
state["backgroundColor"] = $(this).val();
|
||||
});
|
||||
|
||||
//prints first contact
|
||||
$.when($('div.spinLoader').addClass('hidden')).done(function () {
|
||||
var messageObj = $('<div class="message to typing"><div class="typing_loader"></div></div>');
|
||||
var uuid = generateUUID()
|
||||
var messageObj = $(`<div class="message to typing" id="${uuid}"><div class="typing_loader"></div></div>`);
|
||||
$(state.wrapper).find('#messages').append(messageObj);
|
||||
state.scrollDown();
|
||||
state.printAnswer();
|
||||
state.printAnswer(uuid = uuid);
|
||||
});
|
||||
|
||||
//binds enter to send message
|
||||
|
||||
@@ -19,33 +19,137 @@
|
||||
<div id="chat" class="conv-form-wrapper">
|
||||
</div>
|
||||
</div>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/marked/4.2.12/marked.min.js"></script>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/autosize.js/6.0.1/autosize.min.js"></script>
|
||||
<script src="./static/1.js"></script>
|
||||
<script>
|
||||
var rollbackTo = false;
|
||||
var originalState = false;
|
||||
function storeState(a) {
|
||||
rollbackTo = a.current
|
||||
}
|
||||
function rollback(a) {
|
||||
if (rollbackTo != false) {
|
||||
if (originalState == false) {
|
||||
originalState = a.current.next
|
||||
<div class="drawer-icon-container">
|
||||
<div class="drawer-icon">
|
||||
<div class="wrenchFilled icon"></div>
|
||||
</div>
|
||||
<div class="drawer">
|
||||
<div class="drawer-header">
|
||||
<h2>设置</h2>
|
||||
<button id="close-drawer">X</button>
|
||||
</div>
|
||||
<div class="drawer-content">
|
||||
<div hidden="true">
|
||||
<input type="checkbox" id="bold" name="bold">
|
||||
<label for="bold">Bold</label>
|
||||
<input type="checkbox" id="italic" name="italic">
|
||||
<label for="italic">Italic</label>
|
||||
</div>
|
||||
<div>
|
||||
<label for="backgroundColor">背景颜色:</label>
|
||||
<input type="color" id="backgroundColor" name="backgroundColor" value="#ffffff">
|
||||
</div>
|
||||
<div>
|
||||
<p>AI回复方式:</p>
|
||||
<input type="radio" id="option1" name="conversationType" class="option-input radio" value=1 checked>
|
||||
<label for="option1">一次性发送</label>
|
||||
<input type="radio" id="option2" name="conversationType" class="option-input radio" value=2>
|
||||
<label for="option2">逐段发送</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="drawer-overlay"></div>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/jquery/3.6.3/jquery.min.js"></script>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/marked/4.2.12/marked.min.js"></script>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/autosize.js/6.0.1/autosize.min.js"></script>
|
||||
<script src="https://cdn.bootcdn.net/ajax/libs/socket.io/4.6.1/socket.io.js"></script>
|
||||
<script src="./static/1.js"></script>
|
||||
<script>
|
||||
var rollbackTo = false;
|
||||
var originalState = false;
|
||||
|
||||
function storeState(a) {
|
||||
rollbackTo = a.current
|
||||
}
|
||||
|
||||
function rollback(a) {
|
||||
if (rollbackTo != false) {
|
||||
if (originalState == false) {
|
||||
originalState = a.current.next
|
||||
}
|
||||
a.current.next = rollbackTo
|
||||
}
|
||||
a.current.next = rollbackTo
|
||||
}
|
||||
}
|
||||
function restore(a) {
|
||||
if (originalState != false) {
|
||||
a.current.next = originalState
|
||||
|
||||
function restore(a) {
|
||||
if (originalState != false) {
|
||||
a.current.next = originalState
|
||||
}
|
||||
}
|
||||
}
|
||||
jQuery(function (a) {
|
||||
var b = a("#chat").convform()
|
||||
});
|
||||
</script>
|
||||
|
||||
var ConvStateMap = {
|
||||
bold: false,
|
||||
italic: false,
|
||||
backgroundColor: '#ffffff',
|
||||
conversationType: conversationType.DISPOSABLE
|
||||
};
|
||||
|
||||
// Create a Proxy object to watch all properties of the "ConvStateMap" object
|
||||
var localConfig = new Proxy(ConvStateMap, {
|
||||
set: function (target, prop, val) {
|
||||
target[prop] = val;
|
||||
// Call your function here
|
||||
localStorage.setItem('botOnAnyThingConfig', JSON.stringify(localConfig))
|
||||
switch (prop) {
|
||||
case 'backgroundColor':
|
||||
$('body').css('background-color', val);
|
||||
$(`#backgroundColor`)?.val(val);
|
||||
break;
|
||||
case 'conversationType':
|
||||
if (val)
|
||||
$(`#option${val}`)?.prop("checked", true);
|
||||
}
|
||||
}
|
||||
});
|
||||
$(document).ready(function () {
|
||||
let config = localStorage.getItem('botOnAnyThingConfig')
|
||||
if (config) {
|
||||
config = JSON.parse(config)
|
||||
Object.keys(config).forEach(item => localConfig[item] = config[item])
|
||||
}
|
||||
// Open drawer
|
||||
$('.drawer-icon').click(function () {
|
||||
if (!$('.drawer').hasClass('open')) {
|
||||
$('.drawer').toggleClass('open');
|
||||
$('.drawer-overlay').fadeIn();
|
||||
$('.drawer-icon-container').toggleClass('open').css('right', '270px');
|
||||
} else
|
||||
closeDrawer()
|
||||
});
|
||||
|
||||
// Close drawer
|
||||
$('#close-drawer, .drawer-overlay').click(closeDrawer);
|
||||
|
||||
function closeDrawer() {
|
||||
$('.drawer').removeClass('open');
|
||||
$('.drawer-overlay').fadeOut();
|
||||
$('.drawer-icon-container').removeClass('open').css('right', '-30px');
|
||||
}
|
||||
});
|
||||
// Bind checkbox values to ConvStateMap object
|
||||
$('input[type="checkbox"]').change(function () {
|
||||
var key = $(this).attr('name');
|
||||
if (key)
|
||||
localConfig[key] = $(this).is(':checked');
|
||||
});
|
||||
|
||||
// Bind radio button values to ConvStateMap object
|
||||
$('input[type="radio"]').change(function () {
|
||||
var key = $(this).attr('name');
|
||||
if (key)
|
||||
localConfig[key] = $(this).val();
|
||||
});
|
||||
|
||||
// Bind color input value to ConvStateMap object
|
||||
$('#backgroundColor').on("input", function (e) {
|
||||
localConfig.backgroundColor = $(this).val();
|
||||
});
|
||||
|
||||
jQuery(function (a) {
|
||||
var b = a("#chat").convform()
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@@ -41,17 +41,13 @@ class SydneyBot(Chatbot):
|
||||
break
|
||||
ordered_messages.insert(0, message)
|
||||
current_message_id = message.get('parentMessageId')
|
||||
|
||||
return ordered_messages
|
||||
|
||||
def pop_last_conversation(self):
    """Remove the most recently cached message of the current conversation."""
    cached_conversation = self.conversations_cache[self.conversation_key]
    cached_conversation["messages"].pop()
|
||||
|
||||
async def ask(
|
||||
async def ask_stream(
|
||||
self,
|
||||
prompt: str,
|
||||
conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
|
||||
message_id: str = None,
|
||||
message_id: str = None
|
||||
) -> dict:
|
||||
# 开启新对话
|
||||
self.chat_hub = SydneyHub(Conversation(
|
||||
@@ -88,11 +84,32 @@ class SydneyBot(Chatbot):
|
||||
async for final, response in self.chat_hub.ask_stream(
|
||||
prompt=prompt,
|
||||
conversation_style=conversation_style
|
||||
):
|
||||
if final:
|
||||
try:
|
||||
self.update_reply_cache(response["item"]["messages"][-1])
|
||||
except Exception as e:
|
||||
self.conversations_cache[self.conversation_key]["messages"].pop()
|
||||
yield True, f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请清除全部记忆后再次尝试"
|
||||
yield final, response
|
||||
|
||||
async def ask(
    self,
    prompt: str,
    conversation_style: EdgeGPT.CONVERSATION_STYLE_TYPE = None,
    message_id: str = None
) -> dict:
    """Non-streaming ask: drain ask_stream() and return the final response dict."""
    # Close any websocket left open by a previous call before starting anew.
    if self.chat_hub.wss:
        if not self.chat_hub.wss.closed:
            await self.chat_hub.wss.close()
    async for final, response in self.ask_stream(
        prompt=prompt,
        conversation_style=conversation_style,
        message_id=message_id
    ):
        if final:
            # Remember the bot's last message so the next turn has context.
            self.update_reply_cache(response["item"]["messages"][-1])
            return response
    # Only reached if the stream ends without a final chunk.
    # NOTE(review): close() is awaited above but not here — confirm whether
    # this un-awaited call is intentional or a missing ``await``.
    self.chat_hub.wss.close()
|
||||
|
||||
def update_reply_cache(
|
||||
self,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# encoding:utf-8
|
||||
import asyncio
|
||||
from model.model import Model
|
||||
from config import model_conf_val,common_conf_val
|
||||
from config import model_conf_val, common_conf_val
|
||||
from common import log
|
||||
from EdgeGPT import Chatbot, ConversationStyle
|
||||
from ImageGen import ImageGen
|
||||
@@ -23,87 +23,85 @@ class BingModel(Model):
|
||||
try:
|
||||
self.cookies = model_conf_val("bing", "cookies")
|
||||
self.jailbreak = model_conf_val("bing", "jailbreak")
|
||||
self.bot = SydneyBot(cookies=self.cookies,options={}) if(self.jailbreak) else Chatbot(cookies=self.cookies)
|
||||
self.bot = SydneyBot(cookies=self.cookies, options={}) if (
|
||||
self.jailbreak) else Chatbot(cookies=self.cookies)
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
log.warn(e)
|
||||
|
||||
async def reply_text_stream(self, query: str, context=None) -> dict:
|
||||
async def handle_answer(final, answer):
|
||||
if final:
|
||||
try:
|
||||
reply = self.build_source_attributions(answer, context)
|
||||
log.info("[NewBing] reply:{}",reply)
|
||||
yield True, reply
|
||||
except Exception as e:
|
||||
log.warn(answer)
|
||||
log.warn(e)
|
||||
await user_session.get(context['from_user_id'], None).reset()
|
||||
yield True, answer
|
||||
else:
|
||||
try:
|
||||
yield False, answer
|
||||
except Exception as e:
|
||||
log.warn(answer)
|
||||
log.warn(e)
|
||||
await user_session.get(context['from_user_id'], None).reset()
|
||||
yield True, answer
|
||||
|
||||
if not context or not context.get('type') or context.get('type') == 'TEXT':
|
||||
clear_memory_commands = common_conf_val(
|
||||
'clear_memory_commands', ['#清除记忆'])
|
||||
if query in clear_memory_commands:
|
||||
user_session[context['from_user_id']] = None
|
||||
yield True, '记忆已清除'
|
||||
bot = user_session.get(context['from_user_id'], None)
|
||||
if not bot:
|
||||
bot = self.bot
|
||||
else:
|
||||
query = self.get_quick_ask_query(query, context)
|
||||
user_session[context['from_user_id']] = bot
|
||||
log.info("[NewBing] query={}".format(query))
|
||||
if self.jailbreak:
|
||||
async for final, answer in bot.ask_stream(query, conversation_style=self.style, message_id=bot.user_message_id):
|
||||
async for result in handle_answer(final, answer):
|
||||
yield result
|
||||
else:
|
||||
async for final, answer in bot.ask_stream(query, conversation_style=self.style):
|
||||
async for result in handle_answer(final, answer):
|
||||
yield result
|
||||
|
||||
def reply(self, query: str, context=None) -> tuple[str, dict]:
|
||||
if not context or not context.get('type') or context.get('type') == 'TEXT':
|
||||
clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
|
||||
clear_memory_commands = common_conf_val(
|
||||
'clear_memory_commands', ['#清除记忆'])
|
||||
if query in clear_memory_commands:
|
||||
user_session[context['from_user_id']]=None
|
||||
user_session[context['from_user_id']] = None
|
||||
return '记忆已清除'
|
||||
bot = user_session.get(context['from_user_id'], None)
|
||||
if (bot == None):
|
||||
bot = self.bot
|
||||
else:
|
||||
if (len(query) == 1 and query.isdigit() and query != "0"):
|
||||
suggestion_dict = suggestion_session[context['from_user_id']]
|
||||
if (suggestion_dict != None):
|
||||
query = suggestion_dict[int(query)-1]
|
||||
if (query == None):
|
||||
return "输入的序号不在建议列表范围中"
|
||||
else:
|
||||
query = "在上面的基础上,"+query
|
||||
query = self.get_quick_ask_query(query, context)
|
||||
|
||||
user_session[context['from_user_id']] = bot
|
||||
log.info("[NewBing] query={}".format(query))
|
||||
if(self.jailbreak):
|
||||
task = bot.ask(query, conversation_style=self.style,message_id=bot.user_message_id)
|
||||
if (self.jailbreak):
|
||||
task = bot.ask(query, conversation_style=self.style,
|
||||
message_id=bot.user_message_id)
|
||||
else:
|
||||
task = bot.ask(query, conversation_style=self.style)
|
||||
|
||||
try:
|
||||
answer = asyncio.run(task)
|
||||
except Exception as e:
|
||||
bot.pop_last_conversation()
|
||||
log.exception(answer)
|
||||
return f"AI生成内容被微软内容过滤器拦截,已删除最后一次提问的记忆,请尝试使用其他文字描述问题,若AI依然无法正常回复,请使用{clear_memory_commands[0]}命令清除全部记忆"
|
||||
# 最新一条回复
|
||||
|
||||
answer = asyncio.run(task)
|
||||
if isinstance(answer, str):
|
||||
return answer
|
||||
try:
|
||||
reply = answer["item"]["messages"][-1]
|
||||
except Exception as e:
|
||||
self.reset_chat(context['from_user_id'])
|
||||
log.exception(answer)
|
||||
user_session.get(context['from_user_id'], None).reset()
|
||||
log.warn(answer)
|
||||
return "本轮对话已超时,已开启新的一轮对话,请重新提问。"
|
||||
reply_text = reply["text"]
|
||||
reference = ""
|
||||
if "sourceAttributions" in reply:
|
||||
for i, attribution in enumerate(reply["sourceAttributions"]):
|
||||
display_name = attribution["providerDisplayName"]
|
||||
url = attribution["seeMoreUrl"]
|
||||
reference += f"{i+1}、[{display_name}]({url})\n\n"
|
||||
|
||||
if len(reference) > 0:
|
||||
reference = "***\n"+reference
|
||||
|
||||
suggestion = ""
|
||||
if "suggestedResponses" in reply:
|
||||
suggestion_dict = dict()
|
||||
for i, attribution in enumerate(reply["suggestedResponses"]):
|
||||
suggestion_dict[i] = attribution["text"]
|
||||
suggestion += f">{i+1}、{attribution['text']}\n\n"
|
||||
suggestion_session[context['from_user_id']
|
||||
] = suggestion_dict
|
||||
|
||||
if len(suggestion) > 0:
|
||||
suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion
|
||||
|
||||
throttling = answer["item"]["throttling"]
|
||||
throttling_str = ""
|
||||
|
||||
if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]:
|
||||
self.reset_chat(context['from_user_id'])
|
||||
throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)"
|
||||
else:
|
||||
throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n"
|
||||
|
||||
response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}"
|
||||
log.info("[NewBing] reply={}", response)
|
||||
user_session[context['from_user_id']] = bot
|
||||
return response
|
||||
else:
|
||||
self.reset_chat(context['from_user_id'])
|
||||
log.warn("[NewBing] reply={}", answer)
|
||||
return "对话被接口拒绝,已开启新的一轮对话。"
|
||||
return self.build_source_attributions(answer, context)
|
||||
elif context.get('type', None) == 'IMAGE_CREATE':
|
||||
if functions.contain_chinese(query):
|
||||
return "ImageGen目前仅支持使用英文关键词生成图片"
|
||||
@@ -118,8 +116,58 @@ class BingModel(Model):
|
||||
log.info("[NewBing] image_list={}".format(img_list))
|
||||
return img_list
|
||||
except Exception as e:
|
||||
log.exception(e)
|
||||
log.warn(e)
|
||||
return "输入的内容可能违反微软的图片生成内容策略。过多的策略冲突可能会导致你被暂停访问。"
|
||||
|
||||
def reset_chat(self, from_user_id):
|
||||
asyncio.run(user_session.get(from_user_id, None).reset())
|
||||
def get_quick_ask_query(self, query, context):
|
||||
if (len(query) == 1 and query.isdigit() and query != "0"):
|
||||
suggestion_dict = suggestion_session[context['from_user_id']]
|
||||
if (suggestion_dict != None):
|
||||
query = suggestion_dict[int(query)-1]
|
||||
if (query == None):
|
||||
return "输入的序号不在建议列表范围中"
|
||||
else:
|
||||
query = "在上面的基础上,"+query
|
||||
return query
|
||||
|
||||
def build_source_attributions(self, answer, context):
|
||||
reference = ""
|
||||
reply = answer["item"]["messages"][-1]
|
||||
reply_text = reply["text"]
|
||||
if "sourceAttributions" in reply:
|
||||
for i, attribution in enumerate(reply["sourceAttributions"]):
|
||||
display_name = attribution["providerDisplayName"]
|
||||
url = attribution["seeMoreUrl"]
|
||||
reference += f"{i+1}、[{display_name}]({url})\n\n"
|
||||
|
||||
if len(reference) > 0:
|
||||
reference = "***\n"+reference
|
||||
|
||||
suggestion = ""
|
||||
if "suggestedResponses" in reply:
|
||||
suggestion_dict = dict()
|
||||
for i, attribution in enumerate(reply["suggestedResponses"]):
|
||||
suggestion_dict[i] = attribution["text"]
|
||||
suggestion += f">{i+1}、{attribution['text']}\n\n"
|
||||
suggestion_session[context['from_user_id']
|
||||
] = suggestion_dict
|
||||
|
||||
if len(suggestion) > 0:
|
||||
suggestion = "***\n你可以通过输入序号快速追问我以下建议问题:\n\n"+suggestion
|
||||
|
||||
throttling = answer["item"]["throttling"]
|
||||
throttling_str = ""
|
||||
|
||||
if throttling["numUserMessagesInConversation"] == throttling["maxNumUserMessagesInConversation"]:
|
||||
user_session.get(context['from_user_id'], None).reset()
|
||||
throttling_str = "(对话轮次已达上限,本次聊天已结束,将开启新的对话)"
|
||||
else:
|
||||
throttling_str = f"对话轮次: {throttling['numUserMessagesInConversation']}/{throttling['maxNumUserMessagesInConversation']}\n"
|
||||
|
||||
response = f"{reply_text}\n{reference}\n{suggestion}\n***\n{throttling_str}"
|
||||
log.info("[NewBing] reply={}", response)
|
||||
return response
|
||||
else:
|
||||
user_session.get(context['from_user_id'], None).reset()
|
||||
log.warn("[NewBing] reply={}", answer)
|
||||
return "对话被接口拒绝,已开启新的一轮对话。"
|
||||
|
||||
@@ -83,20 +83,32 @@ class ChatGPTModel(Model):
|
||||
return "请再问我一次吧"
|
||||
|
||||
|
||||
def reply_text_stream(self, query, new_query, user_id, retry_count=0):
|
||||
async def reply_text_stream(self, query, context, retry_count=0):
|
||||
try:
|
||||
res = openai.Completion.create(
|
||||
model="text-davinci-003", # 对话模型的名称
|
||||
prompt=new_query,
|
||||
user_id=context['from_user_id']
|
||||
new_query = Session.build_session_query(query, user_id)
|
||||
res = openai.ChatCompletion.create(
|
||||
model= model_conf(const.OPEN_AI).get("model") or "gpt-3.5-turbo", # 对话模型的名称
|
||||
messages=new_query,
|
||||
temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
|
||||
#max_tokens=4096, # 回复最大的字符数
|
||||
top_p=1,
|
||||
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
|
||||
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
|
||||
stop=["\n\n\n"],
|
||||
stream=True
|
||||
)
|
||||
return self._process_reply_stream(query, res, user_id)
|
||||
full_response = ""
|
||||
for chunk in res:
|
||||
log.debug(chunk)
|
||||
if (chunk["choices"][0]["finish_reason"]=="stop"):
|
||||
break
|
||||
chunk_message = chunk['choices'][0]['delta'].get("content")
|
||||
if(chunk_message):
|
||||
full_response+=chunk_message
|
||||
yield False,full_response
|
||||
Session.save_session(query, full_response, user_id)
|
||||
log.info("[chatgpt]: reply={}", full_response)
|
||||
yield True,full_response
|
||||
|
||||
except openai.error.RateLimitError as e:
|
||||
# rate limit exception
|
||||
@@ -104,45 +116,22 @@ class ChatGPTModel(Model):
|
||||
if retry_count < 1:
|
||||
time.sleep(5)
|
||||
log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
|
||||
return self.reply_text_stream(query, user_id, retry_count+1)
|
||||
yield True, self.reply_text_stream(query, user_id, retry_count+1)
|
||||
else:
|
||||
return "提问太快啦,请休息一下再问我吧"
|
||||
yield True, "提问太快啦,请休息一下再问我吧"
|
||||
except openai.error.APIConnectionError as e:
|
||||
log.warn(e)
|
||||
log.warn("[CHATGPT] APIConnection failed")
|
||||
return "我连接不到网络,请稍后重试"
|
||||
yield True, "我连接不到网络,请稍后重试"
|
||||
except openai.error.Timeout as e:
|
||||
log.warn(e)
|
||||
log.warn("[CHATGPT] Timeout")
|
||||
return "我没有收到消息,请稍后重试"
|
||||
yield True, "我没有收到消息,请稍后重试"
|
||||
except Exception as e:
|
||||
# unknown exception
|
||||
log.exception(e)
|
||||
Session.clear_session(user_id)
|
||||
return "请再问我一次吧"
|
||||
|
||||
|
||||
def _process_reply_stream(
|
||||
self,
|
||||
query: str,
|
||||
reply: dict,
|
||||
user_id: str
|
||||
) -> str:
|
||||
full_response = ""
|
||||
for response in reply:
|
||||
if response.get("choices") is None or len(response["choices"]) == 0:
|
||||
raise Exception("OpenAI API returned no choices")
|
||||
if response["choices"][0].get("finish_details") is not None:
|
||||
break
|
||||
if response["choices"][0].get("text") is None:
|
||||
raise Exception("OpenAI API returned no text")
|
||||
if response["choices"][0]["text"] == "<|endoftext|>":
|
||||
break
|
||||
yield response["choices"][0]["text"]
|
||||
full_response += response["choices"][0]["text"]
|
||||
if query and full_response:
|
||||
Session.save_session(query, full_response, user_id)
|
||||
|
||||
yield True, "请再问我一次吧"
|
||||
|
||||
def create_img(self, query, retry_count=0):
|
||||
try:
|
||||
|
||||
@@ -13,7 +13,9 @@ user_session = dict()
|
||||
class OpenAIModel(Model):
|
||||
def __init__(self):
|
||||
openai.api_key = model_conf(const.OPEN_AI).get('api_key')
|
||||
|
||||
proxy = model_conf(const.OPEN_AI).get('proxy')
|
||||
if proxy:
|
||||
openai.proxy = proxy
|
||||
|
||||
def reply(self, query, context=None):
|
||||
# acquire reply content
|
||||
@@ -72,36 +74,55 @@ class OpenAIModel(Model):
|
||||
return "请再问我一次吧"
|
||||
|
||||
|
||||
def reply_text_stream(self, query, new_query, user_id, retry_count=0):
|
||||
async def reply_text_stream(self, query, context, retry_count=0):
|
||||
try:
|
||||
user_id=context['from_user_id']
|
||||
new_query = Session.build_session_query(query, user_id)
|
||||
res = openai.Completion.create(
|
||||
model="text-davinci-003", # 对话模型的名称
|
||||
model= "text-davinci-003", # 对话模型的名称
|
||||
prompt=new_query,
|
||||
temperature=0.9, # 值在[0,1]之间,越大表示回复越具有不确定性
|
||||
max_tokens=1200, # 回复最大的字符数
|
||||
#max_tokens=4096, # 回复最大的字符数
|
||||
top_p=1,
|
||||
frequency_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
|
||||
presence_penalty=0.0, # [-2,2]之间,该值越大则更倾向于产生不同的内容
|
||||
stop=["\n\n\n"],
|
||||
stream=True
|
||||
)
|
||||
return self._process_reply_stream(query, res, user_id)
|
||||
full_response = ""
|
||||
for chunk in res:
|
||||
log.debug(chunk)
|
||||
if (chunk["choices"][0]["finish_reason"]=="stop"):
|
||||
break
|
||||
chunk_message = chunk['choices'][0].get("text")
|
||||
if(chunk_message):
|
||||
full_response+=chunk_message
|
||||
yield False,full_response
|
||||
Session.save_session(query, full_response, user_id)
|
||||
log.info("[chatgpt]: reply={}", full_response)
|
||||
yield True,full_response
|
||||
|
||||
except openai.error.RateLimitError as e:
|
||||
# rate limit exception
|
||||
log.warn(e)
|
||||
if retry_count < 1:
|
||||
time.sleep(5)
|
||||
log.warn("[OPEN_AI] RateLimit exceed, 第{}次重试".format(retry_count+1))
|
||||
return self.reply_text(query, user_id, retry_count+1)
|
||||
log.warn("[CHATGPT] RateLimit exceed, 第{}次重试".format(retry_count+1))
|
||||
yield True, self.reply_text_stream(query, user_id, retry_count+1)
|
||||
else:
|
||||
return "提问太快啦,请休息一下再问我吧"
|
||||
yield True, "提问太快啦,请休息一下再问我吧"
|
||||
except openai.error.APIConnectionError as e:
|
||||
log.warn(e)
|
||||
log.warn("[CHATGPT] APIConnection failed")
|
||||
yield True, "我连接不到网络,请稍后重试"
|
||||
except openai.error.Timeout as e:
|
||||
log.warn(e)
|
||||
log.warn("[CHATGPT] Timeout")
|
||||
yield True, "我没有收到消息,请稍后重试"
|
||||
except Exception as e:
|
||||
# unknown exception
|
||||
log.exception(e)
|
||||
Session.clear_session(user_id)
|
||||
return "请再问我一次吧"
|
||||
|
||||
yield True, "请再问我一次吧"
|
||||
|
||||
def _process_reply_stream(
|
||||
self,
|
||||
|
||||
Reference in New Issue
Block a user