RA
2023-03-26 18:01:22 +08:00
7 changed files with 42 additions and 18 deletions

View File

@@ -74,7 +74,6 @@ cp config-template.json config.json
To switch between models and applications, you only need to change the `type` field under the `model` and `channel` config blocks; this connects a different pathway. The sections below walk through the configuration and startup of each model and each application in turn.
## 2. Choosing a Model
### 1. ChatGPT
@@ -114,6 +113,7 @@ pip3 install --upgrade openai
+ `model`: the model name; currently `gpt-3.5-turbo`, `gpt-4`, and `gpt-4-32k` are accepted (the gpt-4 API is not yet open)
+ `proxy`: address of the proxy client; see [#56](https://github.com/zhayujie/bot-on-anything/issues/56) for details
+ `character_desc`: stores a passage you say to ChatGPT; it remembers it as its persona, so you can give it any personality you like
+ `max_history_num` [optional]: the maximum number of conversation turns to remember; once exceeded, the earliest memory is cleared (see the sketch below)
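To make these fields concrete, here is a hedged sketch of how they could feed a chat request with the pre-1.0 `openai` SDK this project imports; the dict literal, placeholder values, and call site are illustrative rather than the project's actual code:

```python
import openai

# Illustrative values only; the proxy URL and API key are placeholders.
model_conf = {
    "model": "gpt-3.5-turbo",
    "proxy": "http://127.0.0.1:7890",
    "character_desc": "You are a helpful assistant.",
    "max_history_num": 5,   # honoured later when the saved session is trimmed
}

openai.api_key = "YOUR_API_KEY"
if model_conf.get("proxy"):
    openai.proxy = model_conf["proxy"]   # pre-1.0 SDKs accept a module-level proxy

messages = [
    {"role": "system", "content": model_conf["character_desc"]},  # persona pinned first
    {"role": "user", "content": "Hello"},
]
reply = openai.ChatCompletion.create(model=model_conf["model"], messages=messages)
print(reply.choices[0].message.content)
```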
### 2. GPT-3.0
@@ -491,3 +491,13 @@ pip3 install PyJWT flask
Run locally: `python3 app.py`; once it is running, visit `http://127.0.0.1:80`
Run on a server: after deployment, visit `http://<public domain or IP>:<port>`
### Common Configuration
+ `clear_memory_commands`: in-conversation commands that actively clear the earlier context; a string array, so you can define your own command aliases (matched as sketched below)
  + default: `["#清除记忆"]`
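A minimal sketch of how a handler can consult this setting (mirroring the model-side change later in this commit); the function name and the `common` dict are illustrative:

```python
# Illustrative: look up the alias list from the "common" block and match the query.
def should_clear_memory(query, common):
    commands = common.get('clear_memory_commands', ['#清除记忆'])
    return query in commands

should_clear_memory('#清除记忆', {'clear_memory_commands': ['#清除记忆', '/reset']})  # True
```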
# Tutorial
1. Video tutorial: https://www.bilibili.com/video/BV1KM4y167e8

View File

@@ -16,20 +16,15 @@ cache = {}
@robot.text
def hello_world(msg):
with open('sensitive_words.txt', 'r', encoding='utf-8') as f:  # sensitive-word detection
sensitive_wordss = [msg.content[i:i+2] for i in range(0, len(msg.content), 2)]
sensitive_words = [line.strip() for line in f.readlines()]
found = False
# check whether the file is empty
if not os.path.getsize('sensitive_words.txt'):
found = False
else:
for i in sensitive_wordss:
if i in f.read():
found = True
break
else:
found = False
for word in sensitive_words:
if word != '' and word in msg.content:
found = True
break
if found:
return '输入内容包含敏感词汇'
return "输入内容敏感词汇"
else:
logger.info('[WX_Public] receive public msg: {}, userId: {}'.format(msg.content, msg.source))
key = msg.content + '|' + msg.source
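Taken out of the handler, the new check boils down to a plain substring test against a newline-separated word list. A self-contained sketch (the file name comes from the diff; the function and the empty-file guard are illustrative):

```python
import os

def contains_sensitive_word(text, path='sensitive_words.txt'):
    """Return True if any non-empty word from the list appears in text."""
    # An empty or missing word list means nothing can match.
    if not os.path.exists(path) or os.path.getsize(path) == 0:
        return False
    with open(path, 'r', encoding='utf-8') as f:
        words = [line.strip() for line in f]
    return any(word and word in text for word in words)
```

Compared with the removed lines, the word list is read once with `readlines()` and each whole word is tested against the message, instead of slicing the message into two-character chunks and testing them against `f.read()`, which returns an empty string after the first call once the file pointer has reached the end.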

View File

@@ -59,5 +59,8 @@
"http_auth_password": "6.67428e-11",
"port": "80"
}
},
"common": {
"clear_memory_commands": ["#清除记忆"]
}
}

View File

@@ -52,3 +52,12 @@ def channel_conf_val(channel_type, key, default=None):
# common default config
return config.get('channel').get(key, default)
return val
def common_conf():
return config.get('common')
def common_conf_val(key, default=None):
return config.get('common').get(key, default)
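A self-contained sketch of how the new helper might be exercised, assuming `config` is the module-level dict that config.py loads from config.json (the loader sits outside this hunk); the empty-dict fallback for a missing `common` block is an addition for illustration, not part of the commit:

```python
import json

# Assumption: the parsed config.json lives in a module-level dict named config.
with open('config.json', 'r', encoding='utf-8') as f:
    config = json.load(f)

def common_conf_val(key, default=None):
    # .get('common', {}) keeps the lookup safe when no "common" block exists.
    return config.get('common', {}).get(key, default)

print(common_conf_val('clear_memory_commands', ['#清除记忆']))  # -> ['#清除记忆']
```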

View File

@@ -1,7 +1,7 @@
# encoding:utf-8
from model.model import Model
from config import model_conf
from config import model_conf, common_conf_val
from common import const
from common import log
import openai
@@ -22,7 +22,8 @@ class ChatGPTModel(Model):
if not context or not context.get('type') or context.get('type') == 'TEXT':
log.info("[CHATGPT] query={}".format(query))
from_user_id = context['from_user_id']
if query == '#清除记忆':
clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
Session.clear_session(from_user_id)
return '记忆已清除'
@@ -195,6 +196,7 @@ class Session(object):
@staticmethod
def save_session(query, answer, user_id, used_tokens=0):
max_tokens = model_conf(const.OPEN_AI).get('conversation_max_tokens')
max_history_num = model_conf(const.OPEN_AI).get('max_history_num', None)
if not max_tokens or max_tokens > 4000:
# default value
max_tokens = 1000
@@ -209,6 +211,11 @@ class Session(object):
session.pop(1)
session.pop(1)
if max_history_num is not None:
while len(session) > max_history_num * 2 + 1:
session.pop(1)
session.pop(1)
@staticmethod
def clear_session(user_id):
user_session[user_id] = []
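The `max_history_num * 2 + 1` bound makes sense if the session list keeps the system prompt (built from `character_desc`) at index 0 and appends one user/assistant pair per turn, which is what the paired `session.pop(1)` calls above imply: N remembered turns occupy 2N + 1 entries, and popping index 1 twice removes exactly the oldest turn. A small standalone check of that invariant (names illustrative):

```python
# Illustrative check of the 2 * max_history_num + 1 bound.
session = [{"role": "system", "content": "persona"}]
max_history_num = 2

for turn in range(5):
    session.append({"role": "user", "content": f"question {turn}"})
    session.append({"role": "assistant", "content": f"answer {turn}"})
    while len(session) > max_history_num * 2 + 1:
        session.pop(1)   # oldest user message
        session.pop(1)   # oldest assistant message

assert len(session) == 5                      # system prompt + 2 turns
assert session[1]["content"] == "question 3"  # only the newest turns remain
```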

View File

@@ -1,7 +1,7 @@
# encoding:utf-8
from model.model import Model
from config import model_conf
from config import model_conf, common_conf_val
from common import const
from common import log
import openai
@@ -20,7 +20,8 @@ class OpenAIModel(Model):
if not context or not context.get('type') or context.get('type') == 'TEXT':
log.info("[OPEN_AI] query={}".format(query))
from_user_id = context['from_user_id']
if query == '#清除记忆':
clear_memory_commands = common_conf_val('clear_memory_commands', ['#清除记忆'])
if query in clear_memory_commands:
Session.clear_session(from_user_id)
return '记忆已清除'

View File

@@ -1 +0,0 @@