feat: support o1-preview and o1-mini models

Author: 6vision
Date: 2024-09-13 10:13:23 +08:00
Parent: bb5ecfc398
Commit: c8cf27b544
2 changed files with 15 additions and 4 deletions


@@ -5,7 +5,7 @@ import time
 import openai
 import openai.error
 import requests
-
+from common import const
 from bot.bot import Bot
 from bot.chatgpt.chat_gpt_session import ChatGPTSession
 from bot.openai.open_ai_image import OpenAIImage
@@ -15,7 +15,7 @@ from bridge.reply import Reply, ReplyType
 from common.log import logger
 from common.token_bucket import TokenBucket
 from config import conf, load_config
-
+from bot.baidu.baidu_wenxin_session import BaiduWenxinSession
 
 # OpenAI chat model API (available)
 class ChatGPTBot(Bot, OpenAIImage):
@@ -30,10 +30,13 @@ class ChatGPTBot(Bot, OpenAIImage):
             openai.proxy = proxy
         if conf().get("rate_limit_chatgpt"):
             self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
-
+        conf_model = conf().get("model") or "gpt-3.5-turbo"
         self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
+        # o1 models do not support system prompts; temporarily reuse the Wenxin model's session
+        if conf_model in [const.O1, const.O1_MINI]:
+            self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
         self.args = {
-            "model": conf().get("model") or "gpt-3.5-turbo",  # name of the chat model
+            "model": conf_model,  # name of the chat model
             "temperature": conf().get("temperature", 0.9),  # value in [0,1]; higher values make replies less deterministic
             # "max_tokens":4096,  # maximum number of tokens in the reply
             "top_p": conf().get("top_p", 1),
@@ -42,6 +45,11 @@ class ChatGPTBot(Bot, OpenAIImage):
             "request_timeout": conf().get("request_timeout", None),  # request timeout; the OpenAI API defaults to 600, and hard questions usually need longer
             "timeout": conf().get("request_timeout", None),  # retry timeout; requests are retried automatically within this window
         }
+        # o1 models fix some of these parameters; remove them for now
+        if conf_model in [const.O1, const.O1_MINI]:
+            remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
+            for key in remove_keys:
+                self.args.pop(key, None)  # the None default avoids an error if a key is missing
 
     def reply(self, query, context=None):
         # acquire reply content
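
Reviewer note: the hunks above both swap the session class for o1 models and strip the sampling parameters they reject. Below is a minimal, standalone sketch of that argument handling; build_args and fake_conf are illustrative names rather than project code, and the defaults mirror the values visible in this diff.

O1 = "o1-preview"
O1_MINI = "o1-mini"


def build_args(fake_conf: dict) -> dict:
    # Stand-in for the bot's self.args construction; fake_conf replaces conf().
    conf_model = fake_conf.get("model") or "gpt-3.5-turbo"
    args = {
        "model": conf_model,
        "temperature": fake_conf.get("temperature", 0.9),
        "top_p": fake_conf.get("top_p", 1),
        "frequency_penalty": fake_conf.get("frequency_penalty", 0.0),
        "presence_penalty": fake_conf.get("presence_penalty", 0.0),
        "request_timeout": fake_conf.get("request_timeout"),
        "timeout": fake_conf.get("request_timeout"),
    }
    # o1 models pin the sampling parameters, so drop them before calling the API.
    if conf_model in [O1, O1_MINI]:
        for key in ["temperature", "top_p", "frequency_penalty", "presence_penalty"]:
            args.pop(key, None)  # None default: no error if a key is absent
    return args


print(build_args({"model": "o1-preview"}))  # sampling parameters stripped
print(build_args({"model": "gpt-4"}))       # full argument set kept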


@@ -38,6 +38,9 @@ GPT4_32k = "gpt-4-32k"
 GPT4_06_13 = "gpt-4-0613"
 GPT4_32k_06_13 = "gpt-4-32k-0613"
+O1 = "o1-preview"
+O1_MINI = "o1-mini"
 WHISPER_1 = "whisper-1"
 TTS_1 = "tts-1"
 TTS_1_HD = "tts-1-hd"
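
For anyone trying the new models, switching should only require pointing the bot's model setting at one of these constants. The fragment below is an illustrative check, not project code; conf_fragment is a made-up stand-in for the real configuration and only uses keys that appear elsewhere in this diff.

O1 = "o1-preview"
O1_MINI = "o1-mini"

# Hypothetical config fragment; the "model" key is what selects the o1 path.
conf_fragment = {
    "model": "o1-mini",   # or "o1-preview"
    "temperature": 0.9,   # ignored for o1 models: the bot now strips it before the request
}

# The new bot branches key off membership in the o1 constant pair.
if conf_fragment["model"] in [O1, O1_MINI]:
    print("o1 path: Wenxin-style session, sampling parameters removed")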