From ad77ad8f2b719cbcd2076e9265bfbdc79cd22c9e Mon Sep 17 00:00:00 2001 From: KurolVeko <128911459+Kurolviko@users.noreply.github.com> Date: Fri, 30 Aug 2024 17:00:51 +0800 Subject: [PATCH 1/4] Lower Gemini's safety thresholds Gemini's default safety thresholds are set too high, resulting in frequent censorship of generated text. I have lowered the thresholds for all four safety categories according to Google's documentation. --- bot/gemini/google_gemini_bot.py | 44 ++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/bot/gemini/google_gemini_bot.py b/bot/gemini/google_gemini_bot.py index 8a4100a..86eb124 100644 --- a/bot/gemini/google_gemini_bot.py +++ b/bot/gemini/google_gemini_bot.py @@ -14,6 +14,7 @@ from bridge.reply import Reply, ReplyType from common.log import logger from config import conf from bot.baidu.baidu_wenxin_session import BaiduWenxinSession +from google.generativeai.types import HarmCategory, HarmBlockThreshold # OpenAI对话模型API (可用) @@ -38,16 +39,41 @@ class GoogleGeminiBot(Bot): gemini_messages = self._convert_to_gemini_messages(self.filter_messages(session.messages)) genai.configure(api_key=self.api_key) model = genai.GenerativeModel(self.model) - response = model.generate_content(gemini_messages) - reply_text = response.text - self.sessions.session_reply(reply_text, session_id) - logger.info(f"[Gemini] reply={reply_text}") - return Reply(ReplyType.TEXT, reply_text) + + # 添加安全设置 + safety_settings = { + HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE, + HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE, + } + + # 生成回复,包含安全设置 + response = model.generate_content( + gemini_messages, + safety_settings=safety_settings + ) + if response.candidates and response.candidates[0].content: + reply_text = 
response.candidates[0].content.parts[0].text + logger.info(f"[Gemini] reply={reply_text}") + self.sessions.session_reply(reply_text, session_id) + return Reply(ReplyType.TEXT, reply_text) + else: + # 没有有效响应内容,可能内容被屏蔽,输出安全评分 + logger.warning("[Gemini] No valid response generated. Checking safety ratings.") + if hasattr(response, 'candidates') and response.candidates: + for rating in response.candidates[0].safety_ratings: + logger.warning(f"Safety rating: {rating.category} - {rating.probability}") + error_message = "No valid response generated due to safety constraints." + self.sessions.session_reply(error_message, session_id) + return Reply(ReplyType.ERROR, error_message) + except Exception as e: - logger.error("[Gemini] fetch reply error, may contain unsafe content") - logger.error(e) - return Reply(ReplyType.ERROR, "invoke [Gemini] api failed!") - + logger.error(f"[Gemini] Error generating response: {str(e)}", exc_info=True) + error_message = "Failed to invoke [Gemini] api!" + self.sessions.session_reply(error_message, session_id) + return Reply(ReplyType.ERROR, error_message) + def _convert_to_gemini_messages(self, messages: list): res = [] for msg in messages: From f6e680545353bc50d42910cf081245851e073ef7 Mon Sep 17 00:00:00 2001 From: 6vision Date: Sat, 31 Aug 2024 16:09:10 +0800 Subject: [PATCH 2/4] fix: "model":"moonshot", which defaults to "moonshot-v1-32k". 
--- bot/moonshot/moonshot_bot.py | 5 ++++- bridge/bridge.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/bot/moonshot/moonshot_bot.py b/bot/moonshot/moonshot_bot.py index 7d2589c..61eb26d 100644 --- a/bot/moonshot/moonshot_bot.py +++ b/bot/moonshot/moonshot_bot.py @@ -19,8 +19,11 @@ class MoonshotBot(Bot): def __init__(self): super().__init__() self.sessions = SessionManager(MoonshotSession, model=conf().get("model") or "moonshot-v1-128k") + model = conf().get("model") or "moonshot-v1-128k" + if model == "moonshot": + model = "moonshot-v1-32k" self.args = { - "model": conf().get("model") or "moonshot-v1-128k", # 对话模型的名称 + "model": model, # 对话模型的名称 "temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。 "top_p": conf().get("top_p", 1.0), # 使用默认值 } diff --git a/bridge/bridge.py b/bridge/bridge.py index b7b3ebf..40f0432 100644 --- a/bridge/bridge.py +++ b/bridge/bridge.py @@ -46,7 +46,7 @@ class Bridge(object): if model_type in ["claude"]: self.btype["chat"] = const.CLAUDEAI - if model_type in ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]: + if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]: self.btype["chat"] = const.MOONSHOT if model_type in ["abab6.5-chat"]: From 6b02bae9577352517f38dbbbd3c6f1b6cdc9ea07 Mon Sep 17 00:00:00 2001 From: KurolVeko <128911459+KuroIVeko@users.noreply.github.com> Date: Thu, 5 Sep 2024 10:59:57 +0800 Subject: [PATCH 3/4] Update bridge.py --- bridge/bridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bridge/bridge.py b/bridge/bridge.py index b7b3ebf..a602159 100644 --- a/bridge/bridge.py +++ b/bridge/bridge.py @@ -38,7 +38,7 @@ class Bridge(object): self.btype["chat"] = const.QWEN_DASHSCOPE if model_type and model_type.startswith("gemini"): self.btype["chat"] = const.GEMINI - if model_type in [const.ZHIPU_AI]: + if model_type and model_type.startswith("glm"): self.btype["chat"] = const.ZHIPU_AI if model_type and 
model_type.startswith("claude-3"): self.btype["chat"] = const.CLAUDEAPI From 111ad44029e2fa6e52a36a8acfe2b52a8a420146 Mon Sep 17 00:00:00 2001 From: KurolVeko <128911459+KuroIVeko@users.noreply.github.com> Date: Thu, 5 Sep 2024 11:07:06 +0800 Subject: [PATCH 4/4] Update const.py --- common/const.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/common/const.py b/common/const.py index e2e6a00..b099030 100644 --- a/common/const.py +++ b/common/const.py @@ -57,11 +57,22 @@ GEMINI_PRO = "gemini-1.0-pro" GEMINI_15_flash = "gemini-1.5-flash" GEMINI_15_PRO = "gemini-1.5-pro" +GLM_4 = "glm-4" +GLM_4_PLUS = "glm-4-plus" +GLM_4_flash = "glm-4-flash" +GLM_4_LONG = "glm-4-long" +GLM_4_ALLTOOLS = "glm-4-alltools" +GLM_4_0520 = "glm-4-0520" +GLM_4_AIR = "glm-4-air" +GLM_4_AIRX = "glm-4-airx" + MODEL_LIST = [ GPT35, GPT35_0125, GPT35_1106, "gpt-3.5-turbo-16k", GPT_4o, GPT_4O_0806, GPT_4o_MINI, GPT4_TURBO, GPT4_TURBO_PREVIEW, GPT4_TURBO_01_25, GPT4_TURBO_11_06, GPT4, GPT4_32k, GPT4_06_13, GPT4_32k_06_13, WEN_XIN, WEN_XIN_4, - XUNFEI, ZHIPU_AI, MOONSHOT, MiniMax, + XUNFEI, + ZHIPU_AI, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS, GLM_4_0520, GLM_4_AIR, GLM_4_AIRX, + MOONSHOT, MiniMax, GEMINI, GEMINI_PRO, GEMINI_15_flash, GEMINI_15_PRO, "claude", "claude-3-haiku", "claude-3-sonnet", "claude-3-opus", "claude-3-opus-20240229", "claude-3.5-sonnet", "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",