Refactor model management and configuration loading to support multimodal and reasoning models, improve API key management, and refine the frontend model selector and version display

Zylan
2025-03-22 19:37:42 +08:00
parent be8e83d762
commit fa654207c8
13 changed files with 963 additions and 205 deletions

app.py

@@ -1,4 +1,4 @@
from flask import Flask, jsonify, render_template, request
from flask import Flask, jsonify, render_template, request, send_from_directory
from flask_socketio import SocketIO
import pyautogui
import base64
@@ -10,9 +10,17 @@ from PIL import Image, ImageDraw
import pyperclip
from models import ModelFactory
import time
import os
import json
app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*", ping_timeout=30, ping_interval=5, max_http_buffer_size=50 * 1024 * 1024)
# Path to the configuration directory
CONFIG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
# Initialize the model factory
ModelFactory.initialize()
def get_local_ip():
try:
@@ -67,15 +75,21 @@ def handle_disconnect():
def stream_model_response(response_generator, sid):
def stream_model_response(response_generator, sid, model_name=None):
"""Stream model responses to the client"""
try:
print("Starting response streaming...")
# Determine whether this is a reasoning model
is_reasoning = model_name and ModelFactory.is_reasoning(model_name)
if is_reasoning:
print(f"使用推理模型 {model_name},将显示思考过程")
# Initialization: send the started status
socketio.emit('claude_response', {
'status': 'started',
'content': ''
'content': '',
'is_reasoning': is_reasoning
}, room=sid)
print("Sent initial status to client")
@@ -101,27 +115,33 @@ def stream_model_response(response_generator, sid):
# Handle each response status
if status == 'thinking':
# Use the full thinking content provided by the model
thinking_buffer = content
# Throttle emits to at least 0.3s apart
current_time = time.time()
if current_time - last_emit_time >= 0.3:
socketio.emit('claude_response', {
'status': 'thinking',
'content': thinking_buffer
}, room=sid)
last_emit_time = current_time
# Only process the thinking stage for reasoning models
if is_reasoning:
# Use the full thinking content provided by the model
thinking_buffer = content
# Throttle emits to at least 0.3s apart
current_time = time.time()
if current_time - last_emit_time >= 0.3:
socketio.emit('claude_response', {
'status': 'thinking',
'content': thinking_buffer,
'is_reasoning': True
}, room=sid)
last_emit_time = current_time
elif status == 'thinking_complete':
# Use the complete thinking content
thinking_buffer = content
print(f"Thinking complete, total length: {len(thinking_buffer)} chars")
socketio.emit('claude_response', {
'status': 'thinking_complete',
'content': thinking_buffer
}, room=sid)
# Only process the thinking stage for reasoning models
if is_reasoning:
# Use the complete thinking content
thinking_buffer = content
print(f"Thinking complete, total length: {len(thinking_buffer)} chars")
socketio.emit('claude_response', {
'status': 'thinking_complete',
'content': thinking_buffer,
'is_reasoning': True
}, room=sid)
elif status == 'streaming':
# Use the full content provided by the model
@@ -132,7 +152,8 @@ def stream_model_response(response_generator, sid):
if current_time - last_emit_time >= 0.3:
socketio.emit('claude_response', {
'status': 'streaming',
'content': response_buffer
'content': response_buffer,
'is_reasoning': is_reasoning
}, room=sid)
last_emit_time = current_time
@@ -140,17 +161,20 @@ def stream_model_response(response_generator, sid):
# Make sure the final complete content is sent
socketio.emit('claude_response', {
'status': 'completed',
'content': content or response_buffer
'content': content or response_buffer,
'is_reasoning': is_reasoning
}, room=sid)
print("Response completed")
elif status == 'error':
# Forward error states as-is
response['is_reasoning'] = is_reasoning
socketio.emit('claude_response', response, room=sid)
print(f"Error: {response.get('error', 'Unknown error')}")
# Forward any other status as-is
else:
response['is_reasoning'] = is_reasoning
socketio.emit('claude_response', response, room=sid)
except Exception as e:
@@ -158,7 +182,8 @@ def stream_model_response(response_generator, sid):
print(error_msg)
socketio.emit('claude_response', {
'status': 'error',
'error': error_msg
'error': error_msg,
'is_reasoning': model_name and ModelFactory.is_reasoning(model_name)
}, room=sid)
@socketio.on('request_screenshot')
@@ -227,6 +252,7 @@ def handle_text_extraction(data):
raise ValueError("Invalid Mathpix API key format. Expected format: 'app_id:app_key'")
print("Creating Mathpix model instance...")
# Only pass the required parameters; ModelFactory.create_model handles the different model types
model = ModelFactory.create_model(
model_name='mathpix',
api_key=mathpix_key
@@ -262,15 +288,53 @@ def handle_analyze_text(data):
settings = data.get('settings', {})
sid = request.sid
print("Selected model:", settings.get('model', 'claude-3-7-sonnet-20250219'))
# Read the model capability info passed from the frontend settings
model_capabilities = settings.get('modelCapabilities', {})
is_reasoning = model_capabilities.get('isReasoning', False)
# Get API key and create model
# Get the model name, provider and API key
model_name = settings.get('model', 'claude-3-7-sonnet-20250219')
api_key = settings.get('api_keys', {}).get(model_name)
model_provider = settings.get('modelInfo', {}).get('provider', '').lower()
print(f"Selected model: {model_name}, Provider: {model_provider}")
# Get the API keys - accept both the apiKeys and api_keys formats
api_keys = settings.get('apiKeys', {}) or settings.get('api_keys', {})
print("Debug - 接收到的API密钥(文本分析):", api_keys)
# Decide which API key ID to use based on the provider or the model name
api_key_id = None
# Try matching by provider first
if model_provider == 'anthropic':
api_key_id = "AnthropicApiKey"
elif model_provider == 'openai':
api_key_id = "OpenaiApiKey"
elif model_provider == 'deepseek':
api_key_id = "DeepseekApiKey"
else:
# If the provider is unavailable, fall back to matching by model name
if "claude" in model_name.lower():
api_key_id = "AnthropicApiKey"
elif any(keyword in model_name.lower() for keyword in ["gpt", "openai"]):
api_key_id = "OpenaiApiKey"
elif "deepseek" in model_name.lower():
api_key_id = "DeepseekApiKey"
api_key = api_keys.get(api_key_id)
print(f"Debug - 使用API密钥ID: {api_key_id}, 密钥值是否存在: {bool(api_key)}")
language = settings.get('language', '中文')
# Validate required settings
if not api_key:
raise ValueError("API key is required for the selected model")
raise ValueError(f"API key is required for the selected model (keyId: {api_key_id})")
# Log with model name for better debugging
print(f"Using API key for {model_name}: {api_key[:6] if api_key else 'None'}...")
print("Selected model:", model_name)
print("Response language:", language)
print(f"Model features: Reasoning={is_reasoning}")
# Configure proxy settings if enabled
proxies = None
@@ -283,18 +347,19 @@ def handle_analyze_text(data):
}
try:
# Create model instance using factory
# Create model instance using factory - reasoning models do not take a temperature parameter
model = ModelFactory.create_model(
model_name=model_name,
api_key=api_key,
temperature=float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt')
temperature=None if is_reasoning else float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt'),
language=language
)
# Start streaming in a separate thread
Thread(
target=stream_model_response,
args=(model.analyze_text(text, proxies), sid)
args=(model.analyze_text(text, proxies), sid, model_name)
).start()
except Exception as e:
@@ -313,36 +378,60 @@ def handle_analyze_text(data):
@socketio.on('analyze_image')
def handle_analyze_image(data):
try:
# Validate the incoming data
if not data or not isinstance(data, dict):
raise ValueError("Invalid request data")
image_data = data.get('image')
if not image_data:
raise ValueError("No image data provided")
# Check the image size; overly large images can cause the connection to drop
image_size_bytes = len(image_data) * 3 / 4 # rough size of the decoded base64 payload
if image_size_bytes > 10 * 1024 * 1024: # 10MB
raise ValueError("Image too large, please crop to a smaller area or use text extraction")
print("Starting image analysis...")
base64_data = data.get('image', '')
settings = data.get('settings', {})
# No need to split here; the frontend already strips the data URL prefix
# _, base64_data = image_data_url.split(',', 1)
base64_data = image_data
# First read the model capability info passed from the frontend settings
model_capabilities = settings.get('modelCapabilities', {})
is_multimodal = model_capabilities.get('supportsMultimodal', False)
is_reasoning = model_capabilities.get('isReasoning', False)
# Get API key and create model
# Get the model name, provider and API key
model_name = settings.get('model', 'claude-3-7-sonnet-20250219')
api_key = settings.get('api_keys', {}).get(model_name)
# Validate required settings
model_provider = settings.get('modelInfo', {}).get('provider', '').lower()
print(f"Selected model: {model_name}, Provider: {model_provider}")
# Get the API keys - accept both the apiKeys and api_keys formats
api_keys = settings.get('apiKeys', {}) or settings.get('api_keys', {})
print("Debug - 接收到的API密钥:", api_keys)
# Decide which API key ID to use based on the provider or the model name
api_key_id = None
# Try matching by provider first
if model_provider == 'anthropic':
api_key_id = "AnthropicApiKey"
elif model_provider == 'openai':
api_key_id = "OpenaiApiKey"
elif model_provider == 'deepseek':
api_key_id = "DeepseekApiKey"
else:
# If the provider is unavailable, fall back to matching by model name
if "claude" in model_name.lower():
api_key_id = "AnthropicApiKey"
elif any(keyword in model_name.lower() for keyword in ["gpt", "openai"]):
api_key_id = "OpenaiApiKey"
elif "deepseek" in model_name.lower():
api_key_id = "DeepseekApiKey"
api_key = api_keys.get(api_key_id)
print(f"Debug - 使用API密钥ID: {api_key_id}, 密钥值是否存在: {bool(api_key)}")
language = settings.get('language', '中文')
# Validate required params
if not base64_data:
raise ValueError("No image data provided")
if not api_key:
raise ValueError(f"API key is required for the selected model: {model_name}")
# Log with model name for better debugging
print(f"Using API key for {model_name}: {api_key[:6]}...")
raise ValueError(f"API key is required for the selected model (keyId: {api_key_id})")
# Log model info for debugging
print("Selected model:", model_name)
print("Response language:", language)
print(f"Model capabilities: Multimodal={is_multimodal}, Reasoning={is_reasoning}")
# Configure proxy settings if enabled
proxies = None
@@ -354,27 +443,60 @@ def handle_analyze_image(data):
'https': f'http://{proxy_host}:{proxy_port}'
}
try:
# Acknowledge the client first, confirming the request was received, to avoid a timeout disconnect
# Note: do not return here, otherwise the rest of the code would never run
socketio.emit('request_acknowledged', {
'status': 'received',
'message': 'Image received, analysis in progress'
}, room=request.sid)
# Acknowledge the client first, confirming the request was received, to avoid a timeout disconnect
socketio.emit('request_acknowledged', {
'status': 'received',
'message': 'Image received, analysis in progress'
}, room=request.sid)
# If the model is not multimodal, extract the text first
extracted_text = None
if not is_multimodal:
mathpix_key = settings.get('mathpixApiKey')
if not mathpix_key:
raise ValueError("非多模态模型需要Mathpix API Key进行文本提取")
print("非多模态模型,需要先提取文本...")
mathpix_model = ModelFactory.create_model('mathpix', mathpix_key)
# Create model instance using factory
# This assumes MathpixModel provides an extract_full_text method;
# if it does not, one has to be implemented, or another extraction path used
try:
extracted_text = mathpix_model.extract_full_text(base64_data)
print("文本提取成功,长度:", len(extracted_text))
# Tell the user that text extraction has finished
socketio.emit('text_extracted', {
'status': 'success',
'message': '图像文本提取成功,正在分析...',
'for_analysis': True
}, room=request.sid)
except Exception as e:
raise ValueError(f"文本提取失败: {str(e)}")
try:
# Create model instance using factory - reasoning models do not take a temperature parameter
model = ModelFactory.create_model(
model_name=model_name,
api_key=api_key,
temperature=float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt')
temperature=None if is_reasoning else float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt'),
language=language
)
# Start streaming in a separate thread
Thread(
target=stream_model_response,
args=(model.analyze_image(base64_data, proxies), request.sid)
).start()
if not is_multimodal and extracted_text:
# For non-multimodal models, analyze the extracted text
Thread(
target=stream_model_response,
args=(model.analyze_text(extracted_text, proxies), request.sid, model_name)
).start()
else:
# For multimodal models, analyze the image directly
Thread(
target=stream_model_response,
args=(model.analyze_image(base64_data, proxies), request.sid, model_name)
).start()
except Exception as e:
socketio.emit('claude_response', {
@@ -417,11 +539,63 @@ def run_tray():
icon = create_tray_icon()
icon.run()
# Route for serving configuration files
@app.route('/config/<path:filename>')
def serve_config(filename):
return send_from_directory(CONFIG_DIR, filename)
# API endpoint that returns information about all models
@app.route('/api/models', methods=['GET'])
def get_models():
"""返回可用的模型列表"""
models = ModelFactory.get_available_models()
return jsonify(models)
def load_model_config():
"""加载模型配置信息"""
try:
config_path = os.path.join(CONFIG_DIR, 'models.json')
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
return config
except Exception as e:
print(f"加载模型配置失败: {e}")
return {
"providers": {},
"models": {}
}
# Replaces the removed before_first_request decorator
def init_model_config():
"""初始化模型配置"""
try:
model_config = load_model_config()
# Update ModelFactory's model information
if hasattr(ModelFactory, 'update_model_capabilities'):
ModelFactory.update_model_capabilities(model_config)
print("已加载模型配置")
except Exception as e:
print(f"初始化模型配置失败: {e}")
# Register the initialization hook to run before request handling
@app.before_request
def before_request_handler():
# Use a flag to track whether initialization has already run
if not getattr(app, '_model_config_initialized', False):
init_model_config()
app._model_config_initialized = True
if __name__ == '__main__':
local_ip = get_local_ip()
print(f"Local IP Address: {local_ip}")
print(f"Connect from your mobile device using: {local_ip}:5000")
# Load the model configuration
model_config = load_model_config()
if hasattr(ModelFactory, 'update_model_capabilities'):
ModelFactory.update_model_capabilities(model_config)
print("已加载模型配置信息")
# Run system tray icon in a separate thread
tray_thread = Thread(target=run_tray)
tray_thread.daemon = True
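The key-ID resolution above is duplicated between handle_analyze_text and handle_analyze_image; it could be factored into a single helper. A minimal sketch of such a helper, assuming the mapping stays exactly as in this commit (the function name is illustrative and not part of the diff):

from typing import Optional

def resolve_api_key_id(model_provider: str, model_name: str) -> Optional[str]:
    """Map a provider (or, failing that, a model name) to the frontend API key ID."""
    provider_to_key = {
        'anthropic': 'AnthropicApiKey',
        'openai': 'OpenaiApiKey',
        'deepseek': 'DeepseekApiKey',
    }
    key_id = provider_to_key.get((model_provider or '').lower())
    if key_id:
        return key_id
    # Fallback: match by model name when no provider is supplied
    name = (model_name or '').lower()
    if 'claude' in name:
        return 'AnthropicApiKey'
    if 'gpt' in name or 'openai' in name:
        return 'OpenaiApiKey'
    if 'deepseek' in name:
        return 'DeepseekApiKey'
    return None

Both handlers could then call resolve_api_key_id(model_provider, model_name) and share one source of truth for the mapping.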

config/models.json (new file)

@@ -0,0 +1,53 @@
{
"providers": {
"anthropic": {
"name": "Anthropic",
"api_key_id": "AnthropicApiKey",
"class_name": "AnthropicModel"
},
"openai": {
"name": "OpenAI",
"api_key_id": "OpenaiApiKey",
"class_name": "OpenAIModel"
},
"deepseek": {
"name": "DeepSeek",
"api_key_id": "DeepseekApiKey",
"class_name": "DeepSeekModel"
}
},
"models": {
"claude-3-7-sonnet-20250219": {
"name": "Claude 3.7 Sonnet",
"provider": "anthropic",
"supportsMultimodal": true,
"isReasoning": true,
"version": "20250219",
"description": "强大的Claude 3.7 Sonnet模型支持图像理解和思考过程"
},
"gpt-4o-2024-11-20": {
"name": "GPT-4o",
"provider": "openai",
"supportsMultimodal": true,
"isReasoning": false,
"version": "2024-11-20",
"description": "OpenAI的GPT-4o模型支持图像理解"
},
"o3-mini": {
"name": "o3-mini",
"provider": "openai",
"supportsMultimodal": false,
"isReasoning": true,
"version": "2025-01-31",
"description": "OpenAI的o3-mini模型支持图像理解和思考过程"
},
"deepseek-r1": {
"name": "DeepSeek-R1",
"provider": "deepseek",
"supportsMultimodal": false,
"isReasoning": true,
"version": "latest",
"description": "DeepSeek推理模型提供详细思考过程仅支持文本"
}
}
}
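Each model entry points at a provider, and each provider names the BaseModel subclass that ModelFactory.initialize() imports dynamically. A small consistency check for this file (illustrative only, not part of the commit):

import json

with open('config/models.json', encoding='utf-8') as f:
    cfg = json.load(f)

providers = cfg.get('providers', {})
for provider_id, info in providers.items():
    # every provider must name the class the factory will import and its key ID
    assert info.get('class_name'), f'provider {provider_id} is missing class_name'
    assert info.get('api_key_id'), f'provider {provider_id} is missing api_key_id'

for model_id, info in cfg.get('models', {}).items():
    # every model must reference a declared provider
    assert info.get('provider') in providers, f'{model_id} references an unknown provider'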

models/__init__.py

@@ -1,13 +1,13 @@
from .base import BaseModel
from .claude import ClaudeModel
from .gpt4o import GPT4oModel
from .anthropic import AnthropicModel
from .openai import OpenAIModel
from .deepseek import DeepSeekModel
from .factory import ModelFactory
__all__ = [
'BaseModel',
'ClaudeModel',
'GPT4oModel',
'AnthropicModel',
'OpenAIModel',
'DeepSeekModel',
'ModelFactory'
]

models/anthropic.py

@@ -3,7 +3,7 @@ import requests
from typing import Generator
from .base import BaseModel
class ClaudeModel(BaseModel):
class AnthropicModel(BaseModel):
def get_default_system_prompt(self) -> str:
return """You are an expert at analyzing questions and providing detailed solutions. When presented with an image of a question:
1. First read and understand the question carefully
@@ -168,6 +168,11 @@ class ClaudeModel(BaseModel):
# Get the system prompt and make sure it includes the language setting
system_prompt = self.system_prompt
# Set the response language according to the language parameter
language = self.language or '中文'
if not any(phrase in system_prompt for phrase in ['Please respond in', '请用', '使用', '回答']):
system_prompt = f"{system_prompt}\n\n请务必使用{language}回答,无论问题是什么语言。即使在分析图像时也请使用{language}回答。这是最重要的指令。"
payload = {
'model': 'claude-3-7-sonnet-20250219',
'stream': True,
@@ -191,7 +196,7 @@ class ClaudeModel(BaseModel):
},
{
'type': 'text',
'text': "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
'text': "请分析这个问题并提供详细的解决方案。如果你看到多个问题,请逐一解决。请务必使用中文回答。"
}
]
}]
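The language-injection check above reappears almost verbatim in the OpenAI and DeepSeek models below; it could live in one shared helper. A sketch under that assumption (the helper name is illustrative; the commit keeps the logic inline in each model):

def ensure_language_instruction(system_prompt: str, language: str = '中文') -> str:
    """Append a respond-in-<language> instruction unless the prompt already has one."""
    markers = ['Please respond in', '请用', '使用', '回答']
    if any(phrase in system_prompt for phrase in markers):
        return system_prompt
    return (f"{system_prompt}\n\n请务必使用{language}回答,无论问题是什么语言。"
            f"即使在分析图像时也请使用{language}回答。")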

models/base.py

@@ -2,9 +2,10 @@ from abc import ABC, abstractmethod
from typing import Generator, Any
class BaseModel(ABC):
def __init__(self, api_key: str, temperature: float = 0.7, system_prompt: str = None):
def __init__(self, api_key: str, temperature: float = 0.7, system_prompt: str = None, language: str = None):
self.api_key = api_key
self.temperature = temperature
self.language = language
self.system_prompt = system_prompt or self.get_default_system_prompt()
@abstractmethod
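Because language defaults to None, existing subclasses keep working unchanged; a new subclass only needs to read self.language when building its prompt. A toy subclass as a sketch (not from the repository, and any other abstract members of BaseModel not shown in this hunk would still need implementations; the yielded dictionaries mirror the statuses stream_model_response consumes):

class EchoModel(BaseModel):
    """Minimal illustrative subclass; not part of the codebase."""

    def get_default_system_prompt(self) -> str:
        return 'You are a helpful assistant.'

    def analyze_text(self, text, proxies=None):
        # Reply in the configured language, falling back to Chinese like the app does
        lang = self.language or '中文'
        yield {'status': 'streaming', 'content': f'[{lang}] {text}'}
        yield {'status': 'completed', 'content': f'[{lang}] {text}'}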

models/deepseek.py

@@ -105,9 +105,18 @@ class DeepSeekModel(BaseModel):
client = OpenAI(**client_args)
# Check whether the system prompt already contains a language instruction
system_prompt = self.system_prompt
language = self.language or '中文'
if not any(phrase in system_prompt for phrase in ['Please respond in', '请用', '使用', '回答']):
system_prompt = f"{system_prompt}\n\n请务必使用{language}回答,无论问题是什么语言。即使在分析图像时也请使用{language}回答。"
response = client.chat.completions.create(
model=self.get_model_identifier(),
messages=[{
'role': 'system',
'content': system_prompt
}, {
'role': 'user',
'content': f"Here's an image of a question to analyze: data:image/png;base64,{image_data}"
}],

models/factory.py

@@ -1,20 +1,102 @@
from typing import Dict, Type
from typing import Dict, Type, Any
import json
import os
import importlib
from .base import BaseModel
from .claude import ClaudeModel
from .gpt4o import GPT4oModel
from .deepseek import DeepSeekModel
from .mathpix import MathpixModel
from .mathpix import MathpixModel # MathpixModel is still imported directly because it is a special-purpose tool
class ModelFactory:
_models: Dict[str, Type[BaseModel]] = {
'claude-3-7-sonnet-20250219': ClaudeModel,
'gpt-4o-2024-11-20': GPT4oModel,
'deepseek-reasoner': DeepSeekModel,
'mathpix': MathpixModel
}
# Basic model information, including type and capabilities
_models: Dict[str, Dict[str, Any]] = {}
_class_map: Dict[str, Type[BaseModel]] = {}
@classmethod
def initialize(cls):
"""从配置文件加载模型信息"""
try:
config_path = os.path.join(os.path.dirname(__file__), '..', 'config', 'models.json')
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
# Load provider information and the class map
providers = config.get('providers', {})
for provider_id, provider_info in providers.items():
class_name = provider_info.get('class_name')
if class_name:
# Dynamically import the model class from the current package
module = importlib.import_module(f'.{provider_id.lower()}', package=__package__)
cls._class_map[provider_id] = getattr(module, class_name)
# Load model information
for model_id, model_info in config.get('models', {}).items():
provider_id = model_info.get('provider')
if provider_id and provider_id in cls._class_map:
cls._models[model_id] = {
'class': cls._class_map[provider_id],
'is_multimodal': model_info.get('supportsMultimodal', False),
'is_reasoning': model_info.get('isReasoning', False),
'display_name': model_info.get('name', model_id),
'description': model_info.get('description', '')
}
# Add the Mathpix model (a special-purpose tool model)
cls._models['mathpix'] = {
'class': MathpixModel,
'is_multimodal': True,
'is_reasoning': False,
'display_name': 'Mathpix OCR',
'description': '文本提取工具,适用于数学公式和文本',
'is_ocr_only': True
}
print(f"已从配置加载 {len(cls._models)} 个模型")
except Exception as e:
print(f"加载模型配置失败: {str(e)}")
cls._initialize_defaults()
@classmethod
def _initialize_defaults(cls):
"""初始化默认模型(当配置加载失败时)"""
print("使用默认模型配置")
# Import all model classes as a fallback
from .anthropic import AnthropicModel
from .openai import OpenAIModel
from .deepseek import DeepSeekModel
cls._models = {
'claude-3-7-sonnet-20250219': {
'class': AnthropicModel,
'is_multimodal': True,
'is_reasoning': True,
'display_name': 'Claude 3.7 Sonnet',
'description': '强大的Claude 3.7 Sonnet模型支持图像理解和思考过程'
},
'gpt-4o-2024-11-20': {
'class': OpenAIModel,
'is_multimodal': True,
'is_reasoning': False,
'display_name': 'GPT-4o',
'description': 'OpenAI的GPT-4o模型支持图像理解'
},
'deepseek-reasoner': {
'class': DeepSeekModel,
'is_multimodal': False,
'is_reasoning': True,
'display_name': 'DeepSeek Reasoner',
'description': 'DeepSeek推理模型提供详细思考过程仅支持文本'
},
'mathpix': {
'class': MathpixModel,
'is_multimodal': True,
'is_reasoning': False,
'display_name': 'Mathpix OCR',
'description': '文本提取工具,适用于数学公式和文本',
'is_ocr_only': True
}
}
@classmethod
def create_model(cls, model_name: str, api_key: str, temperature: float = 0.7, system_prompt: str = None) -> BaseModel:
def create_model(cls, model_name: str, api_key: str, temperature: float = 0.7, system_prompt: str = None, language: str = None) -> BaseModel:
"""
Create and return an instance of the specified model.
@@ -23,6 +105,7 @@ class ModelFactory:
api_key: The API key for the model
temperature: Optional temperature parameter for response generation
system_prompt: Optional custom system prompt
language: Optional language preference for responses
Returns:
An instance of the specified model
@@ -30,28 +113,86 @@ class ModelFactory:
Raises:
ValueError: If the model_name is not recognized
"""
model_class = cls._models.get(model_name)
if not model_class:
model_info = cls._models.get(model_name)
if not model_info:
raise ValueError(f"Unknown model: {model_name}")
return model_class(
api_key=api_key,
temperature=temperature,
system_prompt=system_prompt
)
model_class = model_info['class']
# For the Mathpix model, do not pass the language parameter
if model_name == 'mathpix':
return model_class(
api_key=api_key,
temperature=temperature,
system_prompt=system_prompt
)
else:
# For all other models, pass every parameter
return model_class(
api_key=api_key,
temperature=temperature,
system_prompt=system_prompt,
language=language
)
@classmethod
def get_available_models(cls) -> list[str]:
def get_available_models(cls) -> list[Dict[str, Any]]:
"""Return a list of available models with their information"""
models_info = []
for model_id, info in cls._models.items():
# Skip OCR-only tool models
if info.get('is_ocr_only', False):
continue
models_info.append({
'id': model_id,
'display_name': info.get('display_name', model_id),
'description': info.get('description', ''),
'is_multimodal': info.get('is_multimodal', False),
'is_reasoning': info.get('is_reasoning', False)
})
return models_info
@classmethod
def get_model_ids(cls) -> list[str]:
"""Return a list of available model identifiers"""
return list(cls._models.keys())
return [model_id for model_id in cls._models.keys()
if not cls._models[model_id].get('is_ocr_only', False)]
@classmethod
def register_model(cls, model_name: str, model_class: Type[BaseModel]) -> None:
def is_multimodal(cls, model_name: str) -> bool:
"""判断模型是否支持多模态输入"""
return cls._models.get(model_name, {}).get('is_multimodal', False)
@classmethod
def is_reasoning(cls, model_name: str) -> bool:
"""判断模型是否为推理模型"""
return cls._models.get(model_name, {}).get('is_reasoning', False)
@classmethod
def get_model_display_name(cls, model_name: str) -> str:
"""获取模型的显示名称"""
return cls._models.get(model_name, {}).get('display_name', model_name)
@classmethod
def register_model(cls, model_name: str, model_class: Type[BaseModel],
is_multimodal: bool = False, is_reasoning: bool = False,
display_name: str = None, description: str = None) -> None:
"""
Register a new model type with the factory.
Args:
model_name: The identifier for the model
model_class: The model class to register
is_multimodal: Whether the model supports image input
is_reasoning: Whether the model provides reasoning process
display_name: Human-readable name for the model
description: Description of the model
"""
cls._models[model_name] = model_class
cls._models[model_name] = {
'class': model_class,
'is_multimodal': is_multimodal,
'is_reasoning': is_reasoning,
'display_name': display_name or model_name,
'description': description or ''
}
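Putting the refactored factory together, a typical call sequence looks roughly like this (a sketch with placeholder values; analyze_text is assumed to yield the status dictionaries that stream_model_response consumes):

from models import ModelFactory

ModelFactory.initialize()                      # loads config/models.json
print(ModelFactory.get_available_models())     # what the /api/models route returns

model_name = 'claude-3-7-sonnet-20250219'
model = ModelFactory.create_model(
    model_name=model_name,
    api_key='sk-placeholder',                  # placeholder, not a real key
    temperature=None if ModelFactory.is_reasoning(model_name) else 0.7,
    system_prompt=None,
    language='中文',
)
for chunk in model.analyze_text('1 + 1 = ?', None):
    print(chunk.get('status'), chunk.get('content', ''))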

models/mathpix.py

@@ -21,6 +21,7 @@ class MathpixModel(BaseModel):
Raises:
ValueError: If the API key format is invalid
"""
# Only pass the required parameters; do not pass the language parameter
super().__init__(api_key, temperature, system_prompt)
try:
self.app_id, self.app_key = api_key.split(':')
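handle_analyze_image above calls mathpix_model.extract_full_text(...), a method this diff does not add (app.py's own comment flags that assumption). One possible implementation following Mathpix's public v3/text endpoint; treat it as a sketch rather than the project's actual method:

import requests

def extract_full_text(self, image_base64: str) -> str:
    """Return the plain text Mathpix recognizes in a base64-encoded image."""
    response = requests.post(
        'https://api.mathpix.com/v3/text',
        headers={'app_id': self.app_id, 'app_key': self.app_key},
        json={
            'src': f'data:image/png;base64,{image_base64}',
            'formats': ['text'],
        },
        timeout=30,
    )
    response.raise_for_status()
    return response.json().get('text', '')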

models/openai.py

@@ -3,7 +3,7 @@ from typing import Generator, Dict, Optional
from openai import OpenAI
from .base import BaseModel
class GPT4oModel(BaseModel):
class OpenAIModel(BaseModel):
def get_default_system_prompt(self) -> str:
return """You are an expert at analyzing questions and providing detailed solutions. When presented with an image of a question:
1. First read and understand the question carefully
@@ -126,11 +126,17 @@ class GPT4oModel(BaseModel):
# Initialize OpenAI client
client = OpenAI(api_key=self.api_key)
# Check whether the system prompt already contains a language instruction
system_prompt = self.system_prompt
language = self.language or '中文'
if not any(phrase in system_prompt for phrase in ['Please respond in', '请用', '使用', '回答']):
system_prompt = f"{system_prompt}\n\n请务必使用{language}回答,无论问题是什么语言。即使在分析图像时也请使用{language}回答。"
# Prepare messages with image
messages = [
{
"role": "system",
"content": self.system_prompt
"content": system_prompt
},
{
"role": "user",

(frontend main script: SnapSolver class)

@@ -189,9 +189,8 @@ class SnapSolver {
this.imagePreview.classList.remove('hidden');
this.emptyState.classList.add('hidden');
// Show the Claude and extract-text buttons
this.sendToClaudeBtn.classList.remove('hidden');
this.extractTextBtn.classList.remove('hidden');
// Show the appropriate buttons for the current model type
this.updateImageActionButtons();
// Restore button state
this.captureBtn.disabled = false;
@@ -221,9 +220,8 @@ class SnapSolver {
this.imagePreview.classList.remove('hidden');
this.emptyState.classList.add('hidden');
// Show the Claude and extract-text buttons
this.sendToClaudeBtn.classList.remove('hidden');
this.extractTextBtn.classList.remove('hidden');
// Show the appropriate buttons for the current model type
this.updateImageActionButtons();
// Initialize the cropper
this.initializeCropper();
@@ -599,6 +597,13 @@ class SnapSolver {
this.setupAnalysisEvents();
this.setupKeyboardShortcuts();
this.setupThinkingToggle();
// Listen for model selection changes and update the UI
if (window.settingsManager && window.settingsManager.modelSelect) {
window.settingsManager.modelSelect.addEventListener('change', () => {
this.updateImageActionButtons();
});
}
}
setupCaptureEvents() {
@@ -741,10 +746,9 @@ class SnapSolver {
this.extractTextBtn.innerHTML = '<i class="fas fa-spinner fa-spin"></i><span>提取中...</span>';
const settings = window.settingsManager.getSettings();
const mathpixAppId = settings.mathpixAppId;
const mathpixAppKey = settings.mathpixAppKey;
const mathpixApiKey = settings.mathpixApiKey;
if (!mathpixAppId || !mathpixAppKey) {
if (!mathpixApiKey || mathpixApiKey === ':') {
window.uiManager.showToast('请在设置中输入Mathpix API凭据', 'error');
document.getElementById('settingsPanel').classList.remove('hidden');
this.extractTextBtn.disabled = false;
@@ -772,7 +776,7 @@ class SnapSolver {
this.socket.emit('extract_text', {
image: this.croppedImage.split(',')[1],
settings: {
mathpixApiKey: `${mathpixAppId}:${mathpixAppKey}`
mathpixApiKey: mathpixApiKey
}
});
@@ -806,12 +810,15 @@ class SnapSolver {
const settings = window.settingsManager.getSettings();
const apiKeys = {};
Object.entries(window.settingsManager.apiKeyInputs).forEach(([model, input]) => {
if (input.value) {
apiKeys[model] = input.value;
Object.keys(window.settingsManager.apiKeyInputs).forEach(keyId => {
const input = window.settingsManager.apiKeyInputs[keyId];
if (input && input.value) {
apiKeys[keyId] = input.value;
}
});
console.log("Debug - 发送文本分析API密钥:", apiKeys);
// Clear previous results
this.responseContent.innerHTML = '';
this.thinkingContent.innerHTML = '';
@@ -827,8 +834,13 @@ class SnapSolver {
text: text,
settings: {
...settings,
api_keys: apiKeys,
apiKeys: apiKeys,
model: settings.model || 'claude-3-7-sonnet-20250219',
modelInfo: settings.modelInfo || {},
modelCapabilities: {
supportsMultimodal: settings.modelInfo?.supportsMultimodal || false,
isReasoning: settings.modelInfo?.isReasoning || false
}
}
});
} catch (error) {
@@ -972,12 +984,15 @@ class SnapSolver {
// Collect API keys
const apiKeys = {};
Object.entries(window.settingsManager.apiKeyInputs).forEach(([model, input]) => {
if (input.value) {
apiKeys[model] = input.value;
Object.keys(window.settingsManager.apiKeyInputs).forEach(keyId => {
const input = window.settingsManager.apiKeyInputs[keyId];
if (input && input.value) {
apiKeys[keyId] = input.value;
}
});
console.log("Debug - 发送API密钥:", apiKeys);
try {
// Process the image data and strip the base64 prefix
let processedImageData = imageData;
@@ -990,8 +1005,13 @@ class SnapSolver {
image: processedImageData,
settings: {
...settings,
api_keys: apiKeys,
apiKeys: apiKeys,
model: settings.model || 'claude-3-7-sonnet-20250219',
modelInfo: settings.modelInfo || {},
modelCapabilities: {
supportsMultimodal: settings.modelInfo?.supportsMultimodal || false,
isReasoning: settings.modelInfo?.isReasoning || false
}
}
});
@@ -1045,6 +1065,9 @@ class SnapSolver {
// Set the default UI state
this.enableInterface();
// Update the image action buttons
this.updateImageActionButtons();
console.log('SnapSolver initialization complete');
}
@@ -1161,6 +1184,30 @@ class SnapSolver {
}
}
}
// New method: update the image action buttons according to the selected model
updateImageActionButtons() {
if (!window.settingsManager) return;
const settings = window.settingsManager.getSettings();
const isMultimodalModel = settings.modelInfo?.supportsMultimodal || false;
// Visibility logic for the post-capture action buttons
if (this.sendToClaudeBtn && this.extractTextBtn) {
if (!isMultimodalModel) {
// Non-multimodal models: only show the extract-text button and hide the send-to-AI button
this.sendToClaudeBtn.classList.add('hidden');
this.extractTextBtn.classList.remove('hidden');
} else {
// Multimodal models: show both buttons
if (!this.imagePreview.classList.contains('hidden')) {
// Only show the buttons when an image is present
this.sendToClaudeBtn.classList.remove('hidden');
this.extractTextBtn.classList.remove('hidden');
}
}
}
}
}
// Initialize the application when the DOM is loaded
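For reference, after these changes the socket payload the frontend emits for analyze_image (and, minus the image field, for analyze_text) has roughly the following shape; every value below is a placeholder:

payload = {
    'image': '<base64 image data, data-URL prefix already stripped>',
    'settings': {
        'model': 'claude-3-7-sonnet-20250219',
        'temperature': '0.7',
        'language': '中文',
        'systemPrompt': '...',
        'apiKeys': {'AnthropicApiKey': 'sk-placeholder'},
        'modelInfo': {
            'provider': 'anthropic',
            'supportsMultimodal': True,
            'isReasoning': True,
        },
        'modelCapabilities': {'supportsMultimodal': True, 'isReasoning': True},
        'mathpixApiKey': 'app_id:app_key',
        'proxyEnabled': False,
    },
}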

(frontend settings script: SettingsManager class)

@@ -1,8 +1,147 @@
class SettingsManager {
constructor() {
// Initialize properties
this.modelDefinitions = {};
this.providerDefinitions = {};
// Initialize UI elements
this.initializeElements();
this.loadSettings();
this.setupEventListeners();
// Load the model configuration
this.loadModelConfig()
.then(() => {
// After the configuration loads successfully, run the remaining initialization
this.updateModelOptions();
this.loadSettings();
this.setupEventListeners();
this.updateUIBasedOnModelType();
})
.catch(error => {
console.error('加载模型配置失败:', error);
window.uiManager?.showToast('加载模型配置失败,使用默认配置', 'error');
// Fall back to the default configuration
this.setupDefaultModels();
this.updateModelOptions();
this.loadSettings();
this.setupEventListeners();
this.updateUIBasedOnModelType();
});
}
// Load model definitions from the configuration file
async loadModelConfig() {
try {
// Fetch the model list from the API endpoint
const response = await fetch('/api/models');
if (!response.ok) {
throw new Error(`加载模型列表失败: ${response.status} ${response.statusText}`);
}
// Get the model list
const modelsList = await response.json();
// Get the provider configuration
const configResponse = await fetch('/config/models.json');
if (!configResponse.ok) {
throw new Error(`加载提供商配置失败: ${configResponse.status} ${configResponse.statusText}`);
}
const config = await configResponse.json();
// Store the provider definitions
this.providerDefinitions = config.providers || {};
// Build the model definitions
this.modelDefinitions = {};
// Create model definitions from the model list returned by the API
modelsList.forEach(model => {
this.modelDefinitions[model.id] = {
name: model.display_name,
provider: this.getProviderIdByModel(model.id, config),
supportsMultimodal: model.is_multimodal,
isReasoning: model.is_reasoning,
apiKeyId: this.getApiKeyIdByModel(model.id, config),
description: model.description,
version: this.getVersionByModel(model.id, config)
};
});
console.log('模型配置加载成功:', this.modelDefinitions);
} catch (error) {
console.error('加载模型配置时出错:', error);
throw error;
}
}
// Look up a model's provider ID in the configuration
getProviderIdByModel(modelId, config) {
const modelConfig = config.models[modelId];
return modelConfig ? modelConfig.provider : 'unknown';
}
// Look up a model's API key ID in the configuration
getApiKeyIdByModel(modelId, config) {
const modelConfig = config.models[modelId];
if (!modelConfig) return null;
const providerId = modelConfig.provider;
const provider = config.providers[providerId];
return provider ? provider.api_key_id : null;
}
// Look up a model's version in the configuration
getVersionByModel(modelId, config) {
const modelConfig = config.models[modelId];
return modelConfig ? modelConfig.version : 'latest';
}
// Set up default model definitions (used when loading the configuration fails)
setupDefaultModels() {
this.providerDefinitions = {
'anthropic': {
name: 'Anthropic',
api_key_id: 'AnthropicApiKey'
},
'openai': {
name: 'OpenAI',
api_key_id: 'OpenaiApiKey'
},
'deepseek': {
name: 'DeepSeek',
api_key_id: 'DeepseekApiKey'
}
};
this.modelDefinitions = {
'claude-3-7-sonnet-20250219': {
name: 'Claude 3.7 Sonnet',
provider: 'anthropic',
supportsMultimodal: true,
isReasoning: true,
apiKeyId: 'AnthropicApiKey',
version: '20250219'
},
'gpt-4o-2024-11-20': {
name: 'GPT-4o',
provider: 'openai',
supportsMultimodal: true,
isReasoning: false,
apiKeyId: 'OpenaiApiKey',
version: '2024-11-20'
},
'deepseek-reasoner': {
name: 'DeepSeek Reasoner',
provider: 'deepseek',
supportsMultimodal: false,
isReasoning: true,
apiKeyId: 'DeepseekApiKey',
version: 'latest'
}
};
console.log('使用默认模型配置');
}
initializeElements() {
@@ -11,6 +150,8 @@ class SettingsManager {
this.modelSelect = document.getElementById('modelSelect');
this.temperatureInput = document.getElementById('temperature');
this.temperatureValue = document.getElementById('temperatureValue');
this.temperatureGroup = document.querySelector('.setting-group:has(#temperature)') ||
document.querySelector('div.setting-group:has(input[id="temperature"])');
this.systemPromptInput = document.getElementById('systemPrompt');
this.languageInput = document.getElementById('language');
this.proxyEnabledInput = document.getElementById('proxyEnabled');
@@ -22,25 +163,29 @@ class SettingsManager {
this.mathpixAppIdInput = document.getElementById('mathpixAppId');
this.mathpixAppKeyInput = document.getElementById('mathpixAppKey');
// API Key elements
// API Key elements - all key input fields
this.apiKeyInputs = {
'claude-3-7-sonnet-20250219': document.getElementById('claudeApiKey'),
'gpt-4o-2024-11-20': document.getElementById('gpt4oApiKey'),
'deepseek-reasoner': document.getElementById('deepseekApiKey')
'AnthropicApiKey': document.getElementById('AnthropicApiKey'),
'OpenaiApiKey': document.getElementById('OpenaiApiKey'),
'DeepseekApiKey': document.getElementById('DeepseekApiKey'),
'mathpixAppId': this.mathpixAppIdInput,
'mathpixAppKey': this.mathpixAppKeyInput
};
// Settings toggle elements
this.settingsToggle = document.getElementById('settingsToggle');
this.closeSettings = document.getElementById('closeSettings');
// Grab all API key input group elements
this.apiKeyGroups = document.querySelectorAll('.api-key-group');
// Initialize API key toggle buttons
document.querySelectorAll('.toggle-api-key').forEach(button => {
button.addEventListener('click', (e) => {
const input = e.target.closest('.input-group').querySelector('input');
const input = e.currentTarget.closest('.input-group').querySelector('input');
const type = input.type === 'password' ? 'text' : 'password';
input.type = type;
const icon = e.target.querySelector('i');
const icon = e.currentTarget.querySelector('i');
if (icon) {
icon.className = `fas fa-${type === 'password' ? 'eye' : 'eye-slash'}`;
}
@@ -48,6 +193,49 @@ class SettingsManager {
});
}
// Update the model selection dropdown
updateModelOptions() {
// Clear existing options
this.modelSelect.innerHTML = '';
// Extract provider information
const providers = {};
Object.entries(this.providerDefinitions).forEach(([providerId, provider]) => {
providers[providerId] = provider.name;
});
// Create an option group for each provider
for (const [providerId, providerName] of Object.entries(providers)) {
const optgroup = document.createElement('optgroup');
optgroup.label = providerName;
// Filter this provider's models
const providerModels = Object.entries(this.modelDefinitions)
.filter(([_, modelInfo]) => modelInfo.provider === providerId)
.sort((a, b) => a[1].name.localeCompare(b[1].name));
// Add this provider's model options
for (const [modelId, modelInfo] of providerModels) {
const option = document.createElement('option');
option.value = modelId;
// Show the model name plus its version when the version is not 'latest'
let displayName = modelInfo.name;
if (modelInfo.version && modelInfo.version !== 'latest') {
displayName += ` (${modelInfo.version})`;
}
option.textContent = displayName;
optgroup.appendChild(option);
}
// Only add providers that have at least one model
if (optgroup.children.length > 0) {
this.modelSelect.appendChild(optgroup);
}
}
}
loadSettings() {
const settings = JSON.parse(localStorage.getItem('aiSettings') || '{}');
@@ -61,21 +249,28 @@ class SettingsManager {
// Load API keys
if (settings.apiKeys) {
Object.entries(this.apiKeyInputs).forEach(([model, input]) => {
if (settings.apiKeys[model]) {
input.value = settings.apiKeys[model];
Object.entries(this.apiKeyInputs).forEach(([keyId, input]) => {
if (settings.apiKeys[keyId]) {
input.value = settings.apiKeys[keyId];
}
});
}
if (settings.model) {
this.modelSelect.value = settings.model;
this.updateVisibleApiKey(settings.model);
// Select the model and update the related UI
let selectedModel = '';
if (settings.model && this.modelExists(settings.model)) {
selectedModel = settings.model;
this.modelSelect.value = selectedModel;
} else {
// Default to first model if none selected
this.updateVisibleApiKey(this.modelSelect.value);
// Default to first model if none selected or if saved model no longer exists
selectedModel = this.modelSelect.value;
}
// Update the related UI display
this.updateVisibleApiKey(selectedModel);
this.updateModelVersionDisplay(selectedModel);
if (settings.temperature) {
this.temperatureInput.value = settings.temperature;
this.temperatureValue.textContent = settings.temperature;
@@ -89,13 +284,74 @@ class SettingsManager {
if (settings.proxyPort) this.proxyPortInput.value = settings.proxyPort;
this.proxySettings.style.display = this.proxyEnabledInput.checked ? 'block' : 'none';
this.updateUIBasedOnModelType();
}
modelExists(modelId) {
return this.modelDefinitions.hasOwnProperty(modelId);
}
// Update the model version display
updateModelVersionDisplay(modelId) {
const modelVersionText = document.getElementById('modelVersionText');
if (!modelVersionText) return;
const model = this.modelDefinitions[modelId];
if (!model) {
modelVersionText.textContent = '-';
return;
}
// Show version information (if any)
if (model.version && model.version !== 'latest') {
modelVersionText.textContent = model.version;
} else if (model.version === 'latest') {
modelVersionText.textContent = '最新版';
} else {
modelVersionText.textContent = '-';
}
}
updateVisibleApiKey(selectedModel) {
const modelInfo = this.modelDefinitions[selectedModel];
if (!modelInfo) return;
const requiredApiKeyId = modelInfo.apiKeyId;
const providerInfo = this.providerDefinitions[modelInfo.provider];
// Highlight the required API key instead of hiding the ones that are not needed
this.apiKeyGroups.forEach(group => {
const modelValue = group.dataset.model;
group.style.display = modelValue === selectedModel ? 'block' : 'none';
const keyInputId = group.querySelector('input').id;
const isRequired = keyInputId === requiredApiKeyId;
// Add the highlight style to the required API key
if (isRequired) {
group.classList.add('api-key-active');
} else {
group.classList.remove('api-key-active');
}
// Update whether the Mathpix inputs are required
if ((keyInputId === 'mathpixAppId' || keyInputId === 'mathpixAppKey') &&
!modelInfo.supportsMultimodal) {
group.classList.add('api-key-active'); // non-multimodal models need Mathpix
}
});
// Update the model version display
this.updateModelVersionDisplay(selectedModel);
}
updateUIBasedOnModelType() {
const selectedModel = this.modelSelect.value;
const modelInfo = this.modelDefinitions[selectedModel];
if (!modelInfo) return;
if (this.temperatureGroup) {
this.temperatureGroup.style.display = modelInfo.isReasoning ? 'none' : 'block';
}
}
saveSettings() {
@@ -113,9 +369,9 @@ class SettingsManager {
};
// Save all API keys
Object.entries(this.apiKeyInputs).forEach(([model, input]) => {
Object.entries(this.apiKeyInputs).forEach(([keyId, input]) => {
if (input.value) {
settings.apiKeys[model] = input.value;
settings.apiKeys[keyId] = input.value;
}
});
@@ -125,7 +381,12 @@ class SettingsManager {
getApiKey() {
const selectedModel = this.modelSelect.value;
const apiKey = this.apiKeyInputs[selectedModel]?.value;
const modelInfo = this.modelDefinitions[selectedModel];
if (!modelInfo) return '';
const apiKeyId = modelInfo.apiKeyId;
const apiKey = this.apiKeyInputs[apiKeyId]?.value;
if (!apiKey) {
window.showToast('Please enter API key for the selected model', 'error');
@@ -139,22 +400,38 @@ class SettingsManager {
const language = this.languageInput.value || '中文';
const basePrompt = this.systemPromptInput.value || '';
// Check whether the system prompt already contains a language setting
let systemPrompt = basePrompt;
if (!basePrompt.includes('Please respond in') && !basePrompt.includes('请用') && !basePrompt.includes('使用')) {
systemPrompt = `${basePrompt}\n\n请务必使用${language}回答。`;
}
const selectedModel = this.modelSelect.value;
const modelInfo = this.modelDefinitions[selectedModel] || {};
return {
model: this.modelSelect.value,
model: selectedModel,
temperature: this.temperatureInput.value,
language: language,
systemPrompt: systemPrompt,
proxyEnabled: this.proxyEnabledInput.checked,
proxyHost: this.proxyHostInput.value,
proxyPort: this.proxyPortInput.value,
mathpixAppId: this.mathpixAppIdInput.value,
mathpixAppKey: this.mathpixAppKeyInput.value
mathpixApiKey: `${this.mathpixAppIdInput.value}:${this.mathpixAppKeyInput.value}`,
modelInfo: {
supportsMultimodal: modelInfo.supportsMultimodal || false,
isReasoning: modelInfo.isReasoning || false,
provider: modelInfo.provider || 'unknown'
}
};
}
getModelCapabilities(modelId) {
const model = this.modelDefinitions[modelId];
if (!model) return { supportsMultimodal: false, isReasoning: false };
return {
supportsMultimodal: model.supportsMultimodal,
isReasoning: model.isReasoning
};
}
@@ -170,6 +447,8 @@ class SettingsManager {
this.modelSelect.addEventListener('change', (e) => {
this.updateVisibleApiKey(e.target.value);
this.updateUIBasedOnModelType();
this.updateModelVersionDisplay(e.target.value);
this.saveSettings();
});

(stylesheet)

@@ -1288,3 +1288,41 @@ button:disabled {
align-self: flex-end;
}
}
/* Model version info styles */
.model-version-info {
margin-top: 5px;
font-size: 0.85em;
color: var(--text-secondary);
display: flex;
align-items: center;
}
.model-version-info i {
margin-right: 5px;
color: var(--accent);
}
/* Version info shown right after the model selector */
#modelSelect + .model-version-info {
margin-top: 5px;
}
/* API key highlight styles */
.api-key-group {
transition: all 0.3s ease;
border-left: 3px solid transparent;
padding-left: 0.5rem;
}
.api-key-active {
border-left: 3px solid var(--primary);
background-color: rgba(var(--primary-rgb), 0.05);
padding-left: 0.5rem;
border-radius: 4px;
}
.api-key-active label {
color: var(--primary);
font-weight: 600;
}

(settings panel template)

@@ -111,68 +111,17 @@
</button>
</div>
<div class="settings-content">
<!-- 1. The most frequently used section first: AI model selection -->
<div class="settings-section">
<h3><i class="fas fa-font"></i> OCR 配</h3>
<h3><i class="fas fa-robot"></i> 模型设</h3>
<div class="setting-group">
<label for="mathpixAppId">Mathpix App ID</label>
<div class="input-group">
<input type="password" id="mathpixAppId" placeholder="输入 Mathpix App ID">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group">
<label for="mathpixAppKey">Mathpix App Key</label>
<div class="input-group">
<input type="password" id="mathpixAppKey" placeholder="输入 Mathpix App Key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
</div>
<div class="settings-section">
<h3><i class="fas fa-robot"></i> AI 配置</h3>
<div class="setting-group api-key-group" data-model="claude-3-7-sonnet-20250219">
<label for="claudeApiKey">Claude API Key</label>
<div class="input-group">
<input type="password" id="claudeApiKey" placeholder="输入 Claude API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group" data-model="gpt-4o-2024-11-20">
<label for="gpt4oApiKey">GPT-4o API Key</label>
<div class="input-group">
<input type="password" id="gpt4oApiKey" placeholder="输入 GPT-4o API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group" data-model="deepseek-reasoner">
<label for="deepseekApiKey">DeepSeek API Key</label>
<div class="input-group">
<input type="password" id="deepseekApiKey" placeholder="输入 DeepSeek API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group">
<label for="language"><i class="fas fa-language"></i> 语言</label>
<input type="text" id="language" value="中文" placeholder="输入首选语言">
</div>
<div class="setting-group">
<label for="modelSelect"><i class="fas fa-microchip"></i> 模型</label>
<label for="modelSelect"><i class="fas fa-microchip"></i> 选择模型</label>
<select id="modelSelect" class="select-styled">
<option value="claude-3-7-sonnet-20250219">Claude 3.7 Sonnet</option>
<option value="gpt-4o-2024-11-20">GPT-4o</option>
<option value="deepseek-reasoner">DeepSeek Reasoner</option>
<!-- Options are loaded dynamically via JavaScript -->
</select>
<div id="modelVersionInfo" class="model-version-info">
<i class="fas fa-info-circle"></i> 版本: <span id="modelVersionText">-</span>
</div>
</div>
<div class="setting-group">
<label for="temperature"><i class="fas fa-thermometer-half"></i> 温度</label>
@@ -187,8 +136,63 @@
</div>
</div>
<!-- 2. All API keys grouped in one section -->
<div class="settings-section">
<h3><i class="fas fa-globe"></i> 代理设置</h3>
<h3><i class="fas fa-key"></i> API密钥设置</h3>
<div class="setting-group api-key-group">
<label for="AnthropicApiKey">Anthropic API Key</label>
<div class="input-group">
<input type="password" id="AnthropicApiKey" placeholder="输入 Anthropic API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group">
<label for="OpenaiApiKey">OpenAI API Key</label>
<div class="input-group">
<input type="password" id="OpenaiApiKey" placeholder="输入 OpenAI API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group">
<label for="DeepseekApiKey">DeepSeek API Key</label>
<div class="input-group">
<input type="password" id="DeepseekApiKey" placeholder="输入 DeepSeek API key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group">
<label for="mathpixAppId">Mathpix App ID</label>
<div class="input-group">
<input type="password" id="mathpixAppId" placeholder="输入 Mathpix App ID">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
<div class="setting-group api-key-group">
<label for="mathpixAppKey">Mathpix App Key</label>
<div class="input-group">
<input type="password" id="mathpixAppKey" placeholder="输入 Mathpix App Key">
<button class="btn-icon toggle-api-key">
<i class="fas fa-eye"></i>
</button>
</div>
</div>
</div>
<!-- 3. Less frequently used settings come last -->
<div class="settings-section">
<h3><i class="fas fa-cog"></i> 其他设置</h3>
<div class="setting-group">
<label for="language"><i class="fas fa-language"></i> 语言</label>
<input type="text" id="language" value="中文" placeholder="输入首选语言">
</div>
<div class="setting-group">
<label class="checkbox-label">
<input type="checkbox" id="proxyEnabled">