添加最大输出Token和推理深度设置,优化思考预算管理,改进前端设置界面,提升用户体验

This commit is contained in:
Zylan
2025-03-22 23:28:35 +08:00
parent 07545c6750
commit 707f2728f3
5 changed files with 400 additions and 231 deletions

305
app.py
View File

@@ -12,6 +12,7 @@ from models import ModelFactory
import time
import os
import json
import traceback
app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*", ping_timeout=30, ping_interval=5, max_http_buffer_size=50 * 1024 * 1024)
@@ -73,7 +74,51 @@ def handle_connect():
def handle_disconnect():
print('Client disconnected')
def create_model_instance(model_id, api_keys, settings):
    """Create and configure a model instance for the requested model.

    Args:
        model_id: Model identifier string (e.g. "claude-3-7-sonnet-20250219").
        api_keys: Mapping of API-key ids (e.g. "AnthropicApiKey") to key values.
        settings: Client-supplied settings dict; may contain 'modelInfo',
            'maxTokens', 'temperature', 'systemPrompt' and 'language'.

    Returns:
        A configured model instance with its `max_tokens` attribute set.

    Raises:
        ValueError: if no API key is available for the resolved provider.
    """
    model_info = settings.get('modelInfo', {})
    is_reasoning = model_info.get('isReasoning', False)
    provider = model_info.get('provider', '').lower()

    api_key_id = _resolve_api_key_id(provider, model_id)
    api_key = api_keys.get(api_key_id)
    if not api_key:
        raise ValueError(f"API key is required for the selected model (keyId: {api_key_id})")

    # Maximum output tokens, default 8192. Guard against non-numeric
    # client-supplied values so a bad setting falls back instead of crashing.
    try:
        max_tokens = int(settings.get('maxTokens', 8192))
    except (TypeError, ValueError):
        max_tokens = 8192

    # Reasoning models manage their own sampling, so temperature is omitted.
    model_instance = ModelFactory.create_model(
        model_name=model_id,
        api_key=api_key,
        temperature=None if is_reasoning else float(settings.get('temperature', 0.7)),
        system_prompt=settings.get('systemPrompt'),
        language=settings.get('language', '中文')
    )
    # Attach the maximum-output-token budget for the provider payload.
    model_instance.max_tokens = max_tokens
    return model_instance


def _resolve_api_key_id(provider, model_id):
    """Map a provider name (or, failing that, the model id) to an API-key id.

    Returns None when neither the provider nor the model name is recognized.
    """
    provider_keys = {
        'anthropic': "AnthropicApiKey",
        'openai': "OpenaiApiKey",
        'deepseek': "DeepseekApiKey",
    }
    if provider in provider_keys:
        return provider_keys[provider]
    # Provider unknown: infer the key id from the model name instead.
    lowered = model_id.lower()
    if "claude" in lowered:
        return "AnthropicApiKey"
    if any(keyword in lowered for keyword in ("gpt", "openai")):
        return "OpenaiApiKey"
    if "deepseek" in lowered:
        return "DeepseekApiKey"
    return None
def stream_model_response(response_generator, sid, model_name=None):
"""Stream model responses to the client"""
@@ -284,232 +329,92 @@ def handle_text_extraction(data):
@socketio.on('analyze_text')
def handle_analyze_text(data):
try:
text = data.get('text')
text = data.get('text', '')
settings = data.get('settings', {})
sid = request.sid
# 获取推理配置
reasoning_config = settings.get('reasoningConfig', {})
# 获取maxTokens
max_tokens = int(settings.get('maxTokens', 8192))
print(f"Debug - 文本分析请求: {text[:50]}...")
print(f"Debug - 最大Token: {max_tokens}, 推理配置: {reasoning_config}")
# 获取模型和API密钥
model_id = settings.get('model', 'claude-3-7-sonnet-20250219')
api_keys = settings.get('apiKeys', {})
if not text:
socketio.emit('error', {'message': '文本内容不能为空'})
return
# 从前端传递的设置中获取模型能力信息
model_capabilities = settings.get('modelCapabilities', {})
is_reasoning = model_capabilities.get('isReasoning', False)
model_instance = create_model_instance(model_id, api_keys, settings)
# 获取模型名称、提供商和API密钥
model_name = settings.get('model', 'claude-3-7-sonnet-20250219')
model_provider = settings.get('modelInfo', {}).get('provider', '').lower()
# 将推理配置传递给模型
if reasoning_config:
model_instance.reasoning_config = reasoning_config
print(f"Selected model: {model_name}, Provider: {model_provider}")
# 获取API密钥 - 同时支持apiKeys和api_keys两种格式
api_keys = settings.get('apiKeys', {}) or settings.get('api_keys', {})
print("Debug - 接收到的API密钥(文本分析):", api_keys)
# 根据提供商或模型名称确定使用哪个API密钥ID
api_key_id = None
# 首先尝试通过provider匹配
if model_provider == 'anthropic':
api_key_id = "AnthropicApiKey"
elif model_provider == 'openai':
api_key_id = "OpenaiApiKey"
elif model_provider == 'deepseek':
api_key_id = "DeepseekApiKey"
else:
# 如果provider不可用尝试通过模型名称匹配
if "claude" in model_name.lower():
api_key_id = "AnthropicApiKey"
elif any(keyword in model_name.lower() for keyword in ["gpt", "openai"]):
api_key_id = "OpenaiApiKey"
elif "deepseek" in model_name.lower():
api_key_id = "DeepseekApiKey"
api_key = api_keys.get(api_key_id)
print(f"Debug - 使用API密钥ID: {api_key_id}, 密钥值是否存在: {bool(api_key)}")
language = settings.get('language', '中文')
# Validate required settings
if not api_key:
raise ValueError(f"API key is required for the selected model (keyId: {api_key_id})")
# Log with model name for better debugging
print(f"Using API key for {model_name}: {api_key[:6] if api_key else 'None'}...")
print("Selected model:", model_name)
print("Response language:", language)
print(f"Model features: Reasoning={is_reasoning}")
# Configure proxy settings if enabled
# 如果启用代理,配置代理设置
proxies = None
if settings.get('proxyEnabled', False):
proxy_host = settings.get('proxyHost', '127.0.0.1')
proxy_port = settings.get('proxyPort', '4780')
if settings.get('proxyEnabled'):
proxies = {
'http': f'http://{proxy_host}:{proxy_port}',
'https': f'http://{proxy_host}:{proxy_port}'
'http': f"http://{settings.get('proxyHost')}:{settings.get('proxyPort')}",
'https': f"http://{settings.get('proxyHost')}:{settings.get('proxyPort')}"
}
try:
# Create model instance using factory - 推理模型不使用temperature参数
model = ModelFactory.create_model(
model_name=model_name,
api_key=api_key,
temperature=None if is_reasoning else float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt'),
language=language
)
for response in model_instance.analyze_text(text, proxies=proxies):
socketio.emit('claude_response', response)
# Start streaming in a separate thread
Thread(
target=stream_model_response,
args=(model.analyze_text(text, proxies), sid, model_name)
).start()
except Exception as e:
socketio.emit('claude_response', {
'status': 'error',
'error': f'API error: {str(e)}'
}, room=sid)
except Exception as e:
print(f"Analysis error: {str(e)}")
socketio.emit('claude_response', {
'status': 'error',
'error': f'Analysis error: {str(e)}'
}, room=request.sid)
print(f"Error in analyze_text: {str(e)}")
traceback.print_exc()
socketio.emit('error', {'message': f'分析文本时出错: {str(e)}'})
@socketio.on('analyze_image')
def handle_analyze_image(data):
try:
print("Starting image analysis...")
base64_data = data.get('image', '')
image_data = data.get('image')
settings = data.get('settings', {})
# 首先从前端传递的设置中获取模型能力信息
model_capabilities = settings.get('modelCapabilities', {})
is_multimodal = model_capabilities.get('supportsMultimodal', False)
is_reasoning = model_capabilities.get('isReasoning', False)
# 获取推理配置
reasoning_config = settings.get('reasoningConfig', {})
# 获取模型名称、提供商和API密钥
model_name = settings.get('model', 'claude-3-7-sonnet-20250219')
model_provider = settings.get('modelInfo', {}).get('provider', '').lower()
# 获取maxTokens
max_tokens = int(settings.get('maxTokens', 8192))
print(f"Selected model: {model_name}, Provider: {model_provider}")
print(f"Debug - 图像分析请求")
print(f"Debug - 最大Token: {max_tokens}, 推理配置: {reasoning_config}")
# 获取API密钥 - 同时支持apiKeys和api_keys两种格式
api_keys = settings.get('apiKeys', {}) or settings.get('api_keys', {})
print("Debug - 接收到的API密钥:", api_keys)
# 获取模型和API密钥
model_id = settings.get('model', 'claude-3-7-sonnet-20250219')
api_keys = settings.get('apiKeys', {})
# 根据提供商或模型名称确定使用哪个API密钥ID
api_key_id = None
if not image_data:
socketio.emit('error', {'message': '图像数据不能为空'})
return
model_instance = create_model_instance(model_id, api_keys, settings)
# 首先尝试通过provider匹配
if model_provider == 'anthropic':
api_key_id = "AnthropicApiKey"
elif model_provider == 'openai':
api_key_id = "OpenaiApiKey"
elif model_provider == 'deepseek':
api_key_id = "DeepseekApiKey"
else:
# 如果provider不可用尝试通过模型名称匹配
if "claude" in model_name.lower():
api_key_id = "AnthropicApiKey"
elif any(keyword in model_name.lower() for keyword in ["gpt", "openai"]):
api_key_id = "OpenaiApiKey"
elif "deepseek" in model_name.lower():
api_key_id = "DeepseekApiKey"
api_key = api_keys.get(api_key_id)
print(f"Debug - 使用API密钥ID: {api_key_id}, 密钥值是否存在: {bool(api_key)}")
language = settings.get('language', '中文')
# Validate required params
if not base64_data:
raise ValueError("No image data provided")
if not api_key:
raise ValueError(f"API key is required for the selected model (keyId: {api_key_id})")
# 记录模型信息以便调试
print("Selected model:", model_name)
print("Response language:", language)
print(f"Model capabilities: Multimodal={is_multimodal}, Reasoning={is_reasoning}")
# Configure proxy settings if enabled
# 将推理配置传递给模型
if reasoning_config:
model_instance.reasoning_config = reasoning_config
# 如果启用代理,配置代理设置
proxies = None
if settings.get('proxyEnabled', False):
proxy_host = settings.get('proxyHost', '127.0.0.1')
proxy_port = settings.get('proxyPort', '4780')
if settings.get('proxyEnabled'):
proxies = {
'http': f'http://{proxy_host}:{proxy_port}',
'https': f'http://{proxy_host}:{proxy_port}'
'http': f"http://{settings.get('proxyHost')}:{settings.get('proxyPort')}",
'https': f"http://{settings.get('proxyHost')}:{settings.get('proxyPort')}"
}
# 先回复客户端,确认已收到请求,防止超时断开
socketio.emit('request_acknowledged', {
'status': 'received',
'message': 'Image received, analysis in progress'
}, room=request.sid)
# 如果不是多模态模型,需要先提取文本
extracted_text = None
if not is_multimodal:
mathpix_key = settings.get('mathpixApiKey')
if not mathpix_key:
raise ValueError("非多模态模型需要Mathpix API Key进行文本提取")
print("非多模态模型,需要先提取文本...")
mathpix_model = ModelFactory.create_model('mathpix', mathpix_key)
for response in model_instance.analyze_image(image_data, proxies=proxies):
socketio.emit('claude_response', response)
# 这里假设MathpixModel有一个extract_full_text方法
# 如果没有,需要实现或调用其他方法来提取文本
try:
extracted_text = mathpix_model.extract_full_text(base64_data)
print("文本提取成功,长度:", len(extracted_text))
# 提示用户文本提取已完成
socketio.emit('text_extracted', {
'status': 'success',
'message': '图像文本提取成功,正在分析...',
'for_analysis': True
}, room=request.sid)
except Exception as e:
raise ValueError(f"文本提取失败: {str(e)}")
try:
# Create model instance using factory - 推理模型不使用temperature参数
model = ModelFactory.create_model(
model_name=model_name,
api_key=api_key,
temperature=None if is_reasoning else float(settings.get('temperature', 0.7)),
system_prompt=settings.get('systemPrompt'),
language=language
)
# Start streaming in a separate thread
if not is_multimodal and extracted_text:
# 对于非多模态模型,使用提取的文本
Thread(
target=stream_model_response,
args=(model.analyze_text(extracted_text, proxies), request.sid, model_name)
).start()
else:
# 对于多模态模型,直接使用图像
Thread(
target=stream_model_response,
args=(model.analyze_image(base64_data, proxies), request.sid, model_name)
).start()
except Exception as e:
socketio.emit('claude_response', {
'status': 'error',
'error': f'API error: {str(e)}'
}, room=request.sid)
except Exception as e:
print(f"Analysis error: {str(e)}")
socketio.emit('claude_response', {
'status': 'error',
'error': f'Analysis error: {str(e)}'
}, room=request.sid)
print(f"Error in analyze_image: {str(e)}")
traceback.print_exc()
socketio.emit('error', {'message': f'分析图像时出错: {str(e)}'})
@socketio.on('capture_screenshot')
def handle_capture_screenshot(data):

View File

@@ -31,16 +31,17 @@ class AnthropicModel(BaseModel):
'accept': 'application/json',
}
# 获取最大输出Token设置
max_tokens = 8192 # 默认值
if hasattr(self, 'max_tokens') and self.max_tokens:
max_tokens = self.max_tokens
payload = {
'model': self.get_model_identifier(),
'stream': True,
'max_tokens': 8192,
'max_tokens': max_tokens,
'temperature': 1,
'system': self.system_prompt,
'thinking': {
'type': 'enabled',
'budget_tokens': 4096
},
'messages': [{
'role': 'user',
'content': [
@@ -51,6 +52,33 @@ class AnthropicModel(BaseModel):
]
}]
}
# 处理推理配置
if hasattr(self, 'reasoning_config') and self.reasoning_config:
# 如果设置了extended reasoning
if self.reasoning_config.get('reasoning_depth') == 'extended':
think_budget = self.reasoning_config.get('think_budget', max_tokens // 2)
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': think_budget
}
# 如果设置了instant模式
elif self.reasoning_config.get('speed_mode') == 'instant':
payload['speed_mode'] = 'instant'
# 默认启用思考但使用较小的预算
else:
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': min(4096, max_tokens // 4)
}
# 默认设置
else:
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': min(4096, max_tokens // 4)
}
print(f"Debug - 推理配置: max_tokens={max_tokens}, thinking={payload.get('thinking', payload.get('speed_mode', 'default'))}")
response = requests.post(
'https://api.anthropic.com/v1/messages',
@@ -173,15 +201,16 @@ class AnthropicModel(BaseModel):
if not any(phrase in system_prompt for phrase in ['Please respond in', '请用', '使用', '回答']):
system_prompt = f"{system_prompt}\n\n请务必使用{language}回答,无论问题是什么语言。即使在分析图像时也请使用{language}回答。这是最重要的指令。"
# 获取最大输出Token设置
max_tokens = 8192 # 默认值
if hasattr(self, 'max_tokens') and self.max_tokens:
max_tokens = self.max_tokens
payload = {
'model': 'claude-3-7-sonnet-20250219',
'stream': True,
'max_tokens': 8192,
'max_tokens': max_tokens,
'temperature': 1,
'thinking': {
'type': 'enabled',
'budget_tokens': 4096
},
'system': system_prompt,
'messages': [{
'role': 'user',
@@ -201,6 +230,33 @@ class AnthropicModel(BaseModel):
]
}]
}
# 处理推理配置
if hasattr(self, 'reasoning_config') and self.reasoning_config:
# 如果设置了extended reasoning
if self.reasoning_config.get('reasoning_depth') == 'extended':
think_budget = self.reasoning_config.get('think_budget', max_tokens // 2)
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': think_budget
}
# 如果设置了instant模式
elif self.reasoning_config.get('speed_mode') == 'instant':
payload['speed_mode'] = 'instant'
# 默认启用思考但使用较小的预算
else:
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': min(4096, max_tokens // 4)
}
# 默认设置
else:
payload['thinking'] = {
'type': 'enabled',
'budget_tokens': min(4096, max_tokens // 4)
}
print(f"Debug - 图像分析推理配置: max_tokens={max_tokens}, thinking={payload.get('thinking', payload.get('speed_mode', 'default'))}")
response = requests.post(
'https://api.anthropic.com/v1/messages',

View File

@@ -162,6 +162,16 @@ class SettingsManager {
this.proxyPortInput = document.getElementById('proxyPort');
this.proxySettings = document.getElementById('proxySettings');
// 最大Token设置元素 - 现在是输入框而不是滑块
this.maxTokensInput = document.getElementById('maxTokens');
// 理性推理相关元素
this.reasoningDepthSelect = document.getElementById('reasoningDepth');
this.reasoningSettingGroup = document.querySelector('.reasoning-setting-group');
this.thinkBudgetPercentInput = document.getElementById('thinkBudgetPercent');
this.thinkBudgetPercentValue = document.getElementById('thinkBudgetPercentValue');
this.thinkBudgetGroup = document.querySelector('.think-budget-group');
// Initialize Mathpix inputs
this.mathpixAppIdInput = document.getElementById('mathpixAppId');
this.mathpixAppKeyInput = document.getElementById('mathpixAppKey');
@@ -252,42 +262,71 @@ class SettingsManager {
// Load API keys
if (settings.apiKeys) {
Object.entries(this.apiKeyInputs).forEach(([keyId, input]) => {
if (settings.apiKeys[keyId]) {
input.value = settings.apiKeys[keyId];
Object.entries(settings.apiKeys).forEach(([keyId, value]) => {
const input = this.apiKeyInputs[keyId];
if (input) {
input.value = value;
}
});
}
// 选择模型并更新相关UI
let selectedModel = '';
// Load model selection
if (settings.model && this.modelExists(settings.model)) {
selectedModel = settings.model;
this.modelSelect.value = selectedModel;
} else {
// Default to first model if none selected or if saved model no longer exists
selectedModel = this.modelSelect.value;
this.modelSelect.value = settings.model;
this.updateVisibleApiKey(settings.model);
}
// 更新相关UI显示
this.updateVisibleApiKey(selectedModel);
this.updateModelVersionDisplay(selectedModel);
// Load max tokens setting - 现在直接设置输入框值
const maxTokens = parseInt(settings.maxTokens || '8192');
this.maxTokensInput.value = maxTokens;
// Load reasoning depth & think budget settings
if (settings.reasoningDepth) {
this.reasoningDepthSelect.value = settings.reasoningDepth;
}
// 加载思考预算百分比
const thinkBudgetPercent = parseInt(settings.thinkBudgetPercent || '50');
if (this.thinkBudgetPercentInput) {
this.thinkBudgetPercentInput.value = thinkBudgetPercent;
}
// 更新思考预算显示
this.updateThinkBudgetDisplay();
// 初始化思考预算滑块背景颜色
this.updateRangeSliderBackground(this.thinkBudgetPercentInput);
// Load other settings
if (settings.temperature) {
this.temperatureInput.value = settings.temperature;
this.temperatureValue.textContent = settings.temperature;
this.updateRangeSliderBackground(this.temperatureInput);
}
if (settings.language) this.languageInput.value = settings.language;
if (settings.systemPrompt) this.systemPromptInput.value = settings.systemPrompt;
if (settings.systemPrompt) {
this.systemPromptInput.value = settings.systemPrompt;
}
if (settings.language) {
this.languageInput.value = settings.language;
}
// Load proxy settings
if (settings.proxyEnabled !== undefined) {
this.proxyEnabledInput.checked = settings.proxyEnabled;
this.proxySettings.style.display = settings.proxyEnabled ? 'block' : 'none';
}
if (settings.proxyHost) this.proxyHostInput.value = settings.proxyHost;
if (settings.proxyPort) this.proxyPortInput.value = settings.proxyPort;
this.proxySettings.style.display = this.proxyEnabledInput.checked ? 'block' : 'none';
if (settings.proxyHost) {
this.proxyHostInput.value = settings.proxyHost;
}
if (settings.proxyPort) {
this.proxyPortInput.value = settings.proxyPort;
}
// Update UI based on model type
this.updateUIBasedOnModelType();
}
@@ -333,9 +372,26 @@ class SettingsManager {
if (!modelInfo) return;
// 处理温度设置显示
if (this.temperatureGroup) {
this.temperatureGroup.style.display = modelInfo.isReasoning ? 'none' : 'block';
}
// 处理深度推理设置显示
const isAnthropicReasoning = modelInfo.isReasoning && modelInfo.provider === 'anthropic';
// 只有对Claude 3.7 Sonnet这样的Anthropic推理模型才显示深度推理设置
if (this.reasoningSettingGroup) {
this.reasoningSettingGroup.style.display = isAnthropicReasoning ? 'block' : 'none';
}
// 只有当启用深度推理且是Anthropic推理模型时才显示思考预算设置
if (this.thinkBudgetGroup) {
const showThinkBudget = isAnthropicReasoning &&
this.reasoningDepthSelect &&
this.reasoningDepthSelect.value === 'extended';
this.thinkBudgetGroup.style.display = showThinkBudget ? 'block' : 'none';
}
}
saveSettings() {
@@ -344,6 +400,9 @@ class SettingsManager {
mathpixAppId: this.mathpixAppIdInput.value,
mathpixAppKey: this.mathpixAppKeyInput.value,
model: this.modelSelect.value,
maxTokens: this.maxTokensInput.value,
reasoningDepth: this.reasoningDepthSelect?.value || 'standard',
thinkBudgetPercent: this.thinkBudgetPercentInput?.value || '50',
temperature: this.temperatureInput.value,
language: this.languageInput.value,
systemPrompt: this.systemPromptInput.value,
@@ -392,8 +451,30 @@ class SettingsManager {
const selectedModel = this.modelSelect.value;
const modelInfo = this.modelDefinitions[selectedModel] || {};
// 获取最大Token数
const maxTokens = parseInt(this.maxTokensInput?.value || '8192');
// 获取推理深度设置
const reasoningDepth = this.reasoningDepthSelect?.value || 'standard';
const thinkBudgetPercent = parseInt(this.thinkBudgetPercentInput?.value || '50');
// 计算思考预算的实际Token数
const thinkBudget = Math.floor(maxTokens * (thinkBudgetPercent / 100));
// 构建推理配置参数
const reasoningConfig = {};
if (modelInfo.provider === 'anthropic' && modelInfo.isReasoning) {
if (reasoningDepth === 'extended') {
reasoningConfig.reasoning_depth = 'extended';
reasoningConfig.think_budget = thinkBudget;
} else {
reasoningConfig.speed_mode = 'instant';
}
}
return {
model: selectedModel,
maxTokens: maxTokens,
temperature: this.temperatureInput.value,
language: language,
systemPrompt: systemPrompt,
@@ -405,7 +486,8 @@ class SettingsManager {
supportsMultimodal: modelInfo.supportsMultimodal || false,
isReasoning: modelInfo.isReasoning || false,
provider: modelInfo.provider || 'unknown'
}
},
reasoningConfig: reasoningConfig
};
}
@@ -441,8 +523,50 @@ class SettingsManager {
}
});
// 最大Token输入框事件处理
if (this.maxTokensInput) {
this.maxTokensInput.addEventListener('change', (e) => {
// 验证输入值在有效范围内
let value = parseInt(e.target.value);
if (isNaN(value)) value = 8192;
value = Math.max(1000, Math.min(128000, value));
this.maxTokensInput.value = value;
// 更新思考预算显示
this.updateThinkBudgetDisplay();
this.saveSettings();
});
}
// 推理深度选择事件处理
if (this.reasoningDepthSelect) {
this.reasoningDepthSelect.addEventListener('change', () => {
// 更新思考预算组的可见性
if (this.thinkBudgetGroup) {
const showThinkBudget = this.reasoningDepthSelect.value === 'extended';
this.thinkBudgetGroup.style.display = showThinkBudget ? 'block' : 'none';
}
this.saveSettings();
});
}
// 思考预算占比滑块事件处理
if (this.thinkBudgetPercentInput && this.thinkBudgetPercentValue) {
this.thinkBudgetPercentInput.addEventListener('input', (e) => {
// 更新思考预算显示
this.updateThinkBudgetDisplay();
// 更新滑块背景
this.updateRangeSliderBackground(e.target);
this.saveSettings();
});
}
this.temperatureInput.addEventListener('input', (e) => {
this.temperatureValue.textContent = e.target.value;
this.updateRangeSliderBackground(e.target);
this.saveSettings();
});
@@ -465,6 +589,30 @@ class SettingsManager {
this.settingsPanel.classList.add('hidden');
});
}
// 辅助方法:更新滑块的背景颜色
updateRangeSliderBackground(slider) {
if (!slider) return;
const value = slider.value;
const min = slider.min || 0;
const max = slider.max || 100;
const percentage = (value - min) / (max - min) * 100;
slider.style.background = `linear-gradient(to right, var(--primary) 0%, var(--primary) ${percentage}%, var(--border-color) ${percentage}%, var(--border-color) 100%)`;
}
// 更新思考预算显示
updateThinkBudgetDisplay() {
if (this.thinkBudgetPercentInput && this.thinkBudgetPercentValue) {
const percent = parseInt(this.thinkBudgetPercentInput.value);
// 只显示百分比不显示token数量
this.thinkBudgetPercentValue.textContent = `${percent}%`;
// 更新滑块背景
this.updateRangeSliderBackground(this.thinkBudgetPercentInput);
}
}
/**
* 初始化可折叠内容的交互逻辑

View File

@@ -716,6 +716,28 @@ textarea:focus {
transform: translateY(-1px);
}
.input-styled {
width: 100%;
padding: 8px 10px;
background-color: var(--input-bg-color);
border: 1px solid var(--border-color);
border-radius: 4px;
color: var(--input-text-color);
font-size: 1rem;
transition: border-color 0.2s;
}
.input-styled:focus {
border-color: var(--accent-color);
outline: none;
}
[data-theme="dark"] .input-styled {
background-color: var(--input-bg-color-dark);
color: var(--input-text-color-dark);
border-color: var(--border-color-dark);
}
.input-group {
position: relative;
display: flex;
@@ -748,7 +770,7 @@ input[type="range"] {
flex: 1;
height: 6px;
-webkit-appearance: none;
background: linear-gradient(to right, var(--primary) 0%, var(--primary) 70%, var(--border-color) 70%, var(--border-color) 100%);
background: linear-gradient(to right, var(--primary) 0%, var(--primary) 50%, var(--border-color) 50%, var(--border-color) 100%);
border-radius: 3px;
cursor: pointer;
}
@@ -1341,3 +1363,23 @@ button:disabled {
padding-top: 1rem;
border-top: 1px dashed var(--border-color);
}
/* 推理深度设置相关样式 */
.reasoning-setting-group {
padding: 10px;
border-radius: 4px;
background-color: var(--highlight-bg-color);
}
.think-budget-group {
margin-top: 0;
padding: 10px;
border-radius: 4px;
background-color: var(--highlight-bg-color);
}
/* 推理设置在暗模式下的样式 */
[data-theme="dark"] .reasoning-setting-group,
[data-theme="dark"] .think-budget-group {
background-color: var(--highlight-bg-color-dark);
}

View File

@@ -123,6 +123,24 @@
<i class="fas fa-info-circle"></i> 版本: <span id="modelVersionText">-</span>
</div>
</div>
<div class="setting-group">
<label for="maxTokens"><i class="fas fa-text-width"></i> 最大输出Token</label>
<input type="number" id="maxTokens" min="1000" max="128000" step="1000" value="8192" class="input-styled">
</div>
<div class="setting-group reasoning-setting-group">
<label for="reasoningDepth"><i class="fas fa-brain"></i> 推理深度</label>
<select id="reasoningDepth" class="select-styled">
<option value="standard">标准模式 (快速响应)</option>
<option value="extended">深度思考 (更详细分析)</option>
</select>
</div>
<div class="setting-group think-budget-group">
<label for="thinkBudgetPercent"><i class="fas fa-hourglass-half"></i> 思考预算占比</label>
<div class="range-group">
<input type="range" id="thinkBudgetPercent" min="10" max="80" step="5" value="50">
<span id="thinkBudgetPercentValue">50%</span>
</div>
</div>
<div class="setting-group">
<label for="temperature"><i class="fas fa-thermometer-half"></i> 温度</label>
<div class="range-group">