mirror of
https://github.com/Zippland/Snap-Solver.git
synced 2026-02-05 08:21:25 +08:00
Refactor the frontend UI and interaction logic to improve user experience and code maintainability
@@ -91,39 +91,47 @@ class ClaudeModel(BaseModel):
                     if 'delta' in data:
                         if 'text' in data['delta']:
                             text_chunk = data['delta']['text']
-                            yield {
-                                "status": "streaming",
-                                "content": text_chunk
-                            }
+                            response_buffer += text_chunk
+                            # Only send once a batch of characters has accumulated, to reduce UI jitter
+                            if len(text_chunk) >= 10 or text_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "streaming",
+                                    "content": response_buffer
+                                }

                         elif 'thinking' in data['delta']:
                             thinking_chunk = data['delta']['thinking']
                             thinking_content += thinking_chunk
-                            yield {
-                                "status": "thinking",
-                                "content": thinking_content
-                            }
+                            # Only send once a batch of characters has accumulated, to reduce UI jitter
+                            if len(thinking_chunk) >= 20 or thinking_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "thinking",
+                                    "content": thinking_content
+                                }

                 # Handle the new extended_thinking format
                 elif data.get('type') == 'extended_thinking_delta':
                     if 'delta' in data and 'text' in data['delta']:
                         thinking_chunk = data['delta']['text']
                         thinking_content += thinking_chunk
-                        yield {
-                            "status": "thinking",
-                            "content": thinking_content
-                        }
+                        # Only send once a batch of characters has accumulated, to reduce UI jitter
+                        if len(thinking_chunk) >= 20 or thinking_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                            yield {
+                                "status": "thinking",
+                                "content": thinking_content
+                            }

                 elif data.get('type') == 'message_stop':
                     # Make sure the complete thinking content is sent
                     if thinking_content:
                         yield {
                             "status": "thinking_complete",
                             "content": thinking_content
                         }
                     # Make sure the complete response content is sent
                     yield {
                         "status": "completed",
-                        "content": ""
+                        "content": response_buffer
                     }

                 elif data.get('type') == 'error':
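The pattern repeated throughout this hunk is the heart of the change: instead of forwarding every delta to the client, the generator accumulates chunks and emits the growing buffer only at natural breakpoints. A minimal self-contained sketch of the technique (the thresholds and dict shape mirror the diff; the function name and iterable are illustrative, not the project's actual code):

from typing import Generator, Iterable

SENTENCE_ENDS = ('.', '!', '?', '。', '!', '?', '\n')

def buffered_stream(chunks: Iterable[str], min_len: int = 10) -> Generator[dict, None, None]:
    """Accumulate streamed text and emit the full buffer only at breakpoints,
    so the UI repaints far less often than once per delta."""
    buffer = ""
    for chunk in chunks:
        buffer += chunk
        # Flush when the delta is large enough or ends a sentence/line
        if len(chunk) >= min_len or chunk.endswith(SENTENCE_ENDS):
            yield {"status": "streaming", "content": buffer}
    # Always finish with the complete text, mirroring the message_stop branch
    yield {"status": "completed", "content": buffer}

Note that each emission carries the whole buffer, so the client should replace its displayed text rather than append, or content will be duplicated.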
@@ -144,7 +152,7 @@ class ClaudeModel(BaseModel):
                 "error": f"Streaming error: {str(e)}"
             }

-    def analyze_image(self, image_data, prompt, socket=None, proxies=None):
+    def analyze_image(self, image_data, proxies=None):
         yield {"status": "started"}

         api_key = self.api_key
@@ -157,6 +165,9 @@ class ClaudeModel(BaseModel):
             'content-type': 'application/json'
         }

+        # Fetch the system prompt, making sure it includes the language setting
+        system_prompt = self.system_prompt
+
         payload = {
             'model': 'claude-3-7-sonnet-20250219',
             'stream': True,
@@ -166,7 +177,7 @@ class ClaudeModel(BaseModel):
                 'type': 'enabled',
                 'budget_tokens': 4096
             },
-            'system': "You are a helpful AI assistant that specializes in solving math problems. You should provide step-by-step solutions and explanations for any math problem presented to you. If you're given an image, analyze any mathematical content in it and provide a detailed solution.",
+            'system': system_prompt,
             'messages': [{
                 'role': 'user',
                 'content': [
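Assembled, the request body this hunk configures looks roughly like the sketch below. Only the fields visible in the diff are taken from the source; max_tokens, the placeholder prompt, and the empty content list are assumptions:

system_prompt = "You are a helpful assistant."  # placeholder; the real value is self.system_prompt

payload = {
    'model': 'claude-3-7-sonnet-20250219',
    'stream': True,
    'max_tokens': 8192,            # placeholder; not shown in this diff
    'thinking': {
        'type': 'enabled',         # turn on extended thinking
        'budget_tokens': 4096      # cap the tokens spent on thinking
    },
    'system': system_prompt,       # was a hard-coded math-solver prompt before this commit
    'messages': [{
        'role': 'user',
        'content': []              # placeholder; the diff truncates the content list
    }]
}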
@@ -225,36 +236,44 @@ class ClaudeModel(BaseModel):
                     if 'delta' in data:
                         if 'text' in data['delta']:
                             text_chunk = data['delta']['text']
-                            yield {
-                                "status": "streaming",
-                                "content": text_chunk
-                            }
+                            response_buffer += text_chunk
+                            # Only send once a batch of characters has accumulated, to reduce UI jitter
+                            if len(text_chunk) >= 10 or text_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "streaming",
+                                    "content": response_buffer
+                                }

                         elif 'thinking' in data['delta']:
                             thinking_chunk = data['delta']['thinking']
                             thinking_content += thinking_chunk
-                            yield {
-                                "status": "thinking",
-                                "content": thinking_content
-                            }
+                            # Only send once a batch of characters has accumulated, to reduce UI jitter
+                            if len(thinking_chunk) >= 20 or thinking_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "thinking",
+                                    "content": thinking_content
+                                }

                 # Handle the new extended_thinking format
                 elif data.get('type') == 'extended_thinking_delta':
                     if 'delta' in data and 'text' in data['delta']:
                         thinking_chunk = data['delta']['text']
                         thinking_content += thinking_chunk
-                        yield {
-                            "status": "thinking",
-                            "content": thinking_content
-                        }
+                        # Only send once a batch of characters has accumulated, to reduce UI jitter
+                        if len(thinking_chunk) >= 20 or thinking_chunk.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                            yield {
+                                "status": "thinking",
+                                "content": thinking_content
+                            }

                 elif data.get('type') == 'message_stop':
                     # Make sure the complete thinking content is sent
                     if thinking_content:
                         yield {
                             "status": "thinking_complete",
                             "content": thinking_content
                         }
                     # Make sure the complete response content is sent
                     yield {
                         "status": "completed",
                         "content": response_buffer
@@ -35,12 +35,10 @@ class GPT4oModel(BaseModel):
             if 'https' in proxies:
                 os.environ['https_proxy'] = proxies['https']

-            # Create OpenAI client
-            client = OpenAI(
-                api_key=self.api_key,
-                base_url="https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
-            )
+            # Initialize OpenAI client
+            client = OpenAI(api_key=self.api_key)

             # Prepare messages
             messages = [
                 {
                     "role": "system",
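With the hard-coded base_url gone, the client falls back to the library's default endpoint. A sketch of how the surrounding code presumably drives it (the model name, key, and message are placeholders; the create parameters mirror the hunks below):

from openai import OpenAI

client = OpenAI(api_key="sk-...")  # placeholder key

response = client.chat.completions.create(
    model="gpt-4o",                # placeholder; the model name is not shown in these hunks
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    max_tokens=4000,
)
for chunk in response:
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)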
@@ -60,39 +58,49 @@ class GPT4oModel(BaseModel):
                     max_tokens=4000
                 )

+                # Use an accumulation buffer
+                response_buffer = ""
+
                 for chunk in response:
                     if hasattr(chunk.choices[0].delta, 'content'):
                         content = chunk.choices[0].delta.content
                         if content:
-                            yield {
-                                "status": "streaming",
-                                "content": content
-                            }
+                            # Accumulate the content
+                            response_buffer += content
+
+                            # Only send after enough characters accumulate or a sentence-ending marker appears
+                            if len(content) >= 10 or content.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "streaming",
+                                    "content": response_buffer
+                                }
+
+                # Make sure the final complete content is sent
+                if response_buffer:
+                    yield {
+                        "status": "streaming",
+                        "content": response_buffer
+                    }

                 # Send completion status
                 yield {
                     "status": "completed",
-                    "content": ""
+                    "content": response_buffer
                 }

             finally:
                 # Restore original environment state
                 for key, value in original_env.items():
                     if value is None:
-                        os.environ.pop(key, None)
+                        if key in os.environ:
+                            del os.environ[key]
                     else:
                         os.environ[key] = value

         except Exception as e:
             error_msg = str(e)
             if "invalid_api_key" in error_msg.lower():
                 error_msg = "Invalid API key provided"
             elif "rate_limit" in error_msg.lower():
                 error_msg = "Rate limit exceeded. Please try again later."

             yield {
                 "status": "error",
-                "error": f"GPT-4o API error: {error_msg}"
+                "error": str(e)
             }

     def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
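The finally block restores whatever proxy variables the method overrode at the top. The full save/patch/restore cycle, reduced to a sketch (original_env and the restore loop come from the diff; the rest is assumed context):

import os

def proxied_call(proxies=None):
    # Snapshot the variables we may touch
    keys = ('http_proxy', 'https_proxy')
    original_env = {key: os.environ.get(key) for key in keys}
    try:
        if proxies and 'http' in proxies:
            os.environ['http_proxy'] = proxies['http']
        if proxies and 'https' in proxies:
            os.environ['https_proxy'] = proxies['https']
        ...  # perform the API request here
    finally:
        # Restore original environment state
        for key, value in original_env.items():
            if value is None:
                os.environ.pop(key, None)  # was never set; remove it again
            else:
                os.environ[key] = value

Since os.environ is process-global, concurrent requests with different proxies could still interfere; passing proxies per request to the HTTP client would avoid that.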
@@ -115,12 +123,10 @@ class GPT4oModel(BaseModel):
             if 'https' in proxies:
                 os.environ['https_proxy'] = proxies['https']

-            # Create OpenAI client
-            client = OpenAI(
-                api_key=self.api_key,
-                base_url="https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
-            )
+            # Initialize OpenAI client
+            client = OpenAI(api_key=self.api_key)

             # Prepare messages with image
             messages = [
                 {
                     "role": "system",
@@ -132,13 +138,12 @@ class GPT4oModel(BaseModel):
                     {
                         "type": "image_url",
                         "image_url": {
-                            "url": image_data if image_data.startswith('data:') else f"data:image/png;base64,{image_data}",
-                            "detail": "high"
+                            "url": f"data:image/jpeg;base64,{image_data}"
                         }
                     },
                     {
                         "type": "text",
-                        "text": "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
+                        "text": "Please analyze this image and provide a detailed solution."
                     }
                 ]
             }
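For reference, the user message this hunk trims follows OpenAI's vision content format. A sketch with placeholder data (the removed branch also accepted ready-made data: URLs and requested high detail):

image_data = "iVBORw0KGgo..."  # placeholder base64 payload

user_message = {
    "role": "user",
    "content": [
        {
            "type": "image_url",
            # After this commit the code always assumes raw base64 and a JPEG MIME type
            "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
        },
        {
            "type": "text",
            "text": "Please analyze this image and provide a detailed solution.",
        },
    ],
}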
@@ -152,37 +157,47 @@ class GPT4oModel(BaseModel):
                     max_tokens=4000
                 )

+                # Use an accumulation buffer
+                response_buffer = ""
+
                 for chunk in response:
                     if hasattr(chunk.choices[0].delta, 'content'):
                         content = chunk.choices[0].delta.content
                         if content:
-                            yield {
-                                "status": "streaming",
-                                "content": content
-                            }
+                            # Accumulate the content
+                            response_buffer += content
+
+                            # Only send after enough characters accumulate or a sentence-ending marker appears
+                            if len(content) >= 10 or content.endswith(('.', '!', '?', '。', '!', '?', '\n')):
+                                yield {
+                                    "status": "streaming",
+                                    "content": response_buffer
+                                }
+
+                # Make sure the final complete content is sent
+                if response_buffer:
+                    yield {
+                        "status": "streaming",
+                        "content": response_buffer
+                    }

                 # Send completion status
                 yield {
                     "status": "completed",
-                    "content": ""
+                    "content": response_buffer
                 }

             finally:
                 # Restore original environment state
                 for key, value in original_env.items():
                     if value is None:
-                        os.environ.pop(key, None)
+                        if key in os.environ:
+                            del os.environ[key]
                     else:
                         os.environ[key] = value

         except Exception as e:
             error_msg = str(e)
             if "invalid_api_key" in error_msg.lower():
                 error_msg = "Invalid API key provided"
             elif "rate_limit" in error_msg.lower():
                 error_msg = "Rate limit exceeded. Please try again later."

             yield {
                 "status": "error",
-                "error": f"GPT-4o API error: {error_msg}"
+                "error": str(e)
             }