mirror of https://github.com/Zippland/Snap-Solver.git
synced 2026-01-19 01:21:13 +08:00
ai factory
13  models/__init__.py  Normal file
@@ -0,0 +1,13 @@
from .base import BaseModel
from .claude import ClaudeModel
from .gpt4o import GPT4oModel
from .deepseek import DeepSeekModel
from .factory import ModelFactory

__all__ = [
    'BaseModel',
    'ClaudeModel',
    'GPT4oModel',
    'DeepSeekModel',
    'ModelFactory'
]
BIN  models/__pycache__/__init__.cpython-312.pyc  Normal file
Binary file not shown.
BIN  models/__pycache__/base.cpython-312.pyc  Normal file
Binary file not shown.
BIN  models/__pycache__/claude.cpython-312.pyc  Normal file
Binary file not shown.
BIN  models/__pycache__/deepseek.cpython-312.pyc  Normal file
Binary file not shown.
BIN  models/__pycache__/factory.cpython-312.pyc  Normal file
Binary file not shown.
BIN  models/__pycache__/gpt4o.cpython-312.pyc  Normal file
Binary file not shown.
36  models/base.py  Normal file
@@ -0,0 +1,36 @@
from abc import ABC, abstractmethod
from typing import Generator, Any


class BaseModel(ABC):
    def __init__(self, api_key: str, temperature: float = 0.7, system_prompt: str = None):
        self.api_key = api_key
        self.temperature = temperature
        self.system_prompt = system_prompt or self.get_default_system_prompt()

    @abstractmethod
    def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
        """
        Analyze the given image and yield response chunks.

        Args:
            image_data: Base64-encoded image data
            proxies: Optional proxy configuration

        Yields:
            dict: Response chunks with status and content
        """
        pass

    @abstractmethod
    def get_default_system_prompt(self) -> str:
        """Return the default system prompt for this model"""
        pass

    @abstractmethod
    def get_model_identifier(self) -> str:
        """Return the model identifier used in API calls"""
        pass

    def validate_api_key(self) -> bool:
        """Check that the API key is present and non-blank (no format validation)"""
        return bool(self.api_key and self.api_key.strip())
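For context, a minimal sketch of how a concrete model plugs into this contract and how a caller consumes the chunk generator. EchoModel, its canned chunks, and the sample inputs are hypothetical illustrations, not part of this commit:

# Hypothetical subclass; shows the three abstract methods and the chunk protocol
from models.base import BaseModel

class EchoModel(BaseModel):
    def get_default_system_prompt(self) -> str:
        return "You are a test model."

    def get_model_identifier(self) -> str:
        return "echo-v1"

    def analyze_image(self, image_data, proxies=None):
        yield {"status": "started", "content": ""}
        yield {"status": "streaming", "content": "stub answer"}
        yield {"status": "completed", "content": ""}

# Callers iterate the generator and branch on the "status" field:
for chunk in EchoModel(api_key="test-key").analyze_image("aGVsbG8="):
    if chunk["status"] == "streaming":
        print(chunk["content"], end="")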
121  models/claude.py  Normal file
@@ -0,0 +1,121 @@
import json
import requests
from typing import Generator
from .base import BaseModel


class ClaudeModel(BaseModel):
    def get_default_system_prompt(self) -> str:
        return """You are an expert at analyzing questions and providing detailed solutions. When presented with an image of a question:
1. First read and understand the question carefully
2. Break down the key components of the question
3. Provide a clear, step-by-step solution
4. If relevant, explain any concepts or theories involved
5. If there are multiple approaches, explain the most efficient one first"""

    def get_model_identifier(self) -> str:
        return "claude-3-5-sonnet-20241022"

    def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
        """Stream Claude's response for image analysis"""
        try:
            # Initial status
            yield {"status": "started", "content": ""}

            # Strip an accidental "Bearer " prefix; Anthropic expects the bare key
            api_key = self.api_key.strip()
            if api_key.startswith('Bearer '):
                api_key = api_key[7:]

            headers = {
                'x-api-key': api_key,
                'anthropic-version': '2023-06-01',
                'content-type': 'application/json',
                'accept': 'application/json',
            }

            payload = {
                'model': self.get_model_identifier(),
                'stream': True,
                'max_tokens': 4096,
                'temperature': self.temperature,
                'system': self.system_prompt,
                'messages': [{
                    'role': 'user',
                    'content': [
                        {
                            'type': 'image',
                            'source': {
                                'type': 'base64',
                                'media_type': 'image/png',
                                'data': image_data
                            }
                        },
                        {
                            'type': 'text',
                            'text': "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
                        }
                    ]
                }]
            }

            response = requests.post(
                'https://api.anthropic.com/v1/messages',
                headers=headers,
                json=payload,
                stream=True,
                proxies=proxies,
                timeout=60
            )

            if response.status_code != 200:
                error_msg = f'API error: {response.status_code}'
                try:
                    error_data = response.json()
                    if 'error' in error_data:
                        error_msg += f" - {error_data['error']['message']}"
                except (ValueError, KeyError):
                    # Body was not JSON or lacked the expected shape; fall back to raw text
                    error_msg += f" - {response.text}"
                yield {"status": "error", "error": error_msg}
                return

            for chunk in response.iter_lines():
                if not chunk:
                    continue

                try:
                    chunk_str = chunk.decode('utf-8')
                    # Server-sent events: only "data: " lines carry JSON payloads
                    if not chunk_str.startswith('data: '):
                        continue

                    chunk_str = chunk_str[6:]  # strip the "data: " prefix
                    data = json.loads(chunk_str)

                    if data.get('type') == 'content_block_delta':
                        if 'delta' in data and 'text' in data['delta']:
                            yield {
                                "status": "streaming",
                                "content": data['delta']['text']
                            }

                    elif data.get('type') == 'message_stop':
                        yield {
                            "status": "completed",
                            "content": ""
                        }

                    elif data.get('type') == 'error':
                        error_msg = data.get('error', {}).get('message', 'Unknown error')
                        yield {
                            "status": "error",
                            "error": error_msg
                        }
                        break

                except json.JSONDecodeError as e:
                    print(f"JSON decode error: {str(e)}")
                    continue

        except Exception as e:
            yield {
                "status": "error",
                "error": f"Streaming error: {str(e)}"
            }
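For reference, the loop above parses Anthropic's server-sent-event stream. An illustrative, abridged exchange (hand-written for this note, not a captured API log):

event: content_block_delta
data: {"type": "content_block_delta", "index": 0, "delta": {"type": "text_delta", "text": "Step 1: ..."}}

event: message_stop
data: {"type": "message_stop"}

iter_lines() yields both the "event:" and "data:" lines; the code keeps only the "data:" payloads, JSON-decodes them, and dispatches on data["type"].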
84  models/deepseek.py  Normal file
@@ -0,0 +1,84 @@
import httpx
from typing import Generator
from openai import OpenAI
from .base import BaseModel


class DeepSeekModel(BaseModel):
    def get_default_system_prompt(self) -> str:
        return """You are an expert at analyzing questions and providing detailed solutions. When presented with an image of a question:
1. First read and understand the question carefully
2. Break down the key components of the question
3. Provide a clear, step-by-step solution
4. If relevant, explain any concepts or theories involved
5. If there are multiple approaches, explain the most efficient one first"""

    def get_model_identifier(self) -> str:
        return "deepseek-reasoner"

    def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
        """Stream DeepSeek's response for image analysis"""
        try:
            # Initial status
            yield {"status": "started", "content": ""}

            # Configure client with proxy if needed
            client_args = {
                "api_key": self.api_key,
                "base_url": "https://api.deepseek.com"
            }

            if proxies:
                # The OpenAI SDK's http_client must be an httpx.Client, not a
                # requests.Session; this assumes httpx-style proxy keys such as
                # "http://" / "https://" in the mapping
                client_args["http_client"] = httpx.Client(proxies=proxies)

            client = OpenAI(**client_args)

            # NOTE: deepseek-reasoner has no vision input; as in the original
            # design, the base64 image is embedded as plain text in the prompt
            response = client.chat.completions.create(
                model=self.get_model_identifier(),
                messages=[{
                    'role': 'user',
                    'content': f"Here's an image of a question to analyze: data:image/png;base64,{image_data}"
                }],
                stream=True
            )

            for chunk in response:
                try:
                    delta = chunk.choices[0].delta
                    # deepseek-reasoner streams chain-of-thought as "reasoning_content"
                    # before the final answer arrives as "content"; forward whichever
                    # field is populated. (Branching on hasattr alone would always take
                    # the reasoning branch and drop the answer, since the attribute
                    # exists with value None.)
                    content = getattr(delta, 'reasoning_content', None) or getattr(delta, 'content', None)
                    if content:
                        yield {
                            "status": "streaming",
                            "content": content
                        }

                except Exception as e:
                    print(f"Chunk processing error: {str(e)}")
                    continue

            # Send completion status
            yield {
                "status": "completed",
                "content": ""
            }

        except Exception as e:
            error_msg = str(e)
            if "invalid_api_key" in error_msg.lower():
                error_msg = "Invalid API key provided"
            elif "rate_limit" in error_msg.lower():
                error_msg = "Rate limit exceeded. Please try again later."

            yield {
                "status": "error",
                "error": f"DeepSeek API error: {error_msg}"
            }
55  models/factory.py  Normal file
@@ -0,0 +1,55 @@
from typing import Dict, Type
from .base import BaseModel
from .claude import ClaudeModel
from .gpt4o import GPT4oModel
from .deepseek import DeepSeekModel


class ModelFactory:
    _models: Dict[str, Type[BaseModel]] = {
        'claude-3-5-sonnet-20241022': ClaudeModel,
        'gpt-4o-2024-11-20': GPT4oModel,
        'deepseek-reasoner': DeepSeekModel
    }

    @classmethod
    def create_model(cls, model_name: str, api_key: str, temperature: float = 0.7, system_prompt: str = None) -> BaseModel:
        """
        Create and return an instance of the specified model.

        Args:
            model_name: The identifier of the model to create
            api_key: The API key for the model
            temperature: Optional temperature parameter for response generation
            system_prompt: Optional custom system prompt

        Returns:
            An instance of the specified model

        Raises:
            ValueError: If the model_name is not recognized
        """
        model_class = cls._models.get(model_name)
        if not model_class:
            raise ValueError(f"Unknown model: {model_name}")

        return model_class(
            api_key=api_key,
            temperature=temperature,
            system_prompt=system_prompt
        )

    @classmethod
    def get_available_models(cls) -> list[str]:
        """Return a list of available model identifiers"""
        return list(cls._models.keys())

    @classmethod
    def register_model(cls, model_name: str, model_class: Type[BaseModel]) -> None:
        """
        Register a new model type with the factory.

        Args:
            model_name: The identifier for the model
            model_class: The model class to register
        """
        cls._models[model_name] = model_class
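A minimal usage sketch of the factory as committed; the API key placeholder and the base64_png variable are illustrative stand-ins:

from models import ModelFactory

print(ModelFactory.get_available_models())
# ['claude-3-5-sonnet-20241022', 'gpt-4o-2024-11-20', 'deepseek-reasoner']

model = ModelFactory.create_model(
    'claude-3-5-sonnet-20241022',
    api_key='sk-ant-...',  # placeholder, not a real key
    temperature=0.3
)

# base64_png: a base64-encoded PNG screenshot (assumed to exist)
for chunk in model.analyze_image(base64_png):
    if chunk["status"] == "streaming":
        print(chunk["content"], end="", flush=True)
    elif chunk["status"] == "error":
        raise RuntimeError(chunk["error"])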
94  models/gpt4o.py  Normal file
@@ -0,0 +1,94 @@
import httpx
from typing import Generator
from openai import OpenAI
from .base import BaseModel


class GPT4oModel(BaseModel):
    def get_default_system_prompt(self) -> str:
        return """You are an expert at analyzing questions and providing detailed solutions. When presented with an image of a question:
1. First read and understand the question carefully
2. Break down the key components of the question
3. Provide a clear, step-by-step solution
4. If relevant, explain any concepts or theories involved
5. If there are multiple approaches, explain the most efficient one first"""

    def get_model_identifier(self) -> str:
        return "gpt-4o-2024-11-20"

    def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
        """Stream GPT-4o's response for image analysis"""
        try:
            # Initial status
            yield {"status": "started", "content": ""}

            # Configure client with proxy if needed
            client_args = {
                "api_key": self.api_key,
                "base_url": "https://api.openai.com/v1"  # standard OpenAI endpoint
            }

            if proxies:
                # The OpenAI SDK's http_client must be an httpx.Client, not a
                # requests.Session; this assumes httpx-style proxy keys such as
                # "http://" / "https://" in the mapping
                client_args["http_client"] = httpx.Client(proxies=proxies)

            client = OpenAI(**client_args)

            messages = [
                {
                    "role": "system",
                    "content": self.system_prompt
                },
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/png;base64,{image_data}",
                                "detail": "high"
                            }
                        },
                        {
                            "type": "text",
                            "text": "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
                        }
                    ]
                }
            ]

            response = client.chat.completions.create(
                model=self.get_model_identifier(),
                messages=messages,
                temperature=self.temperature,
                stream=True,
                max_tokens=4000
            )

            for chunk in response:
                # Some stream chunks carry no choices; skip them instead of indexing
                if not chunk.choices:
                    continue
                content = chunk.choices[0].delta.content
                if content:
                    yield {
                        "status": "streaming",
                        "content": content
                    }

            # Send completion status
            yield {
                "status": "completed",
                "content": ""
            }

        except Exception as e:
            error_msg = str(e)
            if "invalid_api_key" in error_msg.lower():
                error_msg = "Invalid API key provided"
            elif "rate_limit" in error_msg.lower():
                error_msg = "Rate limit exceeded. Please try again later."

            yield {
                "status": "error",
                "error": f"GPT-4o API error: {error_msg}"
            }