mirror of
https://github.com/Zippland/Snap-Solver.git
synced 2026-02-07 08:22:19 +08:00
ocr
This commit is contained in:
@@ -21,6 +21,20 @@ class BaseModel(ABC):
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
def analyze_text(self, text: str, proxies: dict = None) -> Generator[dict, None, None]:
    """
    Analyze the given text and yield response chunks.

    Args:
        text: Text to analyze
        proxies: Optional proxy configuration

    Yields:
        dict: Response chunks with status and content. Concrete
        implementations yield "status" values of "started", "streaming",
        "completed", and "error" (error chunks carry an "error" key
        instead of "content").
    """
    pass
|
||||
|
||||
@abstractmethod
def get_default_system_prompt(self) -> str:
    """Return the default system prompt for this model.

    Concrete models send this string as the system message when the
    caller has not supplied one.
    """
|
||||
|
||||
@@ -15,6 +15,103 @@ class ClaudeModel(BaseModel):
|
||||
def get_model_identifier(self) -> str:
    """Return the pinned Anthropic model version string sent with API calls."""
    model_name = "claude-3-5-sonnet-20241022"
    return model_name
|
||||
|
||||
def analyze_text(self, text: str, proxies: dict = None) -> Generator[dict, None, None]:
    """Stream Claude's response for text analysis.

    Args:
        text: Text to analyze.
        proxies: Optional requests-style proxy mapping passed straight
            through to ``requests.post``.

    Yields:
        dict: Chunks with "status" set to "started", "streaming",
        "completed", or "error". Streaming chunks carry "content";
        error chunks carry "error".
    """
    try:
        # Initial status so the caller can show progress immediately.
        yield {"status": "started", "content": ""}

        # Anthropic expects the bare key in x-api-key, not a Bearer token.
        api_key = self.api_key.strip()
        if api_key.startswith('Bearer '):
            api_key = api_key[7:]

        headers = {
            'x-api-key': api_key,
            'anthropic-version': '2023-06-01',
            'content-type': 'application/json',
            'accept': 'application/json',
        }

        payload = {
            'model': self.get_model_identifier(),
            'stream': True,
            'max_tokens': 4096,
            'temperature': self.temperature,
            'system': self.system_prompt,
            'messages': [{
                'role': 'user',
                'content': [
                    {
                        'type': 'text',
                        'text': text
                    }
                ]
            }]
        }

        # Context manager releases the streamed connection even when the
        # consumer abandons this generator mid-stream (fixes a leak: the
        # original never closed the response).
        with requests.post(
            'https://api.anthropic.com/v1/messages',
            headers=headers,
            json=payload,
            stream=True,
            proxies=proxies,
            timeout=60
        ) as response:
            if response.status_code != 200:
                error_msg = f'API error: {response.status_code}'
                try:
                    error_data = response.json()
                    if 'error' in error_data:
                        error_msg += f" - {error_data['error']['message']}"
                except (ValueError, KeyError):
                    # Body was not JSON (ValueError) or lacked the expected
                    # error shape (KeyError); fall back to the raw text.
                    # Narrowed from a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    error_msg += f" - {response.text}"
                yield {"status": "error", "error": error_msg}
                return

            for chunk in response.iter_lines():
                if not chunk:
                    continue

                try:
                    chunk_str = chunk.decode('utf-8')
                    # Server-sent events: only "data: ..." lines carry payloads.
                    if not chunk_str.startswith('data: '):
                        continue

                    chunk_str = chunk_str[6:]
                    data = json.loads(chunk_str)

                    if data.get('type') == 'content_block_delta':
                        if 'delta' in data and 'text' in data['delta']:
                            yield {
                                "status": "streaming",
                                "content": data['delta']['text']
                            }

                    elif data.get('type') == 'message_stop':
                        yield {
                            "status": "completed",
                            "content": ""
                        }

                    elif data.get('type') == 'error':
                        error_msg = data.get('error', {}).get('message', 'Unknown error')
                        yield {
                            "status": "error",
                            "error": error_msg
                        }
                        break

                except json.JSONDecodeError as e:
                    # Skip a malformed SSE line rather than aborting the stream.
                    print(f"JSON decode error: {str(e)}")
                    continue

    except Exception as e:
        # Any transport/setup failure is reported to the consumer instead
        # of raising out of the generator.
        yield {
            "status": "error",
            "error": f"Streaming error: {str(e)}"
        }
|
||||
|
||||
def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
|
||||
"""Stream Claude's response for image analysis"""
|
||||
try:
|
||||
|
||||
@@ -16,6 +16,76 @@ class DeepSeekModel(BaseModel):
|
||||
def get_model_identifier(self) -> str:
    """Return the DeepSeek model name (reasoning variant) used for API calls."""
    return str("deepseek-reasoner")
|
||||
|
||||
def analyze_text(self, text: str, proxies: dict = None) -> Generator[dict, None, None]:
    """Stream DeepSeek's response for text analysis.

    Args:
        text: Text to analyze.
        proxies: Optional requests-style proxy mapping.

    Yields:
        dict: Chunks with "status" of "started", "streaming", "completed",
        or "error"; streaming chunks carry "content", error chunks "error".
    """
    try:
        # Initial status
        yield {"status": "started", "content": ""}

        # Configure client with proxy if needed
        client_args = {
            "api_key": self.api_key,
            "base_url": "https://api.deepseek.com"
        }

        if proxies:
            # NOTE(review): the OpenAI SDK's `http_client` argument expects an
            # httpx.Client, not a requests.Session — this branch likely fails or
            # is ignored at runtime. Confirm and switch to httpx.Client(proxies=...)
            # once httpx is an explicit dependency.
            session = requests.Session()
            session.proxies = proxies
            client_args["http_client"] = session

        client = OpenAI(**client_args)

        response = client.chat.completions.create(
            model=self.get_model_identifier(),
            messages=[{
                'role': 'system',
                'content': self.system_prompt
            }, {
                'role': 'user',
                'content': text
            }],
            stream=True
        )

        for chunk in response:
            try:
                # deepseek-reasoner interleaves chain-of-thought in a
                # `reasoning_content` field; prefer it when present.
                # NOTE(review): if a delta object exposes both attributes,
                # only reasoning_content is emitted for that chunk — verify
                # this matches the SDK's actual delta shape.
                if hasattr(chunk.choices[0].delta, 'reasoning_content'):
                    content = chunk.choices[0].delta.reasoning_content
                    if content:
                        yield {
                            "status": "streaming",
                            "content": content
                        }
                elif hasattr(chunk.choices[0].delta, 'content'):
                    content = chunk.choices[0].delta.content
                    if content:
                        yield {
                            "status": "streaming",
                            "content": content
                        }

            except Exception as e:
                # Best-effort: a malformed chunk is logged and skipped so one
                # bad event does not abort the whole stream.
                print(f"Chunk processing error: {str(e)}")
                continue

        # Send completion status
        yield {
            "status": "completed",
            "content": ""
        }

    except Exception as e:
        # Map the most common API failures to friendlier messages before
        # surfacing them to the consumer.
        error_msg = str(e)
        if "invalid_api_key" in error_msg.lower():
            error_msg = "Invalid API key provided"
        elif "rate_limit" in error_msg.lower():
            error_msg = "Rate limit exceeded. Please try again later."

        yield {
            "status": "error",
            "error": f"DeepSeek API error: {error_msg}"
        }
|
||||
|
||||
def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
|
||||
"""Stream DeepSeek's response for image analysis"""
|
||||
try:
|
||||
|
||||
@@ -16,6 +16,71 @@ class GPT4oModel(BaseModel):
|
||||
def get_model_identifier(self) -> str:
    """Return the dated GPT-4o snapshot name requested from the API."""
    identifier = "gpt-4o-2024-11-20"
    return identifier
|
||||
|
||||
def analyze_text(self, text: str, proxies: dict = None) -> Generator[dict, None, None]:
    """Stream GPT-4o's response for text analysis.

    Args:
        text: Text to analyze.
        proxies: Optional requests-style proxy mapping.

    Yields:
        dict: Chunks with "status" of "started", "streaming", "completed",
        or "error"; streaming chunks carry "content", error chunks "error".
    """
    try:
        # Initial status so the caller can show progress immediately.
        yield {"status": "started", "content": ""}

        # Configure client with proxy if needed
        client_args = {
            "api_key": self.api_key,
            "base_url": "https://api.openai.com/v1"
        }

        if proxies:
            # NOTE(review): the OpenAI SDK's `http_client` expects an
            # httpx.Client; a requests.Session is likely rejected or ignored.
            # Kept for interface compatibility — replace with
            # httpx.Client(proxies=...) once httpx is an explicit dependency.
            session = requests.Session()
            session.proxies = proxies
            client_args["http_client"] = session

        client = OpenAI(**client_args)

        messages = [
            {
                "role": "system",
                "content": self.system_prompt
            },
            {
                "role": "user",
                "content": text
            }
        ]

        response = client.chat.completions.create(
            model=self.get_model_identifier(),
            messages=messages,
            temperature=self.temperature,
            stream=True,
            max_tokens=4000
        )

        for chunk in response:
            # Some stream events (e.g. usage-only chunks) carry no choices;
            # skip them instead of raising IndexError and aborting the stream
            # into the generic error handler below.
            if not chunk.choices:
                continue
            content = getattr(chunk.choices[0].delta, 'content', None)
            if content:
                yield {
                    "status": "streaming",
                    "content": content
                }

        # Send completion status
        yield {
            "status": "completed",
            "content": ""
        }

    except Exception as e:
        # Map the most common API failures to friendlier messages before
        # surfacing them to the consumer.
        error_msg = str(e)
        if "invalid_api_key" in error_msg.lower():
            error_msg = "Invalid API key provided"
        elif "rate_limit" in error_msg.lower():
            error_msg = "Rate limit exceeded. Please try again later."

        yield {
            "status": "error",
            "error": f"GPT-4o API error: {error_msg}"
        }
|
||||
|
||||
def analyze_image(self, image_data: str, proxies: dict = None) -> Generator[dict, None, None]:
|
||||
"""Stream GPT-4o's response for image analysis"""
|
||||
try:
|
||||
|
||||
Reference in New Issue
Block a user