Mirror of https://github.com/Zippland/Snap-Solver.git, synced 2026-03-03 16:45:26 +08:00
gpt4o run successfully
models/gpt4o.py
@@ -1,6 +1,5 @@
-import json
-import requests
-from typing import Generator
+import os
+from typing import Generator, Dict, Optional
 from openai import OpenAI
 from .base import BaseModel
 
@@ -22,52 +21,67 @@ class GPT4oModel(BaseModel):
             # Initial status
             yield {"status": "started", "content": ""}
 
-            # Configure client with proxy if needed
-            client_args = {
-                "api_key": self.api_key,
-                "base_url": "https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
-            }
-
-            if proxies:
-                session = requests.Session()
-                session.proxies = proxies
-                client_args["http_client"] = session
-
-            client = OpenAI(**client_args)
-
-            messages = [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": text
-                }
-            ]
-
-            response = client.chat.completions.create(
-                model=self.get_model_identifier(),
-                messages=messages,
-                temperature=self.temperature,
-                stream=True,
-                max_tokens=4000
-            )
-
-            for chunk in response:
-                if hasattr(chunk.choices[0].delta, 'content'):
-                    content = chunk.choices[0].delta.content
-                    if content:
-                        yield {
-                            "status": "streaming",
-                            "content": content
-                        }
-
-            # Send completion status
-            yield {
-                "status": "completed",
-                "content": ""
-            }
+            # Save original environment state
+            original_env = {
+                'http_proxy': os.environ.get('http_proxy'),
+                'https_proxy': os.environ.get('https_proxy')
+            }
+
+            try:
+                # Set proxy environment variables if provided
+                if proxies:
+                    if 'http' in proxies:
+                        os.environ['http_proxy'] = proxies['http']
+                    if 'https' in proxies:
+                        os.environ['https_proxy'] = proxies['https']
+
+                # Create OpenAI client
+                client = OpenAI(
+                    api_key=self.api_key,
+                    base_url="https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
+                )
+
+                messages = [
+                    {
+                        "role": "system",
+                        "content": self.system_prompt
+                    },
+                    {
+                        "role": "user",
+                        "content": text
+                    }
+                ]
+
+                response = client.chat.completions.create(
+                    model=self.get_model_identifier(),
+                    messages=messages,
+                    temperature=self.temperature,
+                    stream=True,
+                    max_tokens=4000
+                )
+
+                for chunk in response:
+                    if hasattr(chunk.choices[0].delta, 'content'):
+                        content = chunk.choices[0].delta.content
+                        if content:
+                            yield {
+                                "status": "streaming",
+                                "content": content
+                            }
+
+                # Send completion status
+                yield {
+                    "status": "completed",
+                    "content": ""
+                }
+            finally:
+                # Restore original environment state
+                for key, value in original_env.items():
+                    if value is None:
+                        os.environ.pop(key, None)
+                    else:
+                        os.environ[key] = value
 
         except Exception as e:
             error_msg = str(e)
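Note on the removed branch: it handed a requests.Session to OpenAI(...) as http_client, but the v1 openai SDK is built on httpx rather than requests, which is presumably why this commit falls back to setting the http_proxy/https_proxy environment variables around the call. A minimal sketch of an httpx-based alternative (not part of this commit; make_client is a hypothetical helper, and older httpx releases spell the argument proxies= instead of proxy=):

from typing import Dict, Optional

import httpx
from openai import OpenAI

def make_client(api_key: str, proxies: Optional[Dict[str, str]] = None) -> OpenAI:
    # Route the SDK's traffic through a proxy without touching os.environ.
    http_client = None
    if proxies:
        # Prefer the https proxy when both are configured.
        proxy_url = proxies.get("https") or proxies.get("http")
        http_client = httpx.Client(proxy=proxy_url)
    return OpenAI(
        api_key=api_key,
        base_url="https://api.openai.com/v1",
        http_client=http_client,  # None falls back to the SDK's default client
    )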
@@ -87,64 +101,79 @@ class GPT4oModel(BaseModel):
             # Initial status
             yield {"status": "started", "content": ""}
 
-            # Configure client with proxy if needed
-            client_args = {
-                "api_key": self.api_key,
-                "base_url": "https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
-            }
-
-            if proxies:
-                session = requests.Session()
-                session.proxies = proxies
-                client_args["http_client"] = session
-
-            client = OpenAI(**client_args)
-
-            messages = [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": [
-                        {
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/png;base64,{image_data}",
-                                "detail": "high"
-                            }
-                        },
-                        {
-                            "type": "text",
-                            "text": "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
-                        }
-                    ]
-                }
-            ]
-
-            response = client.chat.completions.create(
-                model=self.get_model_identifier(),
-                messages=messages,
-                temperature=self.temperature,
-                stream=True,
-                max_tokens=4000
-            )
-
-            for chunk in response:
-                if hasattr(chunk.choices[0].delta, 'content'):
-                    content = chunk.choices[0].delta.content
-                    if content:
-                        yield {
-                            "status": "streaming",
-                            "content": content
-                        }
-
-            # Send completion status
-            yield {
-                "status": "completed",
-                "content": ""
-            }
+            # Save original environment state
+            original_env = {
+                'http_proxy': os.environ.get('http_proxy'),
+                'https_proxy': os.environ.get('https_proxy')
+            }
+
+            try:
+                # Set proxy environment variables if provided
+                if proxies:
+                    if 'http' in proxies:
+                        os.environ['http_proxy'] = proxies['http']
+                    if 'https' in proxies:
+                        os.environ['https_proxy'] = proxies['https']
+
+                # Create OpenAI client
+                client = OpenAI(
+                    api_key=self.api_key,
+                    base_url="https://api.openai.com/v1"  # Replace with actual GPT-4o API endpoint
+                )
+
+                messages = [
+                    {
+                        "role": "system",
+                        "content": self.system_prompt
+                    },
+                    {
+                        "role": "user",
+                        "content": [
+                            {
+                                "type": "image_url",
+                                "image_url": {
+                                    "url": f"data:image/png;base64,{image_data}",
+                                    "detail": "high"
+                                }
+                            },
+                            {
+                                "type": "text",
+                                "text": "Please analyze this question and provide a detailed solution. If you see multiple questions, focus on solving them one at a time."
+                            }
+                        ]
+                    }
+                ]
+
+                response = client.chat.completions.create(
+                    model=self.get_model_identifier(),
+                    messages=messages,
+                    temperature=self.temperature,
+                    stream=True,
+                    max_tokens=4000
+                )
+
+                for chunk in response:
+                    if hasattr(chunk.choices[0].delta, 'content'):
+                        content = chunk.choices[0].delta.content
+                        if content:
+                            yield {
+                                "status": "streaming",
+                                "content": content
+                            }
+
+                # Send completion status
+                yield {
+                    "status": "completed",
+                    "content": ""
+                }
+            finally:
+                # Restore original environment state
+                for key, value in original_env.items():
+                    if value is None:
+                        os.environ.pop(key, None)
+                    else:
+                        os.environ[key] = value
 
         except Exception as e:
             error_msg = str(e)
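The image path assumes image_data is already a base64-encoded PNG, since it is spliced into a data:image/png;base64,... URL. A small sketch of producing that string (encode_image is a hypothetical helper, not part of the repo):

import base64

def encode_image(path: str) -> str:
    # Return only the raw base64 payload; analyze_image adds the data-URI prefix.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")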
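Both analyze_text and analyze_image yield plain dicts with a status field (started, streaming, completed), so a caller can print the answer as it streams. A minimal consumer sketch, assuming a constructor that takes api_key, system_prompt, and temperature (the real signature lives in models/base.py and is not shown in this diff):

from models.gpt4o import GPT4oModel

# Hypothetical construction; see BaseModel for the actual arguments.
model = GPT4oModel(
    api_key="sk-...",
    system_prompt="You are a patient tutor who explains each step.",
    temperature=0.2,
)

proxies = {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}

for event in model.analyze_text("What is the derivative of x**2?", proxies=proxies):
    if event["status"] == "streaming":
        print(event["content"], end="", flush=True)
    elif event["status"] == "completed":
        print()  # terminate the line once the stream finishes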