fix: simplify agent log output and pass channel_type through to the agent context

This commit is contained in:
zhayujie
2026-01-31 17:29:32 +08:00
parent 89e229ab75
commit 86a6182e41
4 changed files with 21 additions and 19 deletions

View File

@@ -78,8 +78,9 @@ class AgentStreamExecutor:
Returns:
Final response text
"""
# Log user message
logger.info(f"\n{'='*50}")
# Log user message with model info
logger.info(f"{'='*50}")
logger.info(f"🤖 Model: {self.model.model}")
logger.info(f"👤 用户: {user_message}")
logger.info(f"{'='*50}")
@@ -102,7 +103,7 @@ class AgentStreamExecutor:
try:
while turn < self.max_turns:
turn += 1
logger.info(f"\n🔄 {turn}")
logger.info(f"{turn}")
self._emit_event("turn_start", {"turn": turn})
# Check if memory flush is needed (before calling LLM)
@@ -145,7 +146,7 @@ class AgentStreamExecutor:
final_response = (
"抱歉,我暂时无法生成回复。请尝试换一种方式描述你的需求,或稍后再试。"
)
logger.info(f"💭 Generated fallback response for empty LLM output")
logger.info(f"Generated fallback response for empty LLM output")
else:
logger.info(f"💭 {assistant_msg[:150]}{'...' if len(assistant_msg) > 150 else ''}")
@@ -239,7 +240,7 @@ class AgentStreamExecutor:
raise
finally:
logger.info(f"🏁 完成({turn}轮)\n")
logger.info(f"🏁 完成({turn}轮)")
self._emit_event("agent_end", {"final_response": final_response})
return final_response
@@ -365,8 +366,8 @@ class AgentStreamExecutor:
if is_retryable and retry_count < max_retries:
wait_time = (retry_count + 1) * 2 # Exponential backoff: 2s, 4s, 6s
logger.warning(f"⚠️ LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
logger.info(f"🔄 Retrying in {wait_time}s...")
logger.warning(f"⚠️ LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
logger.info(f"Retrying in {wait_time}s...")
time.sleep(wait_time)
return self._call_llm_stream(
retry_on_empty=retry_on_empty,
@@ -486,9 +487,9 @@ class AgentStreamExecutor:
if tool_name == "bash" and result.status == "success":
command = arguments.get("command", "")
if "init_skill.py" in command and self.agent.skill_manager:
logger.info("🔄 Detected skill creation, refreshing skills...")
logger.info("Detected skill creation, refreshing skills...")
self.agent.refresh_skills()
logger.info(f"Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
logger.info(f"Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
self._emit_event("tool_execution_end", {
"tool_call_id": tool_id,

View File

@@ -505,14 +505,9 @@ def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
Formatted response in OpenAI format or generator for streaming
"""
try:
# Debug logging
logger.info(f"[LinkAI] ⭐ LinkAI call_with_tools method called")
logger.info(f"[LinkAI] messages count (before conversion): {len(messages) if messages else 0}")
# Convert messages from Claude format to OpenAI format
# This is important because Agent uses Claude format internally
messages = self._convert_messages_to_openai_format(messages)
logger.info(f"[LinkAI] messages count (after conversion): {len(messages) if messages else 0}")
# Convert tools from Claude format to OpenAI format
if tools:
@@ -528,7 +523,7 @@ def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
# Replace existing system message
messages[0] = {"role": "system", "content": system_prompt}
logger.info(f"[LinkAI] Final messages count: {len(messages)}, tools count: {len(tools) if tools else 0}, stream: {stream}")
logger.debug(f"[LinkAI] messages: {len(messages)}, tools: {len(tools) if tools else 0}, stream: {stream}")
# Build request parameters (LinkAI uses OpenAI-compatible format)
body = {
@@ -583,8 +578,8 @@ def _handle_linkai_sync_response(self, base_url, headers, body):
if res.status_code == 200:
response = res.json()
logger.info(f"[LinkAI] call_with_tools reply, model={response.get('model')}, "
f"total_tokens={response.get('usage', {}).get('total_tokens', 0)}")
logger.debug(f"[LinkAI] reply: model={response.get('model')}, "
f"tokens={response.get('usage', {}).get('total_tokens', 0)}")
# LinkAI response is already in OpenAI-compatible format
return response

View File

@@ -74,12 +74,14 @@ class AgentLLMModel(LLMModel):
if self._bot is None:
# If use_linkai is enabled, use LinkAI bot directly
if self._use_linkai:
logger.info("[AgentBridge] Using LinkAI bot for agent")
self._bot = self.bridge.find_chat_bot(const.LINKAI)
else:
self._bot = self.bridge.get_bot(self.bot_type)
# Automatically add tool calling support if not present
self._bot = add_openai_compatible_support(self._bot)
# Log bot info
bot_name = type(self._bot).__name__
return self._bot
def call(self, request: LLMRequest):
@@ -331,7 +333,7 @@ class AgentBridge:
runtime_info = {
"model": conf().get("model", "unknown"),
"workspace": workspace_root,
"channel": "web" # TODO: get from actual channel, default to "web" to hide if not specified
"channel": conf().get("channel_type", "unknown") # Get from config
}
system_prompt = prompt_builder.build(

View File

@@ -47,6 +47,10 @@ class Channel(object):
try:
logger.info("[Channel] Using agent mode")
# Add channel_type to context if not present
if context and "channel_type" not in context:
context["channel_type"] = self.channel_type
# Use agent bridge to handle the query
return Bridge().fetch_agent_reply(
query=query,