diff --git a/agent/protocol/agent_stream.py b/agent/protocol/agent_stream.py
index 95ad64f..2cd89df 100644
--- a/agent/protocol/agent_stream.py
+++ b/agent/protocol/agent_stream.py
@@ -78,8 +78,9 @@ class AgentStreamExecutor:
         Returns:
             Final response text
         """
-        # Log user message
-        logger.info(f"\n{'='*50}")
+        # Log user message with model info
+        logger.info(f"{'='*50}")
+        logger.info(f"šŸ¤– Model: {self.model.model}")
         logger.info(f"šŸ‘¤ ē”Øęˆ·: {user_message}")
         logger.info(f"{'='*50}")
 
@@ -102,7 +103,7 @@ class AgentStreamExecutor:
         try:
             while turn < self.max_turns:
                 turn += 1
-                logger.info(f"\nšŸ”„ 第 {turn} č½®")
+                logger.info(f"第 {turn} č½®")
                 self._emit_event("turn_start", {"turn": turn})
 
                 # Check if memory flush is needed (before calling LLM)
@@ -145,7 +146,7 @@ class AgentStreamExecutor:
                         final_response = (
                             "ęŠ±ę­‰ļ¼Œęˆ‘ęš‚ę—¶ę— ę³•ē”Ÿęˆå›žå¤ć€‚čÆ·å°čÆ•ę¢äø€ē§ę–¹å¼ęčæ°ä½ ēš„éœ€ę±‚ļ¼Œęˆ–ēØåŽå†čÆ•ć€‚"
                         )
-                        logger.info(f"šŸ’­ Generated fallback response for empty LLM output")
+                        logger.info("Generated fallback response for empty LLM output")
                     else:
                         logger.info(f"šŸ’­ {assistant_msg[:150]}{'...' if len(assistant_msg) > 150 else ''}")
 
@@ -239,7 +240,7 @@ class AgentStreamExecutor:
             raise
 
         finally:
-            logger.info(f"šŸ 完成({turn}č½®)\n")
+            logger.info(f"šŸ 完成({turn}č½®)")
             self._emit_event("agent_end", {"final_response": final_response})
 
         return final_response
@@ -365,8 +366,8 @@ class AgentStreamExecutor:
 
                 if is_retryable and retry_count < max_retries:
                     wait_time = (retry_count + 1) * 2  # Linear backoff: 2s, 4s, 6s
-                    logger.warning(f"āš ļø LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
-                    logger.info(f"šŸ”„ Retrying in {wait_time}s...")
+                    logger.warning(f"āš ļø LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
+                    logger.info(f"Retrying in {wait_time}s...")
                     time.sleep(wait_time)
                     return self._call_llm_stream(
                         retry_on_empty=retry_on_empty,
@@ -486,9 +487,9 @@ class AgentStreamExecutor:
             if tool_name == "bash" and result.status == "success":
                 command = arguments.get("command", "")
                 if "init_skill.py" in command and self.agent.skill_manager:
-                    logger.info("šŸ”„ Detected skill creation, refreshing skills...")
+                    logger.info("Detected skill creation, refreshing skills...")
                     self.agent.refresh_skills()
-                    logger.info(f"āœ… Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
+                    logger.info(f"Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
 
             self._emit_event("tool_execution_end", {
                 "tool_call_id": tool_id,
diff --git a/bot/linkai/link_ai_bot.py b/bot/linkai/link_ai_bot.py
index c94a72c..9410062 100644
--- a/bot/linkai/link_ai_bot.py
+++ b/bot/linkai/link_ai_bot.py
@@ -505,14 +505,9 @@ def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
         Formatted response in OpenAI format or generator for streaming
     """
     try:
-        # Debug logging
-        logger.info(f"[LinkAI] ⭐ LinkAI call_with_tools method called")
-        logger.info(f"[LinkAI] messages count (before conversion): {len(messages) if messages else 0}")
-
         # Convert messages from Claude format to OpenAI format
         # This is important because Agent uses Claude format internally
         messages = self._convert_messages_to_openai_format(messages)
-        logger.info(f"[LinkAI] messages count (after conversion): {len(messages) if messages else 0}")
 
         # Convert tools from Claude format to OpenAI format
         if tools:
@@ -528,7 +523,7 @@ def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
                 # Replace existing system message
                 messages[0] = {"role": "system", "content": system_prompt}
 
-        logger.info(f"[LinkAI] Final messages count: {len(messages)}, tools count: {len(tools) if tools else 0}, stream: {stream}")
+        logger.debug(f"[LinkAI] messages: {len(messages)}, tools: {len(tools) if tools else 0}, stream: {stream}")
 
         # Build request parameters (LinkAI uses OpenAI-compatible format)
         body = {
@@ -583,8 +578,8 @@ def _handle_linkai_sync_response(self, base_url, headers, body):
 
         if res.status_code == 200:
             response = res.json()
-            logger.info(f"[LinkAI] call_with_tools reply, model={response.get('model')}, "
-                        f"total_tokens={response.get('usage', {}).get('total_tokens', 0)}")
+            logger.debug(f"[LinkAI] reply: model={response.get('model')}, "
+                         f"tokens={response.get('usage', {}).get('total_tokens', 0)}")
             # LinkAI response is already in OpenAI-compatible format
             return response
 
diff --git a/bridge/agent_bridge.py b/bridge/agent_bridge.py
index 43a9126..8cc0313 100644
--- a/bridge/agent_bridge.py
+++ b/bridge/agent_bridge.py
@@ -74,12 +74,15 @@ class AgentLLMModel(LLMModel):
         if self._bot is None:
             # If use_linkai is enabled, use LinkAI bot directly
             if self._use_linkai:
-                logger.info("[AgentBridge] Using LinkAI bot for agent")
                 self._bot = self.bridge.find_chat_bot(const.LINKAI)
             else:
                 self._bot = self.bridge.get_bot(self.bot_type)
 
             # Automatically add tool calling support if not present
             self._bot = add_openai_compatible_support(self._bot)
+
+            # Log bot info
+            bot_name = type(self._bot).__name__
+            logger.debug(f"[AgentBridge] Using bot: {bot_name}")
         return self._bot
 
@@ -331,7 +333,7 @@ class AgentBridge:
         runtime_info = {
             "model": conf().get("model", "unknown"),
             "workspace": workspace_root,
-            "channel": "web"  # TODO: get from actual channel, default to "web" to hide if not specified
+            "channel": conf().get("channel_type", "unknown")  # Get from config
        }
 
         system_prompt = prompt_builder.build(
diff --git a/channel/channel.py b/channel/channel.py
index 7f043e5..08799c6 100644
--- a/channel/channel.py
+++ b/channel/channel.py
@@ -47,6 +47,10 @@ class Channel(object):
         try:
             logger.info("[Channel] Using agent mode")
 
+            # Add channel_type to context if not present
+            if context and "channel_type" not in context:
+                context["channel_type"] = self.channel_type
+
             # Use agent bridge to handle the query
             return Bridge().fetch_agent_reply(
                 query=query,
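Note: the retry path touched in agent/protocol/agent_stream.py sleeps (retry_count + 1) * 2 seconds between attempts, i.e. a linear 2s/4s/6s schedule rather than a true exponential one. The sketch below illustrates that schedule in isolation; the helper name call_with_linear_backoff is made up for illustration, and the is_retryable filtering is simplified to a bare except, so it is not part of this patch.

import logging
import time

logger = logging.getLogger(__name__)

def call_with_linear_backoff(call, max_retries=3):
    """Retry `call` on failure, sleeping 2s, 4s, 6s between attempts."""
    retry_count = 0
    while True:
        try:
            return call()
        except Exception as e:  # the real code only retries retryable API errors
            if retry_count >= max_retries:
                raise
            wait_time = (retry_count + 1) * 2  # linear backoff: 2s, 4s, 6s
            logger.warning(f"LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
            logger.info(f"Retrying in {wait_time}s...")
            time.sleep(wait_time)
            retry_count += 1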