Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
aiofiles==24.1.0
aiohappyeyeballs==2.6.1
aiohttp==3.12.15
aiosignal==1.4.0
Expand Down
53 changes: 37 additions & 16 deletions src/core/dynamic_actor.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""支持 ReAct 范式与 Function Calling 的动态智能体实现。"""

import hashlib
import json
import logging
import os
from collections import OrderedDict
from datetime import datetime
from typing import Any

Expand All @@ -25,8 +27,6 @@

CACHEABLE_TOOLS = {"read_file", "read_files", "list_directory"}

CACHEABLE_TOOLS = {"read_file", "read_files", "list_directory"}


class DynamicActor:
"""用于执行特定任务的自主智能体(采用 ReAct 范式)。"""
Expand Down Expand Up @@ -60,7 +60,9 @@ def __init__(

# 工具映射
self.tool_map = {tool.name: tool for tool in tools}
self.tool_call_cache: dict[tuple[str, str], dict[str, Any]] = {}
# Use OrderedDict for proper LRU cache behavior
self.tool_call_cache: OrderedDict[str, dict[str, Any]] = OrderedDict()
self._cache_max_size = 100 # Limit cache size to prevent memory bloat

async def execute(self, progress_manager) -> dict[str, Any]:
"""
Expand Down Expand Up @@ -372,6 +374,8 @@ async def _handle_function_call(self, response: dict) -> tuple[str, dict[str, An
try:
if cache_key in self.tool_call_cache:
cached = self.tool_call_cache[cache_key]
# Move to end for LRU behavior (most recently used)
self.tool_call_cache.move_to_end(cache_key)
logger.info(
f"{ACTOR_LOG_PREFIX} tool_cache_hit actor={self.actor_id} name={function_name}"
)
Expand Down Expand Up @@ -1232,16 +1236,33 @@ def _format_memory(self) -> str:

return "\n".join(formatted_lines)

def _build_cache_key(self, function_name: str, function_args: dict) -> str:
"""构建工具调用的缓存键。"""
import hashlib
import json

# 创建包含函数名和参数的唯一键
cache_data = {"function": function_name, "args": function_args}

# 使用 JSON 序列化并生成哈希
cache_string = json.dumps(cache_data, sort_keys=True, ensure_ascii=False)
cache_hash = hashlib.md5(cache_string.encode("utf-8")).hexdigest()

return f"{function_name}:{cache_hash}"
def _build_cache_key(self, function_name: str, function_args: dict) -> str | None:
    """Build a deterministic cache key for a cacheable tool call.

    Args:
        function_name: Name of the tool being invoked.
        function_args: Arguments the tool is called with.

    Returns:
        A ``"{name}:{digest}"`` key for tools in ``CACHEABLE_TOOLS``, or
        ``None`` when the tool is not cacheable or key construction fails.

    Side effect: when the cache is at capacity, the oldest ~25% of entries
    are evicted (LRU order maintained by the OrderedDict) before returning,
    so a subsequent insert stays within ``_cache_max_size``.
    """
    if function_name not in CACHEABLE_TOOLS:
        return None  # Non-cacheable tools never get a key.
    try:
        # Hash ALL arguments, always. A path-only key would make calls that
        # share a path but differ in other args (e.g. recursive flags, file
        # lists) collide and serve stale results. json.dumps(sort_keys=True)
        # normalizes nested dicts, unlike str(sorted(...)) which only sorts
        # the top level.
        payload = json.dumps(
            function_args, sort_keys=True, ensure_ascii=False, default=str
        )
        # Truncated MD5 (16 hex chars) is plenty for a 100-entry cache;
        # this is keying, not security.
        digest = hashlib.md5(payload.encode("utf-8")).hexdigest()[:16]
        cache_key = f"{function_name}:{digest}"

        # Evict before the caller inserts. max(1, ...) guarantees progress
        # even if _cache_max_size is configured below 4 (old code evicted
        # nothing in that case and the bound was never enforced).
        if len(self.tool_call_cache) >= self._cache_max_size:
            for _ in range(max(1, self._cache_max_size // 4)):
                if not self.tool_call_cache:
                    break
                self.tool_call_cache.popitem(last=False)  # drop oldest

        return cache_key
    except Exception as e:
        # Best-effort: a failed key just disables caching for this call.
        logger.warning(f"{ACTOR_LOG_PREFIX} cache_key_error function={function_name} error={e}")
        return None
25 changes: 14 additions & 11 deletions src/core/dynamic_planner.py
Original file line number Diff line number Diff line change
Expand Up @@ -632,21 +632,13 @@ def _format_planning_history(self) -> str:
return "\n".join(lines)

def _apply_task_updates(self, current_tasks: list[Task], updates: list[dict]) -> list[Task]:
"""将 LLM 生成的任务更新应用到任务树。"""
"""将 LLM 生成的任务更新应用到任务树(优化版本)。"""
# 任务数量限制检查
MAX_TOTAL_TASKS = 15
MAX_SINGLE_UPDATE_TASKS = 5

# 计算当前总任务数(包括子任务)
def count_all_tasks(tasks: list[Task]) -> int:
total = 0
for task in tasks:
total += 1
if task.subtasks:
total += count_all_tasks(task.subtasks)
return total

current_total = count_all_tasks(current_tasks)
# 使用迭代计算总任务数,避免递归开销
current_total = self._count_tasks_iterative(current_tasks)

# 计算本次更新要添加的任务数
add_updates = [u for u in updates if u.get("action") == "add"]
Expand Down Expand Up @@ -1010,6 +1002,17 @@ def get_current_state(self) -> dict[str, Any]:
"planning_decisions": len(self.planning_history),
}

def _count_tasks_iterative(self, tasks: list[Task]) -> int:
"""使用迭代方式计算任务总数,避免递归开销。"""
total = 0
stack = list(tasks)
while stack:
task = stack.pop()
total += 1
if task.subtasks:
stack.extend(task.subtasks)
return total

def _get_all_tasks(self) -> list[Task]:
"""从任务树拉平成列表以获取全部任务。"""
all_tasks = []
Expand Down
19 changes: 10 additions & 9 deletions src/core/mini_aime.py
Original file line number Diff line number Diff line change
Expand Up @@ -997,17 +997,18 @@ def _extract_tasks_from_state(self, state: SystemState) -> list[Task]:

def _flatten_task_tree(self, tasks: list[Task]) -> list[Task]:
"""
将任务树展平为列表。
将任务树展平为列表(使用迭代而非递归,避免深度递归开销)
"""
result = []

def traverse(task_list: list[Task]):
for task in task_list:
result.append(task)
if task.subtasks:
traverse(task.subtasks)

traverse(tasks)
stack = list(tasks)

while stack:
task = stack.pop()
result.append(task)
if task.subtasks:
# 逆序添加以保持原始顺序
stack.extend(reversed(task.subtasks))

return result

def _has_summary_task(self) -> bool:
Expand Down
Loading