Compare commits

..

5 Commits

Author SHA1 Message Date
wuyifan18
f50dcd2fe7 update 2025-10-21 10:13:14 +08:00
wuyifan18
744945f68d update 2025-10-21 10:09:18 +08:00
wuyifan18
25853d8174 update 2025-10-19 18:54:11 +08:00
wuyifan18
ceb57acd23 update 2025-10-19 18:14:21 +08:00
wuyifan18
da2318a40c update 2025-10-18 14:31:34 +08:00
380 changed files with 27415 additions and 54392 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,434 +0,0 @@
"""
Word 文档 LLM 报告导出器
调用大模型生成专业的任务执行报告
"""
import json
import os
import re
from datetime import datetime
from typing import Dict, Any, Optional
class DocxLLMExporter:
    """Word-document LLM report exporter.

    Asks a large language model to write a professional task-execution
    report (Markdown), then renders that Markdown into a .docx file.
    """

    # Class-level defaults only; each instance works on its own copy
    # (see __init__), populated from config.yaml.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template sent to the LLM (runtime string - kept verbatim).
    PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和报告分析师。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 报告要求
请生成一份完整的任务执行报告,包含以下章节:
### 1. 执行摘要
用 2-3 句话概括本次任务的整体执行情况。
### 2. 任务概述
- 任务背景与目标
- 任务范围与边界
### 3. 任务规划分析
- 任务拆解的合理性
- 智能体角色分配的优化建议
- 工作流程设计
### 4. 执行过程回顾
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案
### 5. 成果产出分析
- 产出物的质量评估
- 产出与预期目标的匹配度
### 6. 团队协作分析
- 智能体之间的协作模式
- 信息传递效率
### 7. 质量评估
- 整体完成质量评分1-10分
- 各维度的具体评分及理由
### 8. 经验教训与改进建议
- 成功经验
- 存在的问题与不足
- 改进建议
---
## 输出格式要求
- 使用 Markdown 格式输出
- 语言:简体中文
- 适当使用列表、表格增强可读性
- 报告长度必须达到 4000-6000 字,每个章节都要详细展开,不要遗漏任何章节
- 每个章节的内容要充实,提供具体的分析和建议
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
- 不要生成附录章节(如有关键参数对照表、工艺流程图等),如果确实需要附录再生成
- 不要在报告中显示"报告总字数"这样的统计信息
"""

    def __init__(self):
        # BUGFIX: _load_llm_config used to write through self.LLM_CONFIG
        # straight into the class-level dict, so every instance shared (and
        # clobbered) the same mutable state.  Shadow it with a per-instance
        # copy before loading.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found on the search paths."""
        try:
            import yaml
            # Candidate config-file locations, checked in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]
            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            # Best-effort: a missing/broken config is reported, not fatal.
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate a Word document by asking the LLM for the report content.

        Args:
            task_data: dict with 'task_name', 'task_outline', 'rehearsal_log'
                and 'agent_scores' entries.
            file_path: destination .docx path.

        Returns:
            True on success, False on any failure (errors are printed).
        """
        try:
            # 1. Pull raw inputs out of task_data.
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')
            # 2. Agents that actually took part (from the outline's Collaboration Process).
            agents = self._extract_agents(task_outline)
            # 3. Keep only scores for agents that participated in this task.
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
            # 4. Serialize the pieces for prompt interpolation.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
            agents_str = ', '.join(agents) if agents else ''
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
            # 5. Build the prompt.
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )
            # 6. Ask the LLM for the report body.
            print("正在调用大模型生成报告...")
            report_content = self._call_llm(prompt)
            if not report_content:
                print("LLM 生成报告失败")
                return False
            # 7. Drop a duplicated "任务执行报告" heading if the model added one.
            report_content = self._clean_report_title(report_content)
            print(f"报告生成成功,长度: {len(report_content)} 字符")
            # 8. Render the Markdown into a Word document.
            self._save_as_word(report_content, file_path)
            return True
        except Exception as e:
            print(f"Word LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _clean_report_title(self, content: str) -> str:
        """Strip a duplicated report title from the first line, if present."""
        lines = content.split('\n')
        if not lines:
            return content
        first_line = lines[0].strip()
        if first_line == '任务执行报告' or first_line == '# 任务执行报告':
            # Drop the title line and any blank lines that follow it.
            lines = lines[1:]
            while lines and not lines[0].strip():
                lines.pop(0)
        return '\n'.join(lines)

    def _extract_agents(self, task_outline: Any) -> list:
        """Collect the distinct agent names referenced by the outline's
        'Collaboration Process' steps; returns [] for malformed input."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []
        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []
        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:  # skip None / empty names
                            agents.add(agent)
        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Return agent_scores restricted to agents participating in this task."""
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}
        if not agents:
            return {}
        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue
            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})
            if not agent_scores_data:
                continue
            # Keep only scores whose agent is in the participating list.
            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores
            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }
        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the chat-completion API; return the text, or '' on any failure."""
        try:
            import openai
            # All three settings are required before making a request.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""
            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )
            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=12000,
            )
            if response and response.choices:
                return response.choices[0].message.content
            return ""
        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _save_as_word(self, markdown_content: str, file_path: str):
        """Render the Markdown report into a .docx file at *file_path*.

        Raises on failure so the caller's error handling can report it.
        """
        try:
            from docx import Document
            from docx.enum.text import WD_ALIGN_PARAGRAPH
            doc = Document()
            # Use the first '# ' heading as the centered document title.
            lines = markdown_content.split('\n')
            first_title = None
            content_start = 0
            for i, line in enumerate(lines):
                line = line.strip()
                if line.startswith('# '):
                    first_title = line[2:].strip()
                    content_start = i + 1
                    break
            if first_title:
                title = doc.add_heading(first_title, level=0)
                title.alignment = WD_ALIGN_PARAGRAPH.CENTER
            # Render the remaining Markdown body.
            remaining_content = '\n'.join(lines[content_start:])
            self._parse_markdown_to_doc(remaining_content, doc)
            # Export-timestamp footer.
            doc.add_paragraph(f"\n\n导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
            doc.save(file_path)
            print(f"Word 文档已保存: {file_path}")
        except ImportError:
            print("请安装 python-docx 库: pip install python-docx")
            raise
        except Exception as e:
            print(f"保存 Word 文档失败: {e}")
            raise

    def _parse_markdown_to_doc(self, markdown_content: str, doc):
        """Parse Markdown content and append it to the Word document."""
        lines = markdown_content.split('\n')
        i = 0
        table_rows = []
        in_table = False
        while i < len(lines):
            line = lines[i].rstrip()
            # Blank line: flush any pending table.
            if not line:
                in_table = False
                if table_rows:
                    self._add_table_to_doc(table_rows, doc)
                    table_rows = []
                i += 1
                continue
            # Skip table separator rows such as |---| or |:---|.
            stripped = line.strip()
            if stripped.startswith('|') and stripped.endswith('|') and '---' in stripped:
                i += 1
                continue
            # Table row: collect its cells.
            if '|' in line and line.strip().startswith('|'):
                cells = [cell.strip() for cell in line.split('|')[1:-1]]
                if cells and any(cells):  # ignore fully-empty rows
                    table_rows.append(cells)
                    in_table = True
                i += 1
                continue
            else:
                # Leaving a table: emit what was collected so far.
                if in_table and table_rows:
                    self._add_table_to_doc(table_rows, doc)
                    table_rows = []
                    in_table = False
            # Headings.  (Level mapping ## -> 1 and ### -> 3 kept as-is.)
            if line.startswith('### '):
                doc.add_heading(line[4:].strip(), level=3)
            elif line.startswith('## '):
                doc.add_heading(line[3:].strip(), level=1)
            elif line.startswith('# '):
                doc.add_heading(line[2:].strip(), level=0)
            # Bullet list items: strip the '-', '*' or '•' marker.
            # BUGFIX: the third test read startswith('') (always True), which
            # routed every plain paragraph into the bullet branch; the
            # single-char strip on the else path shows it was meant to be '•'.
            elif line.startswith('- ') or line.startswith('* ') or line.startswith('•'):
                text = line[2:].strip() if line.startswith(('- ', '* ')) else line[1:].strip()
                self._add_formatted_paragraph(text, doc, 'List Bullet')
            # Plain paragraph (bold markers handled inside).
            else:
                self._add_formatted_paragraph(line, doc)
            i += 1
        # Flush a table that ran to the end of the document.
        if table_rows:
            self._add_table_to_doc(table_rows, doc)

    def _add_table_to_doc(self, table_rows: list, doc):
        """Append collected table rows to the Word document."""
        if not table_rows:
            return
        table = doc.add_table(rows=len(table_rows), cols=len(table_rows[0]))
        table.style = 'Light Grid Accent 1'
        for i, row_data in enumerate(table_rows):
            row = table.rows[i]
            for j, cell_text in enumerate(row_data):
                cell = row.cells[j]
                cell.text = ''
                # Render **bold** spans inside the cell.
                parts = re.split(r'(\*\*.+?\*\*)', cell_text)
                for part in parts:
                    if part.startswith('**') and part.endswith('**'):
                        run = cell.paragraphs[0].add_run(part[2:-2])
                        run.bold = True
                    elif part:
                        cell.paragraphs[0].add_run(part)

    def _add_formatted_paragraph(self, text: str, doc, style: str = None):
        """Append a paragraph, rendering **bold** spans as bold runs."""
        para = doc.add_paragraph(style=style)
        parts = re.split(r'(\*\*.+?\*\*)', text)
        for part in parts:
            if part.startswith('**') and part.endswith('**'):
                run = para.add_run(part[2:-2])
                run.bold = True
            elif part:  # skip empty fragments (consistency with table cells)
                para.add_run(part)

View File

@@ -1,269 +0,0 @@
"""
信息图 LLM 报告导出器
调用大模型生成信息图展示的格式化内容
"""
import json
import os
import re
from datetime import datetime
from typing import Dict, Any, Optional
class InfographicLLMExporter:
    """Infographic LLM exporter.

    Asks a large language model to distill task-execution data into
    JSON-structured content suitable for infographic rendering.
    """

    # Class-level defaults only; each instance works on its own copy
    # (see __init__), populated from config.yaml.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template sent to the LLM (runtime string - kept verbatim).
    PROMPT_TEMPLATE = """你是一位专业的可视化设计师和数据分析专家。你的任务是将以下任务执行数据生成适合信息图展示的格式化内容。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 信息图内容要求
请生成以下信息图展示内容JSON 格式输出):
{{
"summary": "执行摘要 - 2-3句话概括整体执行情况",
"highlights": [
"亮点1 - 取得的显著成果",
"亮点2 - 关键突破或创新",
"亮点3 - 重要的里程碑"
],
"statistics": {{
"total_steps": 执行总步骤数,
"agent_count": 参与智能体数量,
"completion_rate": 完成率(百分比),
"quality_score": 质量评分(1-10)
}},
"key_insights": [
"关键洞察1 - 从执行过程中总结的洞见",
"关键洞察2 - 值得关注的趋势或模式",
"关键洞察3 - 对未来工作的建议"
],
"timeline": [
{{"step": "步骤名称", "status": "完成/进行中/未完成", "key_result": "关键产出"}},
...
],
"agent_performance": [
{{"name": "智能体名称", "score": 评分, "contribution": "主要贡献"}},
...
]
}}
---
## 输出要求
- 输出必须是有效的 JSON 格式
- 语言:简体中文
- 所有字符串值使用中文
- statistics 中的数值必须是整数或浮点数
- 确保 JSON 格式正确,不要有语法错误
- 不要输出 JSON 之外的任何内容
- **重要**"执行结果"rehearsal_log只是参考数据用于帮助你分析整体执行情况生成摘要、亮点、统计数据等。**不要在任何输出字段中直接复制或输出原始执行结果数据**,而应该对这些数据进行分析和提炼,生成适合信息图展示的格式化内容。
"""

    def __init__(self):
        # BUGFIX: _load_llm_config used to mutate the class-level dict via
        # self.LLM_CONFIG, sharing state across all instances.  Work on a
        # per-instance copy instead.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found on the search paths."""
        try:
            import yaml
            # Candidate config-file locations, checked in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]
            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            # Best-effort: a missing/broken config is reported, not fatal.
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Generate infographic content via the LLM.

        Returns the parsed content dict on success, or None on failure.
        (BUGFIX: the old annotation claimed Dict[str, Any] although every
        error path returned None.)
        """
        try:
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')
            # Restrict the score data to agents that actually took part.
            agents = self._extract_agents(task_outline)
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
            # Serialize the pieces for prompt interpolation.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
            agents_str = ', '.join(agents) if agents else ''
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )
            print("正在调用大模型生成信息图内容...")
            llm_result = self._call_llm(prompt)
            if not llm_result:
                print("LLM 生成信息图内容失败")
                return None
            infographic_data = self._parse_llm_result(llm_result)
            if not infographic_data:
                print("解析 LLM 结果失败")
                return None
            print(f"信息图内容生成成功")
            return infographic_data
        except Exception as e:
            print(f"信息图 LLM 生成失败: {e}")
            import traceback
            traceback.print_exc()
            return None

    def _extract_agents(self, task_outline: Any) -> list:
        """Collect the distinct agent names referenced by the outline's
        'Collaboration Process' steps; returns [] for malformed input."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []
        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []
        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:  # skip None / empty names
                            agents.add(agent)
        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Return agent_scores restricted to agents participating in this task."""
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}
        if not agents:
            return {}
        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue
            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})
            if not agent_scores_data:
                continue
            # Keep only scores whose agent is in the participating list.
            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores
            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }
        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the chat-completion API; return the text, or '' on any failure."""
        try:
            import openai
            # All three settings are required before making a request.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""
            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )
            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=8000,
            )
            if response and response.choices:
                return response.choices[0].message.content
            return ""
        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _parse_llm_result(self, llm_result: str) -> Optional[Dict[str, Any]]:
        """Parse the JSON string returned by the LLM, stripping code fences."""
        try:
            json_str = llm_result.strip()
            # Remove a leading ```json / ``` fence and a trailing ``` fence.
            if json_str.startswith("```json"):
                json_str = json_str[7:]
            if json_str.startswith("```"):
                json_str = json_str[3:]
            if json_str.endswith("```"):
                json_str = json_str[:-3]
            json_str = json_str.strip()
            return json.loads(json_str)
        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            print(f"原始结果: {llm_result[:500]}...")
            return None
        except Exception as e:
            print(f"解析失败: {e}")
            return None

View File

@@ -1,315 +0,0 @@
"""
Markdown LLM 报告导出器
调用大模型生成专业的任务执行报告Markdown 格式)
"""
import json
import os
import re
from datetime import datetime
from typing import Dict, Any, Optional
class MarkdownLLMExporter:
    """Markdown LLM report exporter.

    Asks a large language model to write a professional task-execution
    report and saves it as a Markdown file.
    """

    # Class-level defaults only; each instance works on its own copy
    # (see __init__), populated from config.yaml.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template sent to the LLM (runtime string - kept verbatim).
    PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和报告分析师。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 报告要求
请生成一份完整的任务执行报告,包含以下章节:
### 1. 执行摘要
用 2-3 句话概括本次任务的整体执行情况。
### 2. 任务概述
- 任务背景与目标
- 任务范围与边界
### 3. 任务规划分析
- 任务拆解的合理性
- 智能体角色分配的优化建议
- 工作流程设计
### 4. 执行过程回顾
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案
### 5. 成果产出分析
- 产出物的质量评估
- 产出与预期目标的匹配度
### 6. 团队协作分析
- 智能体之间的协作模式
- 信息传递效率
### 7. 质量评估
- 整体完成质量评分1-10分
- 各维度的具体评分及理由
### 8. 经验教训与改进建议
- 成功经验
- 存在的问题与不足
- 改进建议
---
## 输出格式要求
- 使用 Markdown 格式输出
- 语言:简体中文
- 适当使用列表、表格增强可读性
- 报告长度必须达到 4000-6000 字,每个章节都要详细展开,不要遗漏任何章节
- 每个章节的内容要充实,提供具体的分析和建议
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
- 不要生成附录章节(如有关键参数对照表、工艺流程图等),如果确实需要附录再生成
- 不要在报告中显示"报告总字数"这样的统计信息
"""

    def __init__(self):
        # BUGFIX: _load_llm_config used to mutate the class-level dict via
        # self.LLM_CONFIG, sharing state across all instances.  Work on a
        # per-instance copy instead.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found on the search paths."""
        try:
            import yaml
            # Candidate config-file locations, checked in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]
            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            # Best-effort: a missing/broken config is reported, not fatal.
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate a Markdown report file via the LLM.

        Args:
            task_data: dict with 'task_name', 'task_outline', 'rehearsal_log'
                and 'agent_scores' entries.
            file_path: destination .md path.

        Returns:
            True on success, False on any failure (errors are printed).
        """
        try:
            # 1. Pull raw inputs out of task_data.
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')
            # 2. Agents that actually took part (from the outline's Collaboration Process).
            agents = self._extract_agents(task_outline)
            # 3. Keep only scores for agents that participated in this task.
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
            # 4. Serialize the pieces for prompt interpolation.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
            agents_str = ', '.join(agents) if agents else ''
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
            # 5. Build the prompt.
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )
            # 6. Ask the LLM for the report body.
            print("正在调用大模型生成 Markdown 报告...")
            report_content = self._call_llm(prompt)
            if not report_content:
                print("LLM 生成报告失败")
                return False
            # 7. Drop a duplicated title heading if the model added one.
            report_content = self._clean_report_title(report_content)
            print(f"Markdown 报告生成成功,长度: {len(report_content)} 字符")
            # 8. Save to disk.
            self._save_as_markdown(report_content, task_name, file_path)
            return True
        except Exception as e:
            print(f"Markdown LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _clean_report_title(self, content: str) -> str:
        """Strip a duplicated report title from the first line, if present."""
        lines = content.split('\n')
        if not lines:
            return content
        first_line = lines[0].strip()
        if first_line == '任务执行报告' or first_line == '# 任务执行报告':
            # Drop the title line and any blank lines that follow it.
            lines = lines[1:]
            while lines and not lines[0].strip():
                lines.pop(0)
        return '\n'.join(lines)

    def _extract_agents(self, task_outline: Any) -> list:
        """Collect the distinct agent names referenced by the outline's
        'Collaboration Process' steps; returns [] for malformed input."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []
        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []
        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:  # skip None / empty names
                            agents.add(agent)
        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Return agent_scores restricted to agents participating in this task."""
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}
        if not agents:
            return {}
        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue
            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})
            if not agent_scores_data:
                continue
            # Keep only scores whose agent is in the participating list.
            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores
            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }
        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the chat-completion API; return the text, or '' on any failure."""
        try:
            import openai
            # All three settings are required before making a request.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""
            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )
            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=12000,
            )
            if response and response.choices:
                return response.choices[0].message.content
            return ""
        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _save_as_markdown(self, markdown_content: str, task_name: str, file_path: str):
        """Write the Markdown report to *file_path* with a title and footer.

        Raises on failure so the caller's error handling can report it.
        """
        try:
            # Prepend a level-1 title unless the model already produced one.
            if not markdown_content.strip().startswith('# '):
                markdown_content = f"# {task_name}\n\n{markdown_content}"
            # Always append an export-time footer.  (BUGFIX: the original
            # branched on whether the content already ended with '---', but
            # both branches appended the same footer - dead conditional.)
            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            footer = f"\n\n---\n*导出时间: {timestamp}*"
            markdown_content = markdown_content.rstrip() + footer
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)
            print(f"Markdown 文件已保存: {file_path}")
        except Exception as e:
            print(f"保存 Markdown 文件失败: {e}")
            raise

View File

@@ -1,374 +0,0 @@
"""
思维导图 LLM 导出器
调用大模型生成专业的任务执行思维导图
"""
import json
import os
from datetime import datetime
from typing import Dict, Any
class MindmapLLMExporter:
    """Mind-map LLM exporter.

    Asks a large language model for a JSON mind-map structure and writes it
    out as markmap-compatible Markdown (a nested bullet list).
    """

    # Class-level defaults only; each instance works on its own copy
    # (see __init__), populated from config.yaml.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template sent to the LLM (runtime string - kept verbatim).
    PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和可视化设计师。你的任务是将以下任务执行数据生成一份结构清晰、层次分明的思维导图。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 思维导图要求
请生成一份完整的思维导图,包含以下核心分支:
### 1. 任务概述
- 任务背景与目标
- 任务范围
### 2. 任务规划
- 任务拆解
- 智能体角色分配
- 工作流程设计
### 3. 执行过程
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案
### 4. 成果产出
- 产出物列表
- 质量评估
### 5. 团队协作
- 智能体之间的协作模式
- 信息传递效率
### 6. 质量评估
- 整体评分
- 各维度评分
### 7. 经验与改进
- 成功经验
- 改进建议
---
## 输出格式要求
请直接输出 JSON 格式,不要包含任何 Markdown 标记。JSON 结构如下:
```json
{{
"title": "思维导图标题",
"root": "中心主题",
"branches": [
{{
"name": "分支主题名称",
"children": [
{{
"name": "子主题名称",
"children": [
{{"name": "具体内容1"}},
{{"name": "具体内容2"}}
]
}}
]
}},
{{
"name": "另一个分支主题",
"children": [
{{"name": "子主题A"}},
{{"name": "子主题B"}}
]
}}
]
}}
```
注意:
- 思维导图层次分明,每个分支至少有 2-3 层深度
- 内容简洁,每个节点字数控制在 50-100 字以内
- 使用简体中文
- 确保 JSON 格式正确,不要有语法错误
- 不要生成"报告总字数"这样的统计信息
"""

    def __init__(self):
        # BUGFIX: _load_llm_config used to mutate the class-level dict via
        # self.LLM_CONFIG, sharing state across all instances.  Work on a
        # per-instance copy instead.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found on the search paths."""
        try:
            import yaml
            # Candidate config-file locations, checked in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]
            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            # Best-effort: a missing/broken config is reported, not fatal.
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate the mind-map file via the LLM.

        Returns True on success, False on any failure (errors are printed).
        """
        try:
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')
            # Restrict the score data to agents that actually took part.
            agents = self._extract_agents(task_outline)
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
            # Serialize the pieces for prompt interpolation.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
            agents_str = ', '.join(agents) if agents else ''
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )
            print("正在调用大模型生成思维导图...")
            mindmap_json_str = self._call_llm(prompt)
            if not mindmap_json_str:
                print("LLM 生成思维导图失败")
                return False
            print(f"思维导图内容生成成功,长度: {len(mindmap_json_str)} 字符")
            self._create_mindmap_from_json(mindmap_json_str, file_path, task_name)
            return True
        except Exception as e:
            print(f"思维导图 LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _extract_agents(self, task_outline: Any) -> list:
        """Collect the distinct agent names referenced by the outline's
        'Collaboration Process' steps; returns [] for malformed input."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []
        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []
        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:  # skip None / empty names
                            agents.add(agent)
        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Return agent_scores restricted to agents participating in this task."""
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}
        if not agents:
            return {}
        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue
            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})
            if not agent_scores_data:
                continue
            # Keep only scores whose agent is in the participating list.
            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores
            if filtered_scores:
                filtered[step_id] = {'aspectList': aspect_list, 'agentScores': filtered_scores}
        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the chat-completion API; return the text, or '' on any failure."""
        try:
            import openai
            # All three settings are required before making a request.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""
            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )
            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=8000,
            )
            if response and response.choices:
                return response.choices[0].message.content
            return ""
        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _fix_json_string(self, json_str: str) -> str:
        """Best-effort repair of common JSON syntax errors in LLM output."""
        import re
        json_str = json_str.strip()
        # Drop trailing commas before a closing brace/bracket.  (One pass
        # replaces the three overlapping substitutions the original ran.)
        json_str = re.sub(r',(\s*[}\]])', r'\1', json_str)
        # BUGFIX: the old  re.sub(r'""', '"', ...)  step was removed.  "" is
        # a perfectly valid empty JSON string, and collapsing it corrupted
        # otherwise-correct documents (e.g. {"a": ""} -> {"a": "}).
        # Convert single-quoted strings (only those without embedded double
        # quotes, to avoid mangling apostrophes inside valid strings).
        single_quotes = re.findall(r"'[^']*'", json_str)
        for sq in single_quotes:
            if '"' not in sq:
                json_str = json_str.replace(sq, sq.replace("'", '"'))
        # Quote bare object keys: {key: ...} -> {"key": ...}
        unquoted_key_pattern = re.compile(r'([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:')
        def fix_unquoted_key(match):
            prefix = match.group(1)
            key = match.group(2)
            return f'{prefix}"{key}":'
        json_str = unquoted_key_pattern.sub(fix_unquoted_key, json_str)
        # Quote bare values, leaving the JSON literals true/false/null alone.
        unquoted_value_pattern = re.compile(r':\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*([,}\]])')
        def replace_unquoted(match):
            value = match.group(1)
            end = match.group(2)
            if value.lower() in ('true', 'false', 'null'):
                return f': {value}{end}'
            else:
                return f': "{value}"{end}'
        json_str = unquoted_value_pattern.sub(replace_unquoted, json_str)
        print(f"JSON 修复后长度: {len(json_str)} 字符")
        return json_str

    def _create_mindmap_from_json(self, json_str: str, file_path: str, task_name: str):
        """Parse the LLM's JSON and write the mind map as a Markdown file.

        Raises on failure so the caller's error handling can report it.
        """
        try:
            json_str = json_str.strip()
            # Strip a surrounding ```json / ``` code fence if present.
            if '```json' in json_str:
                json_str = json_str.split('```json')[1].split('```')[0]
            elif '```' in json_str:
                json_str = json_str.split('```')[1].split('```')[0]
            json_str = self._fix_json_string(json_str)
            mindmap_data = json.loads(json_str)
            # Render to markmap-compatible Markdown.
            markdown_content = self._generate_markdown(mindmap_data, task_name)
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)
            print(f"思维导图已保存: {file_path}")
        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            raise
        except Exception as e:
            print(f"创建思维导图失败: {e}")
            raise

    def _generate_markdown(self, mindmap_data: dict, task_name: str) -> str:
        """Convert the mind-map data to a markmap-compatible nested list."""
        # Root node at 2 spaces; each level below adds 2 more.
        lines = [f"  - {task_name}"]
        for branch in mindmap_data.get('branches', []):
            # Top-level branches are children of the root (indent 4).
            self._add_branch(branch, lines, indent=4)
        return '\n'.join(lines)

    def _add_branch(self, branch: dict, lines: list, indent: int = 2):
        """Append a top-level branch.  The logic was a verbatim copy of
        _add_child_node, so it now simply delegates."""
        self._add_child_node(branch, lines, indent)

    def _add_child_node(self, child: dict, lines: list, indent: int):
        """Recursively append *child* and its descendants as list items."""
        name = child.get('name', '')
        if not name:
            return
        lines.append(f"{' ' * indent}- {name}")
        for grandchild in child.get('children', []):
            if isinstance(grandchild, dict):
                self._add_child_node(grandchild, lines, indent + 2)
            else:
                # Plain-string leaf node.
                lines.append(f"{' ' * (indent + 2)}- {grandchild}")

View File

@@ -1,772 +0,0 @@
"""
PowerPoint 演示文稿 LLM 导出器
调用大模型生成专业的任务执行演示文稿
"""
import json
import os
from datetime import datetime
from typing import Dict, Any
from pptx import Presentation
from pptx.util import Inches, Pt
from pptx.enum.text import PP_ALIGN
from pptx.chart.data import CategoryChartData
from pptx.enum.chart import XL_CHART_TYPE
class PptLLMExporter:
    """PowerPoint deck exporter - has a large language model draft the slide
    content (as JSON) and renders it to .pptx with python-pptx."""

    # OpenAI-compatible endpoint settings, filled in from config.yaml by
    # _load_llm_config().
    # NOTE: the loader assigns via self.LLM_CONFIG[...], which mutates this
    # class-level dict in place - the values are shared by all instances.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }
# Prompt 模板 - 与 docx_llm.py 内容对齐
PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和演示文稿设计师。你的任务是将以下任务执行数据生成一份详细、专业的 PowerPoint 演示文稿内容。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{execution_results}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 演示文稿要求
请生成一份完整的演示文稿包含以下幻灯片共11页
1. 封面页标题、用户ID、日期
2. 目录页5个主要章节其他作为小点
3. 执行摘要:任务整体执行情况概述
4. 任务概述:任务背景与目标、任务范围与边界
5. 任务规划分析:任务拆解的合理性、智能体角色分配的优化建议、工作流程设计
6. 执行过程回顾:各阶段的完成情况、关键决策点、遇到的问题及解决方案
7. 成果产出分析:产出物的质量评估、产出与预期目标的匹配度
8. 团队协作分析:智能体之间的协作模式、信息传递效率
9. 质量评估整体完成质量评分1-10分、各维度的具体评分及理由
10. 经验教训与改进建议:成功经验、存在的问题与不足、改进建议
11. 结束页:感谢语
---
## 输出格式要求
请直接输出 JSON 格式,不要包含任何 Markdown 标记。JSON 结构如下:
```json
{{
"title": "{task_name}",
"user_id": "{user_id}",
"date": "{date}",
"slides": [
{{
"type": "title",
"title": "主标题",
"user_id": "用户ID",
"date": "日期"
}},
{{
"type": "toc",
"title": "目录",
"sections": [
{{"title": "一、执行摘要", "subs": []}},
{{"title": "二、任务概述与规划", "subs": ["任务背景与目标", "任务范围与边界", "任务拆解", "智能体角色"]}},
{{"title": "三、执行过程与成果", "subs": ["阶段完成情况", "关键决策点", "产出质量", "目标匹配度"]}},
{{"title": "四、团队协作与质量评估", "subs": ["协作模式", "信息传递", "质量评分"]}},
{{"title": "五、经验建议与总结", "subs": ["成功经验", "问题与不足", "改进建议"]}}
]
}},
{{
"type": "content",
"title": "执行摘要",
"bullets": ["要点1", "要点2", "要点3"]
}},
{{
"type": "content",
"title": "任务概述",
"bullets": ["任务背景与目标", "任务范围与边界"]
}},
{{
"type": "content",
"title": "任务规划分析",
"bullets": ["任务拆解的合理性", "智能体角色分配", "工作流程设计"]
}},
{{
"type": "content",
"title": "执行过程回顾",
"bullets": ["各阶段完成情况", "关键决策点", "遇到的问题及解决方案"]
}},
{{
"type": "content",
"title": "成果产出分析",
"bullets": ["产出物质量评估", "与预期目标匹配度"]
}},
{{
"type": "content",
"title": "团队协作分析",
"bullets": ["智能体协作模式", "信息传递效率"]
}},
{{
"type": "content",
"title": "质量评估",
"bullets": ["整体评分1-10分", "各维度评分及理由"]
}},
{{
"type": "content",
"title": "经验教训与改进建议",
"bullets": ["成功经验", "存在的问题与不足", "改进建议"]
}},
{{
"type": "ending",
"title": "感谢聆听"
}},
{{
"type": "table",
"title": "质量评分表",
"headers": ["维度", "评分", "说明"],
"rows": [["整体质量", "8.5", "表现优秀"], ["协作效率", "7.5", "有待提升"]]
}},
{{
"type": "chart",
"title": "任务完成进度",
"chart_type": "bar",
"categories": ["规划阶段", "执行阶段", "评估阶段"],
"series": [
{{"name": "完成度", "values": [100, 85, 90]}}
]
}}
]
}}
```
**重要说明:**
1. **幻灯片顺序**slides 数组中的幻灯片顺序必须严格按照以下顺序:
- 第1页type="title" 封面页
- 第2页type="toc" 目录页
- 第3-10页type="content" 内容页8页左右
- 第11-14页type="table""chart" 表格/图表页
- 最后一页type="ending" 结束页
2. **每张幻灯片必须包含完整的字段**
- `content` 类型必须有 `title` 和 `bullets` 数组
- `table` 类型必须有 `title`、`headers` 数组和 `rows` 数组
- `chart` 类型必须有 `title`、`chart_type`、`categories` 数组和 `series` 数组
- `ending` 类型必须有 `title`
3. **禁止出现"单击此处添加标题"**:所有幻灯片都必须填写完整内容,不能留空
注意:
- 幻灯片数量为11-15 页
- 每页内容详细一点,使用要点式呈现,需要表格或者图表增强可读性
- 不要生成"报告总字数"这样的统计信息
- 所有文字使用简体中文
- 根据实际任务数据生成内容,不是编造
- **禁止使用中文引号「」『』""** ,所有引号必须是英文双引号 ""
- user_id 填写:{user_id}
- date 填写:{date}
"""
def __init__(self):
    """Load the LLM endpoint configuration at construction time."""
    self._load_llm_config()
def _load_llm_config(self):
    """Populate LLM_CONFIG from the first config.yaml that can be found.

    Tries a few candidate locations (package-relative, backend/, then the
    current working directory) and stops at the first existing file whose
    YAML parses to a truthy mapping. Any failure is logged and swallowed,
    leaving LLM_CONFIG at its defaults (all None).
    """
    try:
        import yaml
        # Candidate config locations, checked in order.
        possible_paths = [
            os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
            os.path.join(os.getcwd(), 'config', 'config.yaml'),
        ]
        for config_path in possible_paths:
            if os.path.exists(config_path):
                with open(config_path, 'r', encoding='utf-8') as f:
                    config = yaml.safe_load(f)
                if config:
                    # NOTE: assigning into self.LLM_CONFIG mutates the
                    # class-level dict shared by all instances.
                    self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                    self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                    self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                    print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                    return
    except Exception as e:
        print(f"加载 LLM 配置失败: {e}")
def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
    """Generate a .pptx at *file_path* from the task execution data.

    Builds the prompt from the outline / execution log / scores held in
    *task_data*, asks the LLM for a JSON slide spec, and renders it via
    _create_ppt_from_json. Returns True on success, False on any failure
    (errors are printed rather than raised).
    """
    try:
        task_name = task_data.get('task_name', '未命名任务')
        task_content = task_data.get('task_content', '')
        task_outline = task_data.get('task_outline')
        rehearsal_log = task_data.get('rehearsal_log')
        agent_scores = task_data.get('agent_scores')
        user_id = task_data.get('user_id', '')
        date = task_data.get('date', '')
        # Restrict the score section to agents that actually took part.
        agents = self._extract_agents(task_outline)
        filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
        task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
        rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
        agents_str = ', '.join(agents) if agents else ''
        agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
        # NOTE: PROMPT_TEMPLATE has no {task_content} placeholder;
        # str.format silently ignores the unused keyword argument.
        prompt = self.PROMPT_TEMPLATE.format(
            task_name=task_name,
            task_content=task_content,
            task_outline=task_outline_str,
            execution_results=rehearsal_log_str,
            agents=agents_str,
            agent_scores=agent_scores_str,
            user_id=user_id,
            date=date
        )
        print("正在调用大模型生成 PPT 内容...")
        ppt_json_str = self._call_llm(prompt)
        if not ppt_json_str:
            print("LLM 生成 PPT 失败")
            return False
        print(f"PPT 内容生成成功,长度: {len(ppt_json_str)} 字符")
        self._create_ppt_from_json(ppt_json_str, file_path, task_name)
        return True
    except Exception as e:
        print(f"PPT LLM 导出失败: {e}")
        import traceback
        traceback.print_exc()
        return False
def _extract_agents(self, task_outline: Any) -> list:
agents = set()
if not task_outline or not isinstance(task_outline, dict):
return []
collaboration_process = task_outline.get('Collaboration Process', [])
if not collaboration_process or not isinstance(collaboration_process, list):
return []
for step in collaboration_process:
if isinstance(step, dict):
agent_selection = step.get('AgentSelection', [])
if isinstance(agent_selection, list):
for agent in agent_selection:
if agent:
agents.add(agent)
return list(agents)
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
if not agent_scores or not isinstance(agent_scores, dict):
return {}
if not agents:
return {}
filtered = {}
for step_id, step_data in agent_scores.items():
if not isinstance(step_data, dict):
continue
aspect_list = step_data.get('aspectList', [])
agent_scores_data = step_data.get('agentScores', {})
if not agent_scores_data:
continue
filtered_scores = {}
for agent_name, scores in agent_scores_data.items():
if agent_name in agents and isinstance(scores, dict):
filtered_scores[agent_name] = scores
if filtered_scores:
filtered[step_id] = {'aspectList': aspect_list, 'agentScores': filtered_scores}
return filtered
def _call_llm(self, prompt: str) -> str:
try:
import openai
if not self.LLM_CONFIG['OPENAI_API_KEY']:
print("错误: OPENAI_API_KEY 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_BASE']:
print("错误: OPENAI_API_BASE 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_MODEL']:
print("错误: OPENAI_API_MODEL 未配置")
return ""
client = openai.OpenAI(
api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
base_url=self.LLM_CONFIG['OPENAI_API_BASE']
)
response = client.chat.completions.create(
model=self.LLM_CONFIG['OPENAI_API_MODEL'],
messages=[{"role": "user", "content": prompt}],
temperature=0.7,
max_tokens=8000,
)
if response and response.choices:
return response.choices[0].message.content
return ""
except ImportError:
print("请安装 openai 库: pip install openai")
return ""
except Exception as e:
print(f"调用 LLM 失败: {e}")
return ""
def _fix_json_string(self, json_str: str) -> str:
"""修复常见的 JSON 语法错误"""
import re
json_str = json_str.strip()
# 首先尝试提取 JSON 代码块
if '```json' in json_str:
json_str = json_str.split('```json')[1].split('```')[0]
elif '```' in json_str:
# 检查是否有结束标记
parts = json_str.split('```')
if len(parts) >= 3:
json_str = parts[1]
else:
# 没有结束标记,可能是代码块内容
json_str = parts[1] if len(parts) > 1 else json_str
json_str = json_str.strip()
# 关键修复:处理中文双引号包含英文双引号的情况
# 例如:"工作流程采用"提出-评估-改进"的模式"
# 需要:找到中文引号对,去除它们,同时转义内部的英文引号
import re
# 中文引号 Unicode
CHINESE_LEFT = '\u201c' # "
CHINESE_RIGHT = '\u201d' # "
# 使用正则匹配中文引号对之间的内容,并处理内部的英文引号
# 匹配 "内容" 格式(中文引号对)
pattern = re.compile(r'(\u201c)(.*?)(\u201d)')
def replace_chinese_quotes(match):
content = match.group(2)
# 将内部未转义的英文双引号转义
content = re.sub(r'(?<!\\)"', r'\\"', content)
return content
json_str = pattern.sub(replace_chinese_quotes, json_str)
# 现在移除所有中文引号字符(已经被处理过了)
json_str = json_str.replace(CHINESE_LEFT, '')
json_str = json_str.replace(CHINESE_RIGHT, '')
json_str = json_str.replace('\u2018', "'") # 中文左单引号 '
json_str = json_str.replace('\u2019', "'") # 中文右单引号 '
# 移除行尾的逗号trailing comma
json_str = re.sub(r',\s*}', '}', json_str)
json_str = re.sub(r',\s*]', ']', json_str)
# 修复空字符串
json_str = re.sub(r'""', '"', json_str)
# 修复单引号
single_quotes = re.findall(r"'[^']*'", json_str)
for sq in single_quotes:
if '"' not in sq:
json_str = json_str.replace(sq, sq.replace("'", '"'))
# 移除 trailing comma
trailing_comma_pattern = re.compile(r',(\s*[}\]])')
json_str = trailing_comma_pattern.sub(r'\1', json_str)
# 修复未加引号的 key
unquoted_key_pattern = re.compile(r'([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:')
def fix_unquoted_key(match):
prefix = match.group(1)
key = match.group(2)
return f'{prefix}"{key}":'
json_str = unquoted_key_pattern.sub(fix_unquoted_key, json_str)
# 修复未加引号的值(但保留 true/false/null
unquoted_value_pattern = re.compile(r':\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*([,}\]])')
def replace_unquoted(match):
value = match.group(1)
end = match.group(2)
if value.lower() in ('true', 'false', 'null'):
return f': {value}{end}'
else:
return f': "{value}"{end}'
json_str = unquoted_value_pattern.sub(replace_unquoted, json_str)
# 新增:修复多余的逗号(连续逗号)
json_str = re.sub(r',,+,', ',', json_str)
# 新增修复缺少冒号的情况key 后面没有冒号)
# 匹配 "key" value 而不是 "key": value
json_str = re.sub(r'(\"[^\"]+\")\s+([\[{])', r'\1: \2', json_str)
# 新增:尝试修复换行符问题,将单个 \n 替换为 \\n
# 但这可能会导致问题,所以先注释掉
# json_str = json_str.replace('\n', '\\n')
# 新增:移除可能的 BOM 或不可见字符
json_str = json_str.replace('\ufeff', '')
print(f"JSON 修复后长度: {len(json_str)} 字符")
return json_str
def _create_ppt_from_json(self, json_str: str, file_path: str, task_name: str):
    """Parse the LLM's JSON slide spec and build the .pptx at *file_path*.

    Builds a blank 16:9 deck (no template) and dispatches each slide dict
    to the matching _add_*_slide helper; 'ending' slides are always moved
    to the end regardless of their position in the JSON. Raises on
    unparsable JSON or any python-pptx failure.
    """
    try:
        json_str = json_str.strip()
        # Unwrap a fenced code block before the heavier repairs.
        if '```json' in json_str:
            json_str = json_str.split('```json')[1].split('```')[0]
        elif '```' in json_str:
            json_str = json_str.split('```')[1].split('```')[0]
        json_str = self._fix_json_string(json_str)
        # Parse; on failure print the surrounding context to aid debugging.
        try:
            replace_data = json.loads(json_str)
        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            print(f"错误位置: 第 {e.lineno} 行, 第 {e.colno} 列")
            start = max(0, e.pos - 100)
            end = min(len(json_str), e.pos + 100)
            context = json_str[start:end]
            print(f"错误上下文: ...{context}...")
            raise
        # Blank 13.333" x 7.5" (16:9) presentation, no corporate template.
        prs = Presentation()
        prs.slide_width = Inches(13.333)
        prs.slide_height = Inches(7.5)
        slides_data = replace_data.get('slides', [])
        # Separate 'ending' slides so they are always appended last.
        ending_slides = [s for s in slides_data if s.get('type') == 'ending']
        other_slides = [s for s in slides_data if s.get('type') != 'ending']
        for slide_data in other_slides:
            # Unknown slide types are silently skipped by this chain.
            slide_type = slide_data.get('type', 'content')
            if slide_type == 'title':
                self._add_title_slide(prs, slide_data)
            elif slide_type == 'toc':
                self._add_toc_slide(prs, slide_data)
            elif slide_type == 'content':
                self._add_content_slide(prs, slide_data)
            elif slide_type == 'two_column':
                self._add_two_column_slide(prs, slide_data)
            elif slide_type == 'table':
                self._add_table_slide(prs, slide_data)
            elif slide_type == 'chart':
                self._add_chart_slide(prs, slide_data)
        for slide_data in ending_slides:
            self._add_ending_slide(prs, slide_data)
        prs.save(file_path)
        print(f"PowerPoint 已保存: {file_path}")
    except ImportError:
        print("请安装 python-pptx 库: pip install python-pptx")
        raise
    except json.JSONDecodeError as e:
        print(f"JSON 解析失败: {e}")
        raise
    except Exception as e:
        print(f"创建 PPT 失败: {e}")
        raise
def _add_title_slide(self, prs, data):
    """Build the cover slide: centred title plus user-id and date lines."""
    slide = prs.slides.add_slide(prs.slide_layouts[6])  # blank layout
    # Purge placeholder stubs so no "click to add title" text survives.
    stub_texts = ('单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text')
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in stub_texts:
            slide.shapes.remove(shape)

    def centred_text(top, height, text, size, bold):
        """Drop one full-width, centre-aligned text box on the slide."""
        box = slide.shapes.add_textbox(Inches(0.5), Inches(top), Inches(12.333), Inches(height))
        para = box.text_frame.paragraphs[0]
        para.text = text
        para.font.size = Pt(size)
        if bold:
            para.font.bold = True
        para.alignment = PP_ALIGN.CENTER

    centred_text(2.5, 1.5, data.get('title', ''), 44, True)
    centred_text(4.2, 0.5, f"用户ID: {data.get('user_id', '')}", 18, False)
    centred_text(4.8, 0.5, f"日期: {data.get('date', '')}", 18, False)
def _add_toc_slide(self, prs, data):
    """Add the table-of-contents slide: bold section titles with indented
    sub-items, all in one text frame."""
    slide_layout = prs.slide_layouts[6]  # blank layout
    slide = prs.slides.add_slide(slide_layout)
    # Remove leftover placeholder shapes ("click to add title" stubs).
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
            slide.shapes.remove(shape)
    # Slide title.
    title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.4), Inches(12.333), Inches(0.8))
    tf = title_box.text_frame
    p = tf.paragraphs[0]
    p.text = data.get('title', '目录')
    p.font.size = Pt(36)
    p.font.bold = True
    # Section list (single column; top-level entries bold, subs one level in).
    content_box = slide.shapes.add_textbox(Inches(0.5), Inches(1.5), Inches(12.333), Inches(5.5))
    tf = content_box.text_frame
    tf.word_wrap = True
    sections = data.get('sections', [])
    for i, section in enumerate(sections):
        section_title = section.get('title', '')
        subs = section.get('subs', [])
        # The frame starts with one empty paragraph; reuse it for the first
        # entry, then append new paragraphs.
        if i == 0:
            p = tf.paragraphs[0]
        else:
            p = tf.add_paragraph()
        p.text = section_title
        p.level = 0
        p.font.size = Pt(20)
        p.font.bold = True
        # Sub-items, indented one outline level.
        for sub in subs:
            p_sub = tf.add_paragraph()
            p_sub.text = sub
            p_sub.level = 1
            p_sub.font.size = Pt(16)
def _add_ending_slide(self, prs, data):
    """Build the closing slide with a large centred thank-you line."""
    slide = prs.slides.add_slide(prs.slide_layouts[6])  # blank layout
    # Purge placeholder stubs left by the layout.
    stub_texts = ('单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text')
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in stub_texts:
            slide.shapes.remove(shape)
    frame = slide.shapes.add_textbox(Inches(0.5), Inches(3), Inches(12.333), Inches(1.5)).text_frame
    para = frame.paragraphs[0]
    para.text = data.get('title', '感谢聆听')
    para.font.size = Pt(44)
    para.font.bold = True
    para.alignment = PP_ALIGN.CENTER
def _add_content_slide(self, prs, data):
    """Build a standard slide: bold title plus a flat bullet list."""
    slide = prs.slides.add_slide(prs.slide_layouts[6])  # blank layout
    # Purge placeholder stubs so no "click to add title" text survives.
    stub_texts = ('单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text')
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in stub_texts:
            slide.shapes.remove(shape)
    # Title bar.
    head_frame = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.8)).text_frame
    head = head_frame.paragraphs[0]
    head.text = data.get('title', '')
    head.font.size = Pt(32)
    head.font.bold = True
    # Bullet body: reuse the frame's first paragraph for the first bullet.
    body = slide.shapes.add_textbox(Inches(0.5), Inches(1.3), Inches(12.333), Inches(5.8)).text_frame
    body.word_wrap = True
    for idx, bullet in enumerate(data.get('bullets', [])):
        para = body.paragraphs[0] if idx == 0 else body.add_paragraph()
        para.text = bullet
        para.level = 0
        para.font.size = Pt(18)
def _add_two_column_slide(self, prs, data):
    """Add a slide with a title and two side-by-side bullet columns."""
    slide_layout = prs.slide_layouts[6]  # blank layout
    slide = prs.slides.add_slide(slide_layout)
    # Remove leftover placeholder shapes.
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
            slide.shapes.remove(shape)
    # Slide title.
    title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.8))
    tf = title_box.text_frame
    p = tf.paragraphs[0]
    p.text = data.get('title', '')
    p.font.size = Pt(32)
    p.font.bold = True
    # Left column: heading paragraph followed by indented bullets.
    left_box = slide.shapes.add_textbox(Inches(0.5), Inches(1.3), Inches(5.8), Inches(5.8))
    tf = left_box.text_frame
    tf.word_wrap = True
    left_title = data.get('left_title', '')
    p = tf.paragraphs[0]
    p.text = left_title
    p.font.size = Pt(22)
    p.font.bold = True
    left_bullets = data.get('left_bullets', [])
    for bullet in left_bullets:
        p = tf.add_paragraph()
        p.text = bullet
        p.level = 1
        p.font.size = Pt(16)
    # Right column: same layout, shifted to x = 7.0".
    right_box = slide.shapes.add_textbox(Inches(7.0), Inches(1.3), Inches(5.8), Inches(5.8))
    tf = right_box.text_frame
    tf.word_wrap = True
    right_title = data.get('right_title', '')
    p = tf.paragraphs[0]
    p.text = right_title
    p.font.size = Pt(22)
    p.font.bold = True
    right_bullets = data.get('right_bullets', [])
    for bullet in right_bullets:
        p = tf.add_paragraph()
        p.text = bullet
        p.level = 1
        p.font.size = Pt(16)
def _add_table_slide(self, prs, data):
    """Add a slide containing a title and a simple header+rows table."""
    slide_layout = prs.slide_layouts[6]  # blank layout
    slide = prs.slides.add_slide(slide_layout)
    # Remove leftover placeholder shapes.
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
            slide.shapes.remove(shape)
    # Slide title.
    title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12), Inches(0.6))
    tf = title_box.text_frame
    p = tf.paragraphs[0]
    p.text = data.get('title', '')
    p.font.size = Pt(32)
    p.font.bold = True
    # Table: one header row plus one row per entry; skipped entirely when
    # either headers or rows is empty.
    headers = data.get('headers', [])
    rows = data.get('rows', [])
    if headers and rows:
        x, y, cx, cy = Inches(0.5), Inches(1.5), Inches(12), Inches(3)
        table = slide.shapes.add_table(len(rows) + 1, len(headers), x, y, cx, cy).table
        # Header row.
        for i, header in enumerate(headers):
            table.cell(0, i).text = header
        # Data rows.
        # NOTE(review): a row longer than headers would raise IndexError here;
        # presumably the prompt guarantees matching widths - confirm upstream.
        for row_idx, row_data in enumerate(rows):
            for col_idx, cell_data in enumerate(row_data):
                table.cell(row_idx + 1, col_idx).text = cell_data
def _add_chart_slide(self, prs, data):
    """Add a slide with a native chart (bar/line/pie) built from the spec."""
    slide_layout = prs.slide_layouts[6]  # blank layout
    slide = prs.slides.add_slide(slide_layout)
    # Remove leftover placeholder shapes.
    for shape in list(slide.shapes):
        if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
            slide.shapes.remove(shape)
    # Slide title.
    title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.6))
    tf = title_box.text_frame
    p = tf.paragraphs[0]
    p.text = data.get('title', '')
    p.font.size = Pt(32)
    p.font.bold = True
    # Chart spec from the LLM JSON.
    chart_type_str = data.get('chart_type', 'bar').lower()
    categories = data.get('categories', [])
    series_list = data.get('series', [])
    # Map both English and Chinese type names to python-pptx chart types;
    # unknown names fall back to a clustered column chart.
    chart_type_map = {
        'bar': XL_CHART_TYPE.COLUMN_CLUSTERED,
        'column': XL_CHART_TYPE.COLUMN_CLUSTERED,
        'line': XL_CHART_TYPE.LINE,
        'pie': XL_CHART_TYPE.PIE,
        '饼图': XL_CHART_TYPE.PIE,
        '柱状图': XL_CHART_TYPE.COLUMN_CLUSTERED,
        '折线图': XL_CHART_TYPE.LINE,
    }
    chart_type = chart_type_map.get(chart_type_str, XL_CHART_TYPE.COLUMN_CLUSTERED)
    # Category chart data: one shared category axis, one entry per series.
    chart_data = CategoryChartData()
    chart_data.categories = categories
    for series in series_list:
        series_name = series.get('name', '数据')
        values = series.get('values', [])
        chart_data.add_series(series_name, values)
    # Place the chart below the title, spanning the slide width.
    x, y, cx, cy = Inches(0.5), Inches(1.2), Inches(12.333), Inches(5.5)
    chart = slide.shapes.add_chart(chart_type, x, y, cx, cy, chart_data).chart
# Backwards-compatibility shim for callers of the old method name.
def _load_llm_config_old(self):
    """Deprecated alias for _load_llm_config()."""
    self._load_llm_config()

View File

@@ -1,481 +0,0 @@
"""
Excel 文档 LLM 报告导出器
调用大模型生成专业的任务执行报告并保存为Excel格式
"""
import json
import os
from datetime import datetime
from typing import Dict, Any
try:
from openpyxl import Workbook
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
from openpyxl.utils import get_column_letter
except ImportError:
pass
class XlsxLLMExporter:
    """Excel report exporter - has a large language model write the report
    text (Markdown), then renders it into a styled .xlsx workbook."""

    # OpenAI-compatible endpoint settings, filled in from config.yaml by
    # _load_llm_config().
    # NOTE: the loader assigns via self.LLM_CONFIG[...], which mutates this
    # class-level dict in place - the values are shared by all instances.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }
PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和数据分析专家。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。
## 任务基本信息
- 任务名称:{task_name}
## 任务大纲(规划阶段)
{task_outline}
## 执行结果
{rehearsal_log}
## 参与智能体
{agents}
## 智能体评分
{agent_scores}
---
## 报告要求
请生成一份完整的任务执行报告,包含以下章节:
### 1. 执行摘要
用 2-3 句话概括本次任务的整体执行情况。
### 2. 任务概述
- 任务背景与目标
- 任务范围与边界
### 3. 任务规划分析
- 任务拆解的合理性
- 智能体角色分配的优化建议
- 工作流程设计
### 4. 执行过程回顾
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案
### 5. 成果产出分析
- 产出物的质量评估
- 产出与预期目标的匹配度
### 6. 团队协作分析
- 智能体之间的协作模式
- 信息传递效率
### 7. 质量评估
- 整体完成质量评分1-10分
- 各维度的具体评分及理由
### 8. 经验教训与改进建议
- 成功经验
- 存在的问题与不足
- 改进建议
---
## 输出格式要求
- 使用 Markdown 格式输出
- 语言:简体中文
- 适当使用列表、表格增强可读性
- 报告长度必须达到 3000-5000 字,每个章节都要详细展开,不要遗漏任何章节
- 每个章节的内容要充实,提供具体的分析和建议
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
- 不要生成附录章节
- 不要在报告中显示"报告总字数"这样的统计信息
"""
def __init__(self):
    """Load the LLM endpoint configuration at construction time."""
    self._load_llm_config()
def _load_llm_config(self):
    """Populate LLM_CONFIG from the first config.yaml that can be found.

    Tries a few candidate locations (package-relative, backend/, then the
    current working directory) and stops at the first existing file whose
    YAML parses to a truthy mapping. Any failure is logged and swallowed,
    leaving LLM_CONFIG at its defaults (all None).
    """
    try:
        import yaml
        # Candidate config locations, checked in order.
        possible_paths = [
            os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
            os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
            os.path.join(os.getcwd(), 'config', 'config.yaml'),
        ]
        for config_path in possible_paths:
            if os.path.exists(config_path):
                with open(config_path, 'r', encoding='utf-8') as f:
                    config = yaml.safe_load(f)
                if config:
                    # NOTE: assigning into self.LLM_CONFIG mutates the
                    # class-level dict shared by all instances.
                    self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                    self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                    self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                    print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                    return
    except Exception as e:
        print(f"加载 LLM 配置失败: {e}")
def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
    """Generate an .xlsx report at *file_path* from the task execution data.

    Builds the report prompt, asks the LLM for a Markdown report, strips a
    duplicated title heading, and renders the Markdown into a workbook via
    _save_as_excel. Returns True on success, False on any failure (errors
    are printed rather than raised).
    """
    try:
        task_name = task_data.get('task_name', '未命名任务')
        task_outline = task_data.get('task_outline')
        rehearsal_log = task_data.get('rehearsal_log')
        agent_scores = task_data.get('agent_scores')
        # Restrict the score section to agents that actually took part.
        agents = self._extract_agents(task_outline)
        filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
        task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else ''
        rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else ''
        agents_str = ', '.join(agents) if agents else ''
        agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else ''
        prompt = self.PROMPT_TEMPLATE.format(
            task_name=task_name,
            task_outline=task_outline_str,
            rehearsal_log=rehearsal_log_str,
            agents=agents_str,
            agent_scores=agent_scores_str
        )
        print("正在调用大模型生成 Excel 报告...")
        report_content = self._call_llm(prompt)
        if not report_content:
            print("LLM 生成报告失败")
            return False
        # Drop the duplicated "任务执行报告" heading the model tends to emit.
        report_content = self._clean_report_title(report_content)
        print(f"报告生成成功,长度: {len(report_content)} 字符")
        self._save_as_excel(report_content, file_path, task_name)
        return True
    except Exception as e:
        print(f"Excel LLM 导出失败: {e}")
        import traceback
        traceback.print_exc()
        return False
def _clean_report_title(self, content: str) -> str:
"""清理报告开头的重复标题"""
lines = content.split('\n')
if not lines:
return content
first_line = lines[0].strip()
if first_line == '任务执行报告' or first_line == '# 任务执行报告':
lines = lines[1:]
while lines and not lines[0].strip():
lines.pop(0)
return '\n'.join(lines)
def _extract_agents(self, task_outline: Any) -> list:
"""从 task_outline 中提取参与智能体列表"""
agents = set()
if not task_outline or not isinstance(task_outline, dict):
return []
collaboration_process = task_outline.get('Collaboration Process', [])
if not collaboration_process or not isinstance(collaboration_process, list):
return []
for step in collaboration_process:
if isinstance(step, dict):
agent_selection = step.get('AgentSelection', [])
if isinstance(agent_selection, list):
for agent in agent_selection:
if agent:
agents.add(agent)
return list(agents)
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
"""过滤 agent_scores只保留参与当前任务的智能体评分"""
if not agent_scores or not isinstance(agent_scores, dict):
return {}
if not agents:
return {}
filtered = {}
for step_id, step_data in agent_scores.items():
if not isinstance(step_data, dict):
continue
aspect_list = step_data.get('aspectList', [])
agent_scores_data = step_data.get('agentScores', {})
if not agent_scores_data:
continue
filtered_scores = {}
for agent_name, scores in agent_scores_data.items():
if agent_name in agents and isinstance(scores, dict):
filtered_scores[agent_name] = scores
if filtered_scores:
filtered[step_id] = {
'aspectList': aspect_list,
'agentScores': filtered_scores
}
return filtered
def _call_llm(self, prompt: str) -> str:
"""调用大模型 API 生成报告"""
try:
import openai
if not self.LLM_CONFIG['OPENAI_API_KEY']:
print("错误: OPENAI_API_KEY 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_BASE']:
print("错误: OPENAI_API_BASE 未配置")
return ""
if not self.LLM_CONFIG['OPENAI_API_MODEL']:
print("错误: OPENAI_API_MODEL 未配置")
return ""
client = openai.OpenAI(
api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
base_url=self.LLM_CONFIG['OPENAI_API_BASE']
)
response = client.chat.completions.create(
model=self.LLM_CONFIG['OPENAI_API_MODEL'],
messages=[
{"role": "user", "content": prompt}
],
temperature=0.7,
max_tokens=10000,
)
if response and response.choices:
return response.choices[0].message.content
return ""
except ImportError:
print("请安装 openai 库: pip install openai")
return ""
except Exception as e:
print(f"调用 LLM 失败: {e}")
return ""
def _save_as_excel(self, markdown_content: str, file_path: str, task_name: str):
    """Render the LLM's Markdown report into a styled Excel workbook.

    Markdown headings become merged highlight rows, Markdown tables become
    real cell grids (capped at two columns by _write_table_to_excel), and
    everything else is written as wrapped, merged text rows.
    Raises if openpyxl is missing or if writing the workbook fails.
    """
    try:
        from openpyxl import Workbook
        from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
        from openpyxl.utils import get_column_letter
        wb = Workbook()
        ws = wb.active
        ws.title = "任务执行报告"
        # Shared styles (unused title_font/section_font locals removed).
        header_fill = PatternFill(start_color="4472C4", end_color="4472C4", fill_type="solid")
        header_font = Font(bold=True, color="FFFFFF", size=12)
        normal_font = Font(size=10)
        thin_border = Border(
            left=Side(style='thin'),
            right=Side(style='thin'),
            top=Side(style='thin'),
            bottom=Side(style='thin')
        )
        ws.column_dimensions['A'].width = 20
        ws.column_dimensions['B'].width = 80
        # Title row and export timestamp, both merged across A:B.
        row = 1
        ws[f'A{row}'] = task_name
        ws[f'A{row}'].font = Font(bold=True, size=16)
        ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
        ws.merge_cells(f'A{row}:B{row}')
        row += 1
        ws[f'A{row}'] = f"导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        ws[f'A{row}'].font = Font(size=9, italic=True)
        ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
        ws.merge_cells(f'A{row}:B{row}')
        row += 2
        # Line-oriented Markdown scan. Table rows are buffered in table_data
        # and flushed whenever a blank or non-table line is reached.
        lines = markdown_content.split('\n')
        current_section = ""
        table_data = []
        in_table = False
        for line in lines:
            line = line.rstrip()
            if not line:
                if in_table and table_data:
                    row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
                    table_data = []
                    in_table = False
                continue
            stripped = line.strip()
            # Markdown table separator rows (|---|---|) carry no data.
            if stripped.startswith('|') and stripped.endswith('|') and '---' in stripped:
                continue
            if '|' in line and line.strip().startswith('|'):
                # Buffer a table row; split drops the leading/trailing pipes.
                cells = [cell.strip() for cell in line.split('|')[1:-1]]
                if cells and any(cells):
                    table_data.append(cells)
                    in_table = True
                continue
            else:
                if in_table and table_data:
                    row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
                    table_data = []
                    in_table = False
            if line.startswith('### '):
                if table_data:
                    row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
                    table_data = []
                current_section = line[4:].strip()
                ws[f'A{row}'] = current_section
                ws[f'A{row}'].font = Font(bold=True, size=12, color="4472C4")
                ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
                ws.merge_cells(f'A{row}:B{row}')
                row += 1
            elif line.startswith('## '):
                if table_data:
                    row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
                    table_data = []
                section_title = line[2:].strip()
                ws[f'A{row}'] = section_title
                ws[f'A{row}'].font = Font(bold=True, size=13)
                ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
                ws.merge_cells(f'A{row}:B{row}')
                row += 1
            elif line.startswith('# '):
                # The H1 duplicates the workbook title rendered above; skip.
                pass
            elif line.startswith('#### '):
                if table_data:
                    row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
                    table_data = []
                current_section = line[5:].strip()
                ws[f'A{row}'] = current_section
                ws[f'A{row}'].font = Font(bold=True, size=11, color="4472C4")
                ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
                ws.merge_cells(f'A{row}:B{row}')
                row += 1
            elif line.startswith('- ') or line.startswith('* ') or line.startswith('•'):
                # BUGFIX: the '•' literals had been garbled to '' (making
                # startswith('') match every line and losing the bullet
                # prefix); the bullet character is restored here.
                text = line[2:].strip() if line.startswith(('- ', '* ')) else line[1:].strip()
                text = self._clean_markdown(text)
                ws[f'A{row}'] = "• " + text
                ws[f'A{row}'].font = normal_font
                ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
                ws.merge_cells(f'A{row}:B{row}')
                row += 1
            elif line.startswith('**') and '**:' in line:
                # "**key**: value" style lines become a single bold row.
                parts = line.split(':', 1)
                if len(parts) == 2:
                    key = self._clean_markdown(parts[0])
                    value = self._clean_markdown(parts[1])
                    ws[f'A{row}'] = f"{key}: {value}"
                    ws[f'A{row}'].font = Font(bold=True, size=10)
                    ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
                    ws.merge_cells(f'A{row}:B{row}')
                    row += 1
            else:
                clean_line = self._clean_markdown(line)
                if clean_line:
                    ws[f'A{row}'] = clean_line
                    ws[f'A{row}'].font = normal_font
                    ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
                    ws.merge_cells(f'A{row}:B{row}')
                    row += 1
        # Flush a table that ran to the end of the document.
        if table_data:
            row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
        ws.column_dimensions['B'].width = 80
        # Best-fit row heights (a hint; Excel recalculates on open).
        for r in range(1, row + 1):
            ws.row_dimensions[r].bestFit = True
        # Give untouched cells an explicit no-border (openpyxl normalises
        # the 'none' style to "no line") so merged regions render cleanly.
        for col in ['A', 'B']:
            for r in range(1, row + 1):
                cell = ws[f'{col}{r}']
                if cell.border is None or cell.border == Border():
                    cell.border = Border(
                        left=Side(style='none'),
                        right=Side(style='none'),
                        top=Side(style='none'),
                        bottom=Side(style='none')
                    )
        wb.save(file_path)
        print(f"Excel 文档已保存: {file_path}")
    except ImportError:
        print("请安装 openpyxl 库: pip install openpyxl")
        raise
    except Exception as e:
        print(f"保存 Excel 文档失败: {e}")
        raise
def _write_table_to_excel(self, ws, table_data, row, header_fill, header_font, border, normal_font):
    """Write buffered Markdown table rows starting at *row*; return the
    next free row index (one blank spacer row is left after the table).

    A single-row "table" is written as one merged text row. Real tables are
    capped at two columns (the sheet layout only uses columns A and B) and
    get their first row styled as a header.
    """
    if not table_data:
        return row
    # Degenerate one-row table: treat it as plain merged text.
    if len(table_data) == 1:
        ws[f'A{row}'] = table_data[0][0] if table_data[0] else ""
        ws[f'A{row}'].font = normal_font
        ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
        ws.merge_cells(f'A{row}:B{row}')
        return row + 1
    max_cols = max(len(row_data) for row_data in table_data)
    max_cols = min(max_cols, 2)  # sheet layout only styles columns A/B
    for col_idx in range(max_cols):
        col_letter = get_column_letter(col_idx + 1)
        ws.column_dimensions[col_letter].width = 25 if max_cols > 1 else 80
    start_row = row
    for row_idx, row_data in enumerate(table_data):
        for col_idx in range(min(len(row_data), max_cols)):
            col_letter = get_column_letter(col_idx + 1)
            cell = ws[f'{col_letter}{row + row_idx}']
            # Strip Markdown emphasis before writing the cell value.
            cell.value = self._clean_markdown(row_data[col_idx])
            cell.font = header_font if row_idx == 0 else normal_font
            cell.fill = header_fill if row_idx == 0 else PatternFill()
            cell.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
            cell.border = border
        # Border any trailing cells so short rows still draw a full grid.
        if max_cols > 1:
            for col_idx in range(len(row_data), max_cols):
                col_letter = get_column_letter(col_idx + 1)
                ws[f'{col_letter}{row + row_idx}'].border = border
    return row + len(table_data) + 1
def _clean_markdown(self, text: str) -> str:
"""清理 Markdown 格式标记"""
import re
text = re.sub(r'\*\*(.+?)\*\*', r'\1', text)
text = re.sub(r'\*(.+?)\*', r'\1', text)
text = re.sub(r'__(.+?)__', r'\1', text)
text = re.sub(r'_(.+?)_', r'\1', text)
text = re.sub(r'~~(.+?)~~', r'\1', text)
text = re.sub(r'`(.+?)`', r'\1', text)
text = text.replace('\\n', '\n').replace('\\t', '\t')
return text.strip()

View File

@@ -1,35 +1,24 @@
import asyncio
import httpx
from openai import OpenAI, AsyncOpenAI, max_retries
import openai
import yaml
from termcolor import colored
import os
# Helper function to avoid circular import
def print_colored(text, text_color="green", background="on_white"):
print(colored(text, text_color, background))
# load config (apikey, apibase, model)
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
with open(yaml_file, "r", encoding="utf-8") as file:
yaml_data = yaml.safe_load(file)
except Exception:
yaml_data = {}
yaml_file = {}
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
"OPENAI_API_BASE", "https://api.openai.com"
)
openai.api_base = OPENAI_API_BASE
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
"OPENAI_API_KEY", ""
)
OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
"OPENAI_API_MODEL", ""
)
# Initialize OpenAI clients
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
async_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
openai.api_key = OPENAI_API_KEY
MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
"OPENAI_API_MODEL", "gpt-4-turbo-preview"
)
@@ -46,10 +35,8 @@ MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get(
# for LLM completion
def LLM_Completion(
messages: list[dict], stream: bool = True, useGroq: bool = True,model_config: dict = None
messages: list[dict], stream: bool = True, useGroq: bool = True
) -> str:
if model_config:
return _call_with_custom_config(messages,stream,model_config)
if not useGroq or not FAST_DESIGN_MODE:
force_gpt4 = True
useGroq = False
@@ -82,106 +69,16 @@ def LLM_Completion(
return _chat_completion(messages=messages)
def _call_with_custom_config(messages: list[dict], stream: bool, model_config: dict) ->str:
"使用自定义配置调用API"
api_url = model_config.get("apiUrl", OPENAI_API_BASE)
api_key = model_config.get("apiKey", OPENAI_API_KEY)
api_model = model_config.get("apiModel", OPENAI_API_MODEL)
temp_client = OpenAI(api_key=api_key, base_url=api_url)
temp_async_client = AsyncOpenAI(api_key=api_key, base_url=api_url)
try:
if stream:
try:
loop = asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(
_achat_completion_stream_custom(messages=messages, temp_async_client=temp_async_client, api_model=api_model)
)
else:
response = temp_client.chat.completions.create(
messages=messages,
model=api_model,
temperature=0.3,
max_tokens=4096,
timeout=180
)
# 检查响应是否有效
if not response.choices or len(response.choices) == 0:
raise Exception(f"API returned empty response for model {api_model}")
if not response.choices[0] or not response.choices[0].message:
raise Exception(f"API returned invalid response format for model {api_model}")
full_reply_content = response.choices[0].message.content
if full_reply_content is None:
raise Exception(f"API returned None content for model {api_model}")
#print(colored(full_reply_content, "blue", "on_white"), end="")
return full_reply_content
except Exception as e:
print_colored(f"[API Error] Custom API error: {str(e)}", "red")
raise
async def _achat_completion_stream_custom(messages:list[dict], temp_async_client, api_model: str ) -> str:
max_retries=3
for attempt in range(max_retries):
try:
response = await temp_async_client.chat.completions.create(
messages=messages,
model=api_model,
temperature=0.3,
max_tokens=4096,
stream=True,
timeout=180
)
collected_chunks = []
collected_messages = []
async for chunk in response:
collected_chunks.append(chunk)
choices = chunk.choices
if len(choices) > 0 and choices[0] is not None:
chunk_message = choices[0].delta
if chunk_message is not None:
collected_messages.append(chunk_message)
# if chunk_message.content:
# print(colored(chunk_message.content, "blue", "on_white"), end="")
# print()
full_reply_content = "".join(
[m.content or "" for m in collected_messages if m is not None]
)
# 检查最终结果是否为空
if not full_reply_content or full_reply_content.strip() == "":
raise Exception(f"Stream API returned empty content for model {api_model}")
return full_reply_content
except httpx.RemoteProtocolError as e:
if attempt < max_retries - 1:
wait_time = (attempt + 1) *2
print_colored(f"[API Warn] Stream connection interrupted, retrying in {wait_time}s...", "yellow")
await asyncio.sleep(wait_time)
continue
except Exception as e:
print_colored(f"[API Error] Custom API stream error: {str(e)}", "red")
raise
async def _achat_completion_stream_groq(messages: list[dict]) -> str:
from groq import AsyncGroq
groq_client = AsyncGroq(api_key=GROQ_API_KEY)
client = AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
print("Attempt to use Groq (Fase Design Mode):")
try:
response = await groq_client.chat.completions.create(
stream = await client.chat.completions.create(
messages=messages,
# model='gemma-7b-it',
model="mixtral-8x7b-32768",
@@ -195,34 +92,25 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
raise "failed"
# 检查响应是否有效
if not response.choices or len(response.choices) == 0:
raise Exception("Groq API returned empty response")
if not response.choices[0] or not response.choices[0].message:
raise Exception("Groq API returned invalid response format")
full_reply_content = response.choices[0].message.content
if full_reply_content is None:
raise Exception("Groq API returned None content")
# print(colored(full_reply_content, "blue", "on_white"), end="")
# print()
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
client = MistralClient(api_key=MISTRAL_API_KEY)
# client=AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
try:
messages[len(messages) - 1]["role"] = "user"
stream = mistral_client.chat(
stream = client.chat(
messages=[
ChatMessage(
role=message["role"], content=message["content"]
@@ -231,35 +119,31 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
],
# model = "mistral-small-latest",
model="open-mixtral-8x7b",
# response_format={"type": "json_object"},
)
break # If the operation is successful, break the loop
except Exception:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
# 检查响应是否有效
if not stream.choices or len(stream.choices) == 0:
raise Exception("Mistral API returned empty response")
if not stream.choices[0] or not stream.choices[0].message:
raise Exception("Mistral API returned invalid response format")
raise "failed"
full_reply_content = stream.choices[0].message.content
if full_reply_content is None:
raise Exception("Mistral API returned None content")
# print(colored(full_reply_content, "blue", "on_white"), end="")
# print()
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
response = await async_client.chat.completions.create(
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=600,
timeout=3,
model="gpt-3.5-turbo-16k",
stream=True,
)
@@ -270,38 +154,40 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk.choices
if len(choices) > 0 and choices[0] is not None:
chunk_message = choices[0].delta
if chunk_message is not None:
collected_messages.append(chunk_message) # save the message
# if chunk_message.content:
# print(
# colored(chunk_message.content, "blue", "on_white"),
# end="",
# )
# print()
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if "content" in chunk_message:
print(
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.content or "" for m in collected_messages if m is not None]
[m.get("content", "") for m in collected_messages]
)
# 检查最终结果是否为空
if not full_reply_content or full_reply_content.strip() == "":
raise Exception("Stream API (gpt-3.5) returned empty content")
return full_reply_content
def _achat_completion_json(messages: list[dict] ) -> str:
async def _achat_completion_json(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
max_attempts = 5
for attempt in range(max_attempts):
try:
response = async_client.chat.completions.create(
stream = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=600,
timeout=3,
model=MODEL,
response_format={"type": "json_object"},
)
@@ -310,87 +196,62 @@ def _achat_completion_json(messages: list[dict] ) -> str:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
raise "failed"
# 检查响应是否有效
if not response.choices or len(response.choices) == 0:
raise Exception("OpenAI API returned empty response")
if not response.choices[0] or not response.choices[0].message:
raise Exception("OpenAI API returned invalid response format")
full_reply_content = response.choices[0].message.content
if full_reply_content is None:
raise Exception("OpenAI API returned None content")
# print(colored(full_reply_content, "blue", "on_white"), end="")
# print()
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream(messages: list[dict]) -> str:
try:
response = await async_client.chat.completions.create(
**_cons_kwargs(messages), stream=True
)
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
**_cons_kwargs(messages), stream=True
)
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk.choices
if len(choices) > 0 and choices[0] is not None:
chunk_message = choices[0].delta
if chunk_message is not None:
collected_messages.append(chunk_message) # save the message
# if chunk_message.content:
# print(
# colored(chunk_message.content, "blue", "on_white"),
# end="",
# )
# print()
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if "content" in chunk_message:
print(
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.content or "" for m in collected_messages if m is not None]
)
# 检查最终结果是否为空
if not full_reply_content or full_reply_content.strip() == "":
raise Exception("Stream API returned empty content")
return full_reply_content
except Exception as e:
print_colored(f"[API Error] OpenAI API stream error: {str(e)}", "red")
raise
full_reply_content = "".join(
[m.get("content", "") for m in collected_messages]
)
return full_reply_content
def _chat_completion(messages: list[dict]) -> str:
try:
rsp = client.chat.completions.create(**_cons_kwargs(messages))
# 检查响应是否有效
if not rsp.choices or len(rsp.choices) == 0:
raise Exception("OpenAI API returned empty response")
if not rsp.choices[0] or not rsp.choices[0].message:
raise Exception("OpenAI API returned invalid response format")
content = rsp.choices[0].message.content
if content is None:
raise Exception("OpenAI API returned None content")
return content
except Exception as e:
print_colored(f"[API Error] OpenAI API error: {str(e)}", "red")
raise
print(messages, flush=True)
rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
content = rsp["choices"][0]["message"]["content"]
print(content, flush=True)
return content
def _cons_kwargs(messages: list[dict]) -> dict:
kwargs = {
"messages": messages,
"max_tokens": 2000,
"temperature": 0.3,
"timeout": 600,
"max_tokens": 4096,
"n": 1,
"stop": None,
"temperature": 0.5,
"timeout": 3,
}
kwargs_mode = {"model": MODEL}
kwargs.update(kwargs_mode)

View File

@@ -7,8 +7,6 @@ PROMPT_ABILITY_REQUIREMENT_GENERATION = """
## Instruction
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirement that is required by the "Current Task". The ability should be summarized concisely within a few words.
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all ability requirements.**
## General Goal (The general goal for the collaboration plan, "Current Task" is just one of its substep)
{General_Goal}
@@ -43,7 +41,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
),
},
]
#print(messages[1]["content"])
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
@@ -51,8 +49,6 @@ PROMPT_AGENT_ABILITY_SCORING = """
## Instruction
Based on "Agent Board" and "Ability Requirement", output a score for each agent to estimate the possibility that the agent can fulfil the "Ability Requirement". The score should be 1-5. Provide a concise reason before you assign the score.
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all reasons and explanations.**
## AgentBoard
{Agent_Board}
@@ -104,7 +100,7 @@ def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
),
},
]
#print(messages[1]["content"])
print(messages[1]["content"])
scoreTable[Ability_Requirement] = read_LLM_Completion(messages)
return scoreTable
@@ -137,6 +133,5 @@ def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
def AgentSelectModify_addAspect(aspectList, Agent_Board):
newAspect = aspectList[-1]
scoreTable = agentAbilityScoring(Agent_Board, [newAspect])
scoreTable = agentAbilityScoring(Agent_Board, aspectList)
return scoreTable

View File

@@ -33,9 +33,7 @@ class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
PROMPT_AGENT_SELECTION_GENERATION = """
## Instruction
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all explanations and reasoning, though agent names should remain in their original form.**
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board". Agent Selection Plan ranks from high to low according to ability.
## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}
@@ -77,14 +75,11 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
),
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
# Check if Agent_Board is None or empty
if Agent_Board is None or len(Agent_Board) == 0:
raise ValueError("Agent_Board cannot be None or empty. Please ensure agents are set via /setAgents endpoint before generating a plan.")
messages = [
{
"role": "system",
@@ -99,13 +94,12 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
),
},
]
#print(messages[1]["content"])
print(messages[1]["content"])
agentboard_set = {agent["Name"] for agent in Agent_Board}
while True:
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
# 添加调试打印
if len(candidate) > MAX_TEAM_SIZE:
teamSize = random.randint(2, MAX_TEAM_SIZE)
candidate = candidate[0:teamSize]
@@ -113,6 +107,7 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
continue
AgentSelectionPlan = sorted(candidate)
AgentSelectionPlan_set = set(AgentSelectionPlan)
# Check if every item in AgentSelectionPlan is in agentboard
if AgentSelectionPlan_set.issubset(agentboard_set):
break # If all items are in agentboard, break the loop

View File

@@ -1,53 +1,55 @@
from AgentCoord.PlanEngine.planOutline_Generator import generate_PlanOutline
# from AgentCoord.PlanEngine.AgentSelection_Generator import (
# generate_AgentSelection,
# )
from AgentCoord.PlanEngine.AgentSelection_Generator import (
generate_AgentSelection,
)
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util
def generate_basePlan(
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List, context
):
"""
优化模式:生成大纲 + 智能体选择,但不生成任务流程
优化用户体验:
1. 快速生成大纲和分配智能体
2. 用户可以看到完整的大纲和智能体图标
3. TaskProcess由前端通过 fillStepTask API 异步填充
"""
# 参数保留以保持接口兼容性
_ = AgentProfile_Dict
PlanOutline = generate_PlanOutline(
InitialObject_List=InitialObject_List, General_Goal=General_Goal
)
basePlan = {
"General Goal": General_Goal,
"Initial Input Object": InitialObject_List,
"Collaboration Process": []
"Collaboration Process": [],
}
PlanOutline = generate_PlanOutline(
InitialObject_List=[], General_Goal=General_Goal + context
)
for stepItem in PlanOutline:
# # 为每个步骤分配智能体
# Current_Task = {
# "TaskName": stepItem["StepName"],
# "InputObject_List": stepItem["InputObject_List"],
# "OutputObject": stepItem["OutputObject"],
# "TaskContent": stepItem["TaskContent"],
# }
# AgentSelection = generate_AgentSelection(
# General_Goal=General_Goal,
# Current_Task=Current_Task,
# Agent_Board=Agent_Board,
# )
# 添加智能体选择,但不添加任务流程
stepItem["AgentSelection"] = []
stepItem["TaskProcess"] = [] # 空数组,由前端异步填充
stepItem["Collaboration_Brief_frontEnd"] = {
"template": "",
"data": {}
Current_Task = {
"TaskName": stepItem["StepName"],
"InputObject_List": stepItem["InputObject_List"],
"OutputObject": stepItem["OutputObject"],
"TaskContent": stepItem["TaskContent"],
}
AgentSelection = generate_AgentSelection(
General_Goal=General_Goal,
Current_Task=Current_Task,
Agent_Board=Agent_Board,
)
Current_Task_Description = {
"TaskName": stepItem["StepName"],
"AgentInvolved": [
{"Name": name, "Profile": AgentProfile_Dict[name]}
for name in AgentSelection
],
"InputObject_List": stepItem["InputObject_List"],
"OutputObject": stepItem["OutputObject"],
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
stepItem["InputObject_List"],
stepItem["OutputObject"],
AgentSelection,
stepItem["TaskContent"],
),
}
TaskProcess = generate_TaskProcess(
General_Goal=General_Goal + context,
Current_Task_Description=Current_Task_Description,
)
# add the generated AgentSelection and TaskProcess to the stepItem
stepItem["AgentSelection"] = AgentSelection
stepItem["TaskProcess"] = TaskProcess
basePlan["Collaboration Process"].append(stepItem)
basePlan["General Goal"] = General_Goal
return basePlan

View File

@@ -9,8 +9,6 @@ PROMPT_PLAN_OUTLINE_BRANCHING = """
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the plan for "General Goal".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all content, including StepName, TaskContent, and OutputObject fields.**
## General Goal (Specify the general goal for the plan)
{General_Goal}
@@ -87,7 +85,7 @@ def branch_PlanOutline(
InitialObject_List=str(InitialObject_List),
General_Goal=General_Goal,
)
#print(prompt)
print(prompt)
branch_List = []
for _ in range(branch_Number):
messages = [

View File

@@ -29,8 +29,6 @@ PROMPT_TASK_PROCESS_BRANCHING = """
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the "Task for Current Step".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for the Description field and all explanations, while keeping ID, ActionType, and AgentName in their original format.**
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
{General_Goal}
@@ -57,39 +55,26 @@ Note: "Modification Requirement" specifies how to modify the "Baseline Completio
"ID": "Action4",
"ActionType": "Propose",
"AgentName": "Mia",
"Description": "提议关于人工智能情感发展的心理学理论,重点关注爱与依恋的概念。",
"Description": "Propose psychological theories on love and attachment that could be applied to AI's emotional development.",
"ImportantInput": [
"InputObject:Story Outline"
]
}},
{{
"ID": "Action5",
"ActionType": "Critique",
"ActionType": "Propose",
"AgentName": "Noah",
"Description": "对Mia提出的心理学理论进行批判性评估分析其在AI情感发展场景中的适用性和局限性。",
"ImportantInput": [
"ActionResult:Action4"
]
"Description": "Propose ethical considerations and philosophical questions regarding AI's capacity for love.",
"ImportantInput": []
}},
{{
"ID": "Action6",
"ActionType": "Improve",
"AgentName": "Liam",
"Description": "基于Noah的批判性反馈改进和完善心理学理论框架使其更贴合AI情感发展的实际需求。",
"ImportantInput": [
"ActionResult:Action4",
"ActionResult:Action5"
]
}},
{{
"ID": "Action7",
"ActionType": "Finalize",
"AgentName": "Mia",
"Description": "综合所有提议、批判和改进意见整合并提交最终的AI情感发展心理学理论框架。",
"AgentName": "Liam",
"Description": "Combine the poetic elements and ethical considerations into a cohesive set of core love elements for the story.",
"ImportantInput": [
"ActionResult:Action4",
"ActionResult:Action5",
"ActionResult:Action6"
"ActionResult:Action1",
"ActionResult:Action5"
]
}}
]
@@ -99,12 +84,7 @@ Note: "Modification Requirement" specifies how to modify the "Baseline Completio
ImportantInput: Specify if there is any previous result that should be taken special consideration during the execution the action. Should be of format "InputObject:xx" or "ActionResult:xx".
InputObject_List: List existing objects that should be utilized in current step.
AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
ActionType: Specify the type of action. **CRITICAL REQUIREMENTS:**
1. The "Remaining Steps" MUST include ALL FOUR action types in the following order: Propose -> Critique -> Improve -> Finalize
2. Each action type (Propose, Critique, Improve, Finalize) MUST appear at least once
3. The actions must follow the sequence: Propose actions first, then Critique actions, then Improve actions, and Finalize must be the last action
4. Even if only one agent is involved in a phase, that phase must still have its corresponding action type
5. The last action must ALWAYS be of type "Finalize"
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
"""
@@ -155,7 +135,7 @@ def branch_TaskProcess(
General_Goal=General_Goal,
Act_Set=ACT_SET,
)
#print(prompt)
print(prompt)
branch_List = []
for i in range(branch_Number):
messages = [

View File

@@ -5,9 +5,7 @@ import json
PROMPT_PLAN_OUTLINE_GENERATION = """
## Instruction
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all content, including StepName, TaskContent, and OutputObject fields.**
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline". The number of steps in the Plan Outline cannot exceed 5.
## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
{InitialObject_List}
@@ -53,6 +51,7 @@ TaskContent: Describe the task of the current step.
InputObject_List: The list of the input obejects that will be used in current step.
OutputObject: The name of the final output object of current step.
请用中文回答
"""
@@ -85,16 +84,4 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
),
},
]
result = read_LLM_Completion(messages)
if isinstance(result, dict) and "Plan_Outline" in result:
return result["Plan_Outline"]
else:
# 如果格式不正确,返回默认的计划大纲
return [
{
"StepName": "Default Step",
"TaskContent": "Generated default plan step due to format error",
"InputObject_List": [],
"OutputObject": "Default Output"
}
]
return read_LLM_Completion(messages)["Plan_Outline"]

View File

@@ -25,8 +25,6 @@ PROMPT_TASK_PROCESS_GENERATION = """
## Instruction
Based on "General Goal", "Task for Current Step", "Action Set" and "Output Format Example", design a plan for "Task for Current Step", output a formatted "Task_Process_Plan".
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for the Description field and all explanations, while keeping ID, ActionType, and AgentName in their original format.**
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
{General_Goal}
@@ -92,6 +90,7 @@ InputObject_List: List existing objects that should be utilized in current step.
AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
请用中文回答
"""
@@ -124,7 +123,7 @@ def generate_TaskProcess(General_Goal, Current_Task_Description):
),
},
]
#print(messages[1]["content"])
print(messages[1]["content"])
# write a callback function, if read_LLM_Completion(messages)["Task_Process_Plan"] dont have the right format, call this function again
while True:

View File

@@ -9,8 +9,6 @@ You are within a multi-agent collaboration for the "Current Task".
Now it's your turn to take action. Read the "Context Information" and take your action following "Instruction for Your Current Action".
Note: Important Input for your action are marked with *Important Input*
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all your answers and outputs.**
## Context Information
### General Goal (The "Current Task" is indeed a substep of the general goal)
@@ -82,33 +80,10 @@ class BaseAction():
Important_Mark = ""
action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)
# Handle missing agent profiles gracefully
model_config = None
if agentName not in AgentProfile_Dict:
agentProfile = f"AI Agent named {agentName}"
else:
# agentProfile = AgentProfile_Dict[agentName]
agent_config = AgentProfile_Dict[agentName]
agentProfile = agent_config.get("profile",f"AI Agent named {agentName}")
if agent_config.get("useCustomAPI",False):
model_config = {
"apiModel":agent_config.get("apiModel"),
"apiUrl":agent_config.get("apiUrl"),
"apiKey":agent_config.get("apiKey"),
}
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(
agentName = agentName,
agentProfile = agentProfile,
General_Goal = General_Goal,
Current_Task_Description = TaskDescription,
Input_Objects = inputObject_Record,
History_Action = action_Record,
Action_Description = self.info["Description"],
Action_Custom_Note = self.Action_Custom_Note
)
#print_colored(text = prompt, text_color="red")
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
print_colored(text = prompt, text_color="red")
messages = [{"role":"system", "content": prompt}]
ActionResult = LLM_Completion(messages,True,False,model_config=model_config)
ActionResult = LLM_Completion(messages,True,False)
ActionInfo_with_Result = copy.deepcopy(self.info)
ActionInfo_with_Result["Action_Result"] = ActionResult

View File

@@ -1,7 +1,7 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
注意由于你在对话中你的批评必须简洁、清晰且易于阅读不要让人感到压力过大。如果你要列出一些观点最多列出2点。
Note: Since you are in a conversation, your critique must be concise, clear and easy to read, don't overwhelm others. If you want to list some points, list at most 2 points.
'''

View File

@@ -2,9 +2,9 @@ from AgentCoord.util.converter import read_outputObject_content
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
注意:你可以在给出{OutputName}的最终内容之前先说一些话。当你决定给出{OutputName}的最终内容时,应该这样包含:
Note: You can say something before you give the final content of {OutputName}. When you decide to give the final content of {OutputName}, it should be enclosed like this:
```{OutputName}
{OutputName}的内容)
(the content of {OutputName})
```
'''

View File

@@ -1,12 +1,12 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
注意:你可以在提供改进版本的内容之前先说一些话。
你提供的改进版本必须是完整的版本(例如,如果你提供改进的故事,你应该给出完整的故事内容,而不仅仅是报告你在哪里改进了)。
当你决定提供内容的改进版本时,应该这样开始:
Note: You can say something before you provide the improved version of the content.
The improved version you provide must be a completed version (e.g. if you provide a improved story, you should give completed story content, rather than just reporting where you have improved).
When you decide to give the improved version of the content, it should be start like this:
## xxx的改进版本
(改进版本的内容)
(the improved version of the content)
```
'''

View File

@@ -4,7 +4,7 @@ from termcolor import colored
# Accept inputs: num_StepToRun (the number of step to run, if None, run to the end), plan, RehearsalLog, AgentProfile_Dict
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
# Prepare for execution
KeyObjects = {}
finishedStep_index = -1
@@ -83,15 +83,11 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
}
# start the group chat
util.print_colored(TaskDescription, text_color="green")
ActionHistory = []
action_count = 0
total_actions = len(TaskProcess)
for ActionInfo in TaskProcess:
action_count += 1
actionType = ActionInfo["ActionType"]
agentName = ActionInfo["AgentName"]
if actionType in Action.customAction_Dict:
currentAction = Action.customAction_Dict[actionType](
info=ActionInfo,
@@ -105,7 +101,7 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
KeyObjects=KeyObjects,
)
ActionInfo_with_Result = currentAction.run(
General_Goal=plan["General Goal"],
General_Goal=plan["General Goal"] + "\n\n### Useful Information (some information can help accomplish the task)\n如果使用该信息,请在回答中显示指出是来自数联网的数据。例如,基于数联网数据搜索结果。" + context,
TaskDescription=TaskDescription,
agentName=agentName,
AgentProfile_Dict=AgentProfile_Dict,
@@ -117,9 +113,18 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
ActionHistory.append(ActionInfo_with_Result)
# post processing for the group chat (finish)
objectLogNode["content"] = KeyObjects[OutputName]
if StepRun_count == len(plan["Collaboration Process"][(finishedStep_index + 1): run_to]):
objectLogNode["content"] += context
RehearsalLog.append(stepLogNode)
RehearsalLog.append(objectLogNode)
stepLogNode["ActionHistory"] = ActionHistory
# Return Output
print(
colored(
"$Run " + str(StepRun_count) + "step$",
color="black",
on_color="on_white",
)
)
return RehearsalLog

View File

@@ -1,619 +0,0 @@
"""
优化版执行计划 - 支持动态追加步骤
在执行过程中可以接收新的步骤并追加到执行队列
"""
import asyncio
import json
import time
from typing import List, Dict, Set, Generator, Any
import AgentCoord.RehearsalEngine_V2.Action as Action
import AgentCoord.util as util
from termcolor import colored
from AgentCoord.RehearsalEngine_V2.execution_state import execution_state_manager
from AgentCoord.RehearsalEngine_V2.dynamic_execution_manager import dynamic_execution_manager
# ==================== Configuration ====================
# Maximum number of concurrent LLM requests
MAX_CONCURRENT_REQUESTS = 2
# Delay between batches (seconds)
BATCH_DELAY = 1.0
# Retry count and delay (seconds) for 429 (rate-limit) errors
MAX_RETRIES = 3
RETRY_DELAY = 5.0
# ==================== Rate limiter ====================
class RateLimiter:
    """Async context manager that caps the number of concurrent requests.

    Usage: ``async with rate_limiter: ...`` — at most ``max_concurrent``
    bodies run at once; the rest wait on the internal semaphore.
    """

    def __init__(self, max_concurrent: int = MAX_CONCURRENT_REQUESTS):
        self.max_concurrent = max_concurrent
        self.semaphore = asyncio.Semaphore(max_concurrent)

    async def __aenter__(self):
        # Block until a slot is free, then enter.
        await self.semaphore.acquire()
        return self

    async def __aexit__(self, *args):
        # Always release the slot, even when the body raised.
        self.semaphore.release()


# Module-wide limiter shared by all requests
rate_limiter = RateLimiter()
def build_action_dependency_graph(TaskProcess: List[Dict]) -> Dict[int, List[int]]:
    """Build the dependency graph between actions of one step.

    Action ``i`` depends on action ``j`` when ``i``'s ``ImportantInput`` list
    contains the literal token ``"ActionResult:<j's ID>"``.

    Args:
        TaskProcess: List of action dicts; each has an ``"ID"`` and an
            optional ``"ImportantInput"`` list (may be missing or None).

    Returns:
        Mapping ``{action_index: [indices of actions it depends on]}`` with an
        entry for every action, preserving the original index order.
    """
    dependency_map: Dict[int, List[int]] = {i: [] for i in range(len(TaskProcess))}
    for i, action in enumerate(TaskProcess):
        # "or []" also covers an explicit None value, like the original guard did.
        # A set gives O(1) membership instead of scanning the list per candidate.
        important_inputs = set(action.get('ImportantInput') or [])
        if not important_inputs:
            continue
        for j, prev_action in enumerate(TaskProcess):
            # The original also tested startswith('ActionResult:'), which is
            # redundant: the exact-equality check below already implies it.
            if i != j and f'ActionResult:{prev_action["ID"]}' in important_inputs:
                dependency_map[i].append(j)
    return dependency_map
def get_parallel_batches(TaskProcess: List[Dict], dependency_map: Dict[int, List[int]]) -> List[List[int]]:
    """Partition actions into ordered batches whose members can run in parallel.

    A batch holds every not-yet-scheduled action whose dependencies have all
    been scheduled in earlier batches. If a dependency cycle leaves nothing
    runnable, one remaining action is forced through to guarantee progress.

    Args:
        TaskProcess: List of action dicts.
        dependency_map: ``{action_index: [dependency indices]}``.

    Returns:
        List of batches, each a list of action indices.
    """
    batches: List[List[int]] = []
    scheduled: Set[int] = set()
    total = len(TaskProcess)
    while len(scheduled) < total:
        runnable = [
            idx
            for idx in range(total)
            if idx not in scheduled
            and all(dep in scheduled for dep in dependency_map[idx])
        ]
        if not runnable:
            # Cycle (or bad graph): force exactly one leftover action to avoid
            # spinning forever; bail out entirely if nothing remains.
            leftovers = [idx for idx in range(total) if idx not in scheduled]
            if not leftovers:
                break
            runnable = leftovers[:1]
        batches.append(runnable)
        scheduled.update(runnable)
    return batches
async def execute_single_action_async(
    ActionInfo: Dict,
    General_Goal: str,
    TaskDescription: str,
    OutputName: str,
    KeyObjects: Dict,
    ActionHistory: List,
    agentName: str,
    AgentProfile_Dict: Dict,
    InputName_List: List[str]
) -> Dict:
    """
    Execute a single action asynchronously.

    The blocking ``Action.run`` call is delegated to the default thread-pool
    executor so the event loop stays responsive while the LLM call runs.

    Args:
        ActionInfo: Action description dict (must contain "ActionType").
        General_Goal: Overall goal of the plan.
        TaskDescription: Human-readable description of the current step.
        OutputName: Name of the step's output object in KeyObjects.
        KeyObjects: Key-object dictionary (actions may read/write it).
        ActionHistory: Results of actions already run in this step.
        agentName: Name of the agent performing the action.
        AgentProfile_Dict: Agent profile dictionary.
        InputName_List: Names of this step's input objects.

    Returns:
        The action info dict augmented with its execution result.
    """
    actionType = ActionInfo["ActionType"]
    # Instantiate the action: registered custom types override BaseAction.
    if actionType in Action.customAction_Dict:
        currentAction = Action.customAction_Dict[actionType](
            info=ActionInfo,
            OutputName=OutputName,
            KeyObjects=KeyObjects,
        )
    else:
        currentAction = Action.BaseAction(
            info=ActionInfo,
            OutputName=OutputName,
            KeyObjects=KeyObjects,
        )
    # Run the blocking call in the executor so the loop is not blocked.
    # Fix: use get_running_loop() — asyncio.get_event_loop() inside a
    # coroutine is deprecated since Python 3.10.
    loop = asyncio.get_running_loop()
    ActionInfo_with_Result = await loop.run_in_executor(
        None,
        lambda: currentAction.run(
            General_Goal=General_Goal,
            TaskDescription=TaskDescription,
            agentName=agentName,
            AgentProfile_Dict=AgentProfile_Dict,
            InputName_List=InputName_List,
            OutputName=OutputName,
            KeyObjects=KeyObjects,
            ActionHistory=ActionHistory,
        )
    )
    return ActionInfo_with_Result
async def execute_step_async_streaming(
    stepDescrip: Dict,
    General_Goal: str,
    AgentProfile_Dict: Dict,
    KeyObjects: Dict,
    step_index: int,
    total_steps: int,
    execution_id: str = None,
    RehearsalLog: List = None  # when provided, step/object log nodes are appended to it
) -> Generator[Dict, None, None]:
    """
    Execute a single plan step asynchronously, yielding events as they happen.

    Actions inside the step are grouped into dependency-respecting batches
    (see build_action_dependency_graph / get_parallel_batches); each batch
    runs its actions concurrently via asyncio.gather.

    Args:
        stepDescrip: Step description dict (StepName, TaskContent,
            InputObject_List, OutputObject, AgentSelection, TaskProcess).
        General_Goal: Overall goal of the plan.
        AgentProfile_Dict: Agent profile dictionary.
        KeyObjects: Key-object dictionary; mutated in place — the step's
            result ends up under OutputName.
        step_index: Zero-based index of this step (display only).
        total_steps: Total number of steps (display only).
        execution_id: Execution id used for pause/stop polling.
        RehearsalLog: Optional history list to append log nodes to.

    Yields:
        Event dicts: one "step_start", one "action_complete" per action,
        then one "step_complete". Returns early (no "step_complete") when
        the user stops the execution.
    """
    # Normalize names coming from the plan (camelCase -> spaced words)
    StepName = (
        util.camel_case_to_normal(stepDescrip["StepName"])
        if util.is_camel_case(stepDescrip["StepName"])
        else stepDescrip["StepName"]
    )
    TaskContent = stepDescrip["TaskContent"]
    InputName_List = (
        [
            (
                util.camel_case_to_normal(obj)
                if util.is_camel_case(obj)
                else obj
            )
            for obj in stepDescrip["InputObject_List"]
        ]
        if stepDescrip["InputObject_List"] is not None
        else None
    )
    OutputName = (
        util.camel_case_to_normal(stepDescrip["OutputObject"])
        if util.is_camel_case(stepDescrip["OutputObject"])
        else stepDescrip["OutputObject"]
    )
    Agent_List = stepDescrip["AgentSelection"]
    TaskProcess = stepDescrip["TaskProcess"]
    TaskDescription = (
        util.converter.generate_template_sentence_for_CollaborationBrief(
            input_object_list=InputName_List,
            output_object=OutputName,
            agent_list=Agent_List,
            step_task=TaskContent,
        )
    )
    # Snapshot the step's inputs for the log.
    # NOTE(review): assumes InputName_List is not None here and every input
    # already exists in KeyObjects — confirm upstream guarantees this.
    inputObject_Record = [
        {InputName: KeyObjects[InputName]} for InputName in InputName_List
    ]
    stepLogNode = {
        "LogNodeType": "step",
        "NodeId": StepName,
        "InputName_List": InputName_List,
        "OutputName": OutputName,
        "chatLog": [],
        "inputObject_Record": inputObject_Record,
    }
    objectLogNode = {
        "LogNodeType": "object",
        "NodeId": OutputName,
        "content": None,
    }
    # Emit the step-start event
    yield {
        "type": "step_start",
        "step_index": step_index,
        "total_steps": total_steps,
        "step_name": StepName,
        "task_description": TaskDescription,
    }
    # Build the action dependency graph and derive parallel batches
    dependency_map = build_action_dependency_graph(TaskProcess)
    batches = get_parallel_batches(TaskProcess, dependency_map)
    ActionHistory = []
    total_actions = len(TaskProcess)
    completed_actions = 0
    # Step-start console log
    util.print_colored(
        f"📋 步骤 {step_index + 1}/{total_steps}: {StepName} ({total_actions} 个动作, 分 {len(batches)} 批执行)",
        text_color="cyan"
    )
    # Run the actions batch by batch
    for batch_index, batch_indices in enumerate(batches):
        # Honor pause/stop requests before starting each batch
        should_continue = await execution_state_manager.async_check_pause(execution_id)
        if not should_continue:
            return
        batch_size = len(batch_indices)
        # Console log describing how this batch runs
        if batch_size > 1:
            util.print_colored(
                f"🚦 批次 {batch_index + 1}/{len(batches)}: 并行执行 {batch_size} 个动作",
                text_color="blue"
            )
        else:
            util.print_colored(
                f"🔄 批次 {batch_index + 1}/{len(batches)}: 串行执行",
                text_color="yellow"
            )
        # Launch every action of the current batch concurrently
        tasks = [
            execute_single_action_async(
                TaskProcess[i],
                General_Goal=General_Goal,
                TaskDescription=TaskDescription,
                OutputName=OutputName,
                KeyObjects=KeyObjects,
                ActionHistory=ActionHistory,
                agentName=TaskProcess[i]["AgentName"],
                AgentProfile_Dict=AgentProfile_Dict,
                InputName_List=InputName_List
            )
            for i in batch_indices
        ]
        # Wait until the whole batch has finished
        batch_results = await asyncio.gather(*tasks)
        # Emit each action's result as its own event
        for i, result in enumerate(batch_results):
            action_index_in_batch = batch_indices[i]
            completed_actions += 1
            util.print_colored(
                f"✅ 动作 {completed_actions}/{total_actions} 完成: {result['ActionType']} by {result['AgentName']}",
                text_color="green"
            )
            ActionHistory.append(result)
            # Yield immediately so the client sees progress per action
            yield {
                "type": "action_complete",
                "step_index": step_index,
                "step_name": StepName,
                "action_index": action_index_in_batch,
                "total_actions": total_actions,
                "completed_actions": completed_actions,
                "action_result": result,
                "batch_info": {
                    "batch_index": batch_index,
                    "batch_size": batch_size,
                    "is_parallel": batch_size > 1
                }
            }
    # Step finished: the actions have written the output into KeyObjects
    objectLogNode["content"] = KeyObjects[OutputName]
    stepLogNode["ActionHistory"] = ActionHistory
    # Collect the agents used by this step (deduplicated; order not preserved)
    assigned_agents_in_step = list(set(Agent_List)) if Agent_List else []
    # RehearsalLog is mutated in place, so the caller's list gets the nodes too
    if RehearsalLog is not None:
        RehearsalLog.append(stepLogNode)
        RehearsalLog.append(objectLogNode)
    yield {
        "type": "step_complete",
        "step_index": step_index,
        "step_name": StepName,
        "step_log_node": stepLogNode,
        "object_log_node": objectLogNode,
        "assigned_agents": {StepName: assigned_agents_in_step},  # agents used by this step
    }
def executePlan_streaming_dynamic(
    plan: Dict,
    num_StepToRun: int,
    RehearsalLog: List,
    AgentProfile_Dict: Dict,
    existingKeyObjects: Dict = None,
    execution_id: str = None
) -> Generator[str, None, None]:
    """
    Execute a plan, streaming progress; new steps may be appended while it runs.

    A private event loop is created for the lifetime of the call; an async
    producer pushes events onto a bounded queue and this (synchronous)
    generator drains the queue, converting each event to an SSE frame.

    Args:
        plan: The execution plan ("General Goal", "Collaboration Process").
        num_StepToRun: Number of steps to run; None means run to the end.
        RehearsalLog: History of already-executed log nodes; new nodes are
            appended to it in place.
        AgentProfile_Dict: Agent profile dictionary.
        existingKeyObjects: Pre-existing KeyObjects to start from (copied).
        execution_id: Id used for pause/stop control and for appending steps
            dynamically; generated from the goal + timestamp when omitted.

    Yields:
        SSE-formatted strings: "data: {json}\n\n" per event, ending with an
        "execution_complete" event unless the user stopped the run.
    """
    # Register the run with the pause/stop state manager
    general_goal = plan.get("General Goal", "")
    # Make sure we always have an execution_id
    if execution_id is None:
        import time
        execution_id = f"{general_goal}_{int(time.time() * 1000)}"
    execution_state_manager.start_execution(execution_id, general_goal)
    # Rebuild KeyObjects and the finished-step count from the history
    KeyObjects = existingKeyObjects.copy() if existingKeyObjects else {}
    finishedStep_index = -1
    for logNode in RehearsalLog:
        if logNode["LogNodeType"] == "step":
            finishedStep_index += 1
        if logNode["LogNodeType"] == "object":
            KeyObjects[logNode["NodeId"]] = logNode["content"]
    # Determine the range of steps to run
    if num_StepToRun is None:
        run_to = len(plan["Collaboration Process"])
    else:
        run_to = (finishedStep_index + 1) + num_StepToRun
    steps_to_run = plan["Collaboration Process"][(finishedStep_index + 1): run_to]
    # Hand the steps to the dynamic execution manager
    if execution_id:
        # Register the queue under the caller-provided execution_id
        actual_execution_id = dynamic_execution_manager.start_execution(general_goal, steps_to_run, execution_id)
    total_steps = len(steps_to_run)
    # A bounded queue decouples the async producer from the sync consumer
    async def produce_events(queue: asyncio.Queue):
        """Async producer: runs the steps and pushes events onto the queue."""
        try:
            step_index = 0
            if execution_id:
                # Dynamic mode: keep pulling the next step from the manager
                # Max number of empty polls (~1s each) before giving up
                max_empty_wait_cycles = 5
                empty_wait_count = 0
                while True:
                    # Honor pause/stop requests
                    should_continue = await execution_state_manager.async_check_pause(execution_id)
                    if not should_continue:
                        util.print_colored("🛑 用户请求停止执行", "red")
                        await queue.put({
                            "type": "error",
                            "message": "执行已被用户停止"
                        })
                        break
                    # Fetch the next pending step
                    stepDescrip = dynamic_execution_manager.get_next_step(execution_id)
                    if stepDescrip is None:
                        # Nothing pending; decide whether to keep waiting
                        empty_wait_count += 1
                        # Inspect progress to choose between waiting and exiting
                        execution_info = dynamic_execution_manager.get_execution_info(execution_id)
                        if execution_info:
                            queue_total_steps = execution_info.get("total_steps", 0)
                            completed_steps = execution_info.get("completed_steps", 0)
                            # Queue never held any steps: exit immediately
                            if queue_total_steps == 0:
                                break
                            # All queued steps done: linger briefly for appended steps
                            if completed_steps >= queue_total_steps:
                                if empty_wait_count >= max_empty_wait_cycles:
                                    # Waited long enough; finish the execution
                                    break
                                else:
                                    # Give callers a chance to append new steps
                                    await asyncio.sleep(1)
                                    continue
                            else:
                                # Some steps are still running; retry shortly
                                await asyncio.sleep(0.5)
                                empty_wait_count = 0  # reset the wait counter
                                continue
                        else:
                            # Execution record is gone; exit
                            break
                    # Got a step: reset the empty-poll counter
                    empty_wait_count = 0
                    # Refresh the total step count (it may have grown) for display
                    execution_info = dynamic_execution_manager.get_execution_info(execution_id)
                    current_total_steps = execution_info.get("total_steps", total_steps) if execution_info else total_steps
                    # Run the step, forwarding its events
                    async for event in execute_step_async_streaming(
                        stepDescrip,
                        plan["General Goal"],
                        AgentProfile_Dict,
                        KeyObjects,
                        step_index,
                        current_total_steps,  # dynamically updated total step count
                        execution_id,
                        RehearsalLog  # pass RehearsalLog so log nodes get appended
                    ):
                        if execution_state_manager.is_stopped(execution_id):
                            await queue.put({
                                "type": "error",
                                "message": "执行已被用户停止"
                            })
                            return
                        await queue.put(event)
                    # Tell the manager one more step is done
                    dynamic_execution_manager.mark_step_completed(execution_id)
                    # KeyObjects bookkeeping
                    OutputName = stepDescrip.get("OutputObject", "")
                    if OutputName and OutputName in KeyObjects:
                        # The object log node is emitted with the step_complete event
                        pass
                    step_index += 1
            else:
                # Non-dynamic mode: run the fixed step list in order
                for step_index, stepDescrip in enumerate(steps_to_run):
                    should_continue = await execution_state_manager.async_check_pause(execution_id)
                    if not should_continue:
                        util.print_colored("🛑 用户请求停止执行", "red")
                        await queue.put({
                            "type": "error",
                            "message": "执行已被用户停止"
                        })
                        return
                    async for event in execute_step_async_streaming(
                        stepDescrip,
                        plan["General Goal"],
                        AgentProfile_Dict,
                        KeyObjects,
                        step_index,
                        total_steps,
                        execution_id,
                        RehearsalLog  # pass RehearsalLog so log nodes get appended
                    ):
                        if execution_state_manager.is_stopped(execution_id):
                            await queue.put({
                                "type": "error",
                                "message": "执行已被用户停止"
                            })
                            return
                        await queue.put(event)
        except Exception as e:
            await queue.put({
                "type": "error",
                "message": f"执行出错: {str(e)}"
            })
        finally:
            # None is the sentinel telling the consumer to stop draining
            await queue.put(None)
    # Drive the async producer from this synchronous generator
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        queue = asyncio.Queue(maxsize=10)
        producer_task = loop.create_task(produce_events(queue))
        while True:
            event = loop.run_until_complete(queue.get())
            if event is None:
                break
            # Convert to SSE format and emit immediately
            event_str = json.dumps(event, ensure_ascii=False)
            yield f"data: {event_str}\n\n"
        loop.run_until_complete(producer_task)
        if not execution_state_manager.is_stopped(execution_id):
            complete_event = json.dumps({
                "type": "execution_complete",
                "total_steps": total_steps
            }, ensure_ascii=False)
            yield f"data: {complete_event}\n\n"
    finally:
        # Clean up execution records before closing the event loop
        if execution_id:
            # Drop the dynamic-execution record
            dynamic_execution_manager.cleanup(execution_id)
            # Drop the pause/stop state
            execution_state_manager.cleanup(execution_id)
        if 'producer_task' in locals():
            if not producer_task.done():
                producer_task.cancel()
        # Make sure every task has settled before closing the loop
        try:
            pending = asyncio.all_tasks(loop)
            for task in pending:
                task.cancel()
            loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
        except Exception:
            pass  # ignore errors raised during cleanup
        loop.close()


# Keep the old name as an alias for backward compatibility
executePlan_streaming = executePlan_streaming_dynamic

View File

@@ -1,226 +0,0 @@
"""
动态执行管理器
用于在任务执行过程中动态追加新步骤
"""
import asyncio
from typing import Dict, List, Optional, Any
from threading import Lock
class DynamicExecutionManager:
    """
    Dynamic execution manager.

    Tracks in-flight executions and lets callers append new steps to an
    execution while it is running. Every public method is thread-safe:
    all internal state is guarded by one coarse-grained lock.
    """

    def __init__(self):
        # Execution metadata: execution_id -> {"goal", "status", "total_steps", "completed_steps"}
        self._executions: Dict[str, Dict] = {}
        # Coarse-grained lock protecting all the dictionaries below
        self._lock = Lock()
        # Step queues: execution_id -> list of step dicts (append-only)
        self._step_queues: Dict[str, List] = {}
        # Indices of steps already handed out: execution_id -> set of indices
        self._executed_steps: Dict[str, set] = {}
        # Indices of steps waiting to run, in FIFO order: execution_id -> list of indices
        self._pending_steps: Dict[str, List[int]] = {}

    def start_execution(self, goal: str, initial_steps: List[Dict], execution_id: str = None) -> str:
        """
        Register a new execution.

        Args:
            goal: The execution goal.
            initial_steps: The initial list of step dicts.
            execution_id: Optional id; generated from the goal plus a
                millisecond wall-clock timestamp when omitted.

        Returns:
            The execution id actually used.
        """
        with self._lock:
            if execution_id is None:
                # Bug fix: the original used asyncio.get_event_loop().time(),
                # which is deprecated outside a running event loop and fails on
                # newer Python versions. A millisecond wall-clock timestamp is
                # also the id format used elsewhere in this codebase.
                import time
                execution_id = f"{goal}_{int(time.time() * 1000)}"
            self._executions[execution_id] = {
                "goal": goal,
                "status": "running",
                "total_steps": len(initial_steps),
                "completed_steps": 0
            }
            # Copy the list so later mutations by the caller don't leak in
            self._step_queues[execution_id] = initial_steps.copy()
            # No step has been handed out yet
            self._executed_steps[execution_id] = set()
            # All initial steps are pending, in order
            self._pending_steps[execution_id] = list(range(len(initial_steps)))
            print(f"[Execution] 启动执行: {execution_id}, 步骤数={len(initial_steps)}")
            return execution_id

    def add_steps(self, execution_id: str, new_steps: List[Dict]) -> int:
        """
        Append new steps to a running execution.

        Args:
            execution_id: Execution id.
            new_steps: Step dicts to append.

        Returns:
            The number of steps appended (0 when the execution is unknown).
        """
        with self._lock:
            if execution_id not in self._step_queues:
                return 0
            current_count = len(self._step_queues[execution_id])
            # Extend the queue and schedule the new indices after any pending ones
            self._step_queues[execution_id].extend(new_steps)
            new_indices = list(range(current_count, current_count + len(new_steps)))
            self._pending_steps[execution_id].extend(new_indices)
            # Keep the advertised total in sync with the queue length
            self._executions[execution_id]["total_steps"] = len(self._step_queues[execution_id])
            print(f"[Execution] 追加步骤: +{len(new_steps)}, 总计={self._executions[execution_id]['total_steps']}")
            return len(new_steps)

    def get_next_step(self, execution_id: str) -> Optional[Dict]:
        """
        Pop and return the next pending step.

        Args:
            execution_id: Execution id.

        Returns:
            The next step dict, or None when nothing is pending (or the
            execution is unknown).
        """
        with self._lock:
            if execution_id not in self._pending_steps:
                return None
            if not self._pending_steps[execution_id]:
                return None
            step_index = self._pending_steps[execution_id].pop(0)
            # Defensive guard: a stale index past the queue end yields None
            if step_index >= len(self._step_queues[execution_id]):
                return None
            step = self._step_queues[execution_id][step_index]
            # Remember that this step has been handed out
            self._executed_steps[execution_id].add(step_index)
            step_name = step.get("StepName", "未知")
            print(f"[Execution] 获取步骤: {step_name} (索引: {step_index})")
            return step

    def mark_step_completed(self, execution_id: str):
        """Record that one step of the execution finished."""
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["completed_steps"] += 1
                completed = self._executions[execution_id]["completed_steps"]
                total = self._executions[execution_id]["total_steps"]
                print(f"[Execution] 步骤完成: {completed}/{total}")

    def get_execution_info(self, execution_id: str) -> Optional[Dict]:
        """Return the execution's metadata dict, or None when unknown."""
        with self._lock:
            return self._executions.get(execution_id)

    def get_pending_count(self, execution_id: str) -> int:
        """Return how many steps are still waiting to run (0 when unknown)."""
        with self._lock:
            if execution_id not in self._pending_steps:
                return 0
            return len(self._pending_steps[execution_id])

    def has_more_steps(self, execution_id: str) -> bool:
        """Return True when at least one step is still pending."""
        with self._lock:
            if execution_id not in self._pending_steps:
                return False
            return len(self._pending_steps[execution_id]) > 0

    def finish_execution(self, execution_id: str):
        """Mark the execution as completed (metadata only)."""
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["status"] = "completed"

    def cancel_execution(self, execution_id: str):
        """Mark the execution as cancelled (metadata only)."""
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["status"] = "cancelled"

    def cleanup(self, execution_id: str):
        """Drop all state held for the execution."""
        with self._lock:
            self._executions.pop(execution_id, None)
            self._step_queues.pop(execution_id, None)
            self._executed_steps.pop(execution_id, None)
            self._pending_steps.pop(execution_id, None)
# 全局单例
dynamic_execution_manager = DynamicExecutionManager()

View File

@@ -1,292 +0,0 @@
"""
全局执行状态管理器
用于支持任务的暂停、恢复和停止功能
使用轮询检查机制,确保线程安全
支持多用户/多执行ID并行管理
"""
import threading
import asyncio
from typing import Optional, Dict
from enum import Enum
class ExecutionStatus(Enum):
    """Execution status values."""
    RUNNING = "running"  # currently running
    PAUSED = "paused"  # paused
    STOPPED = "stopped"  # stopped
    IDLE = "idle"  # idle
class ExecutionStateManager:
    """
    Global execution state manager.

    Responsibilities:
    - Track state for multiple users / execution ids in parallel (dict-keyed).
    - Manage a task's run state (running / paused / stopped).
    - Use polling checks instead of async events, avoiding cross-thread
      asyncio issues.
    - Provide thread-safe query and mutation methods.

    Design notes:
    - The manager itself stays a singleton, but all state is stored per
      execution_id, so concurrent users do not interfere with each other.
    """
    _instance: Optional['ExecutionStateManager'] = None
    _lock = threading.Lock()

    def __new__(cls):
        """Singleton: double-checked locking around instance creation."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialize the manager (runs only once; guarded by _initialized)."""
        if self._initialized:
            return
        self._initialized = True
        # State storage: execution_id -> state dict with keys
        #   'status': ExecutionStatus
        #   'goal': str
        #   'should_pause': bool
        #   'should_stop': bool
        self._states: Dict[str, Dict] = {}
        # One lock per execution_id (finer-grained than a single global lock)
        self._locks: Dict[str, threading.Lock] = {}
        # Manager-level lock protecting _states and _locks themselves
        self._manager_lock = threading.Lock()

    def _get_lock(self, execution_id: str) -> threading.Lock:
        """Return the lock for execution_id, creating it if needed."""
        with self._manager_lock:
            if execution_id not in self._locks:
                self._locks[execution_id] = threading.Lock()
            return self._locks[execution_id]

    def _ensure_state(self, execution_id: str) -> Dict:
        """Return the state dict for execution_id, creating a default one if missing."""
        with self._manager_lock:
            if execution_id not in self._states:
                self._states[execution_id] = {
                    'status': ExecutionStatus.IDLE,
                    'goal': None,
                    'should_pause': False,
                    'should_stop': False
                }
            return self._states[execution_id]

    def _get_state(self, execution_id: str) -> Optional[Dict]:
        """Return the state dict for execution_id, or None when absent."""
        with self._manager_lock:
            return self._states.get(execution_id)

    def _cleanup_state(self, execution_id: str):
        """Remove the state and the lock kept for execution_id."""
        with self._manager_lock:
            self._states.pop(execution_id, None)
            self._locks.pop(execution_id, None)

    def get_status(self, execution_id: str) -> Optional[ExecutionStatus]:
        """Return the current execution status, or None when unknown."""
        state = self._get_state(execution_id)
        if state is None:
            return None
        with self._get_lock(execution_id):
            return state['status']

    def set_goal(self, execution_id: str, goal: str):
        """Set the execution's goal (creating state if needed)."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['goal'] = goal

    def get_goal(self, execution_id: str) -> Optional[str]:
        """Return the execution's goal, or None when unknown."""
        state = self._get_state(execution_id)
        if state is None:
            return None
        with self._get_lock(execution_id):
            return state['goal']

    def start_execution(self, execution_id: str, goal: str):
        """Mark the execution as RUNNING and clear the pause/stop flags."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['status'] = ExecutionStatus.RUNNING
            state['goal'] = goal
            state['should_pause'] = False
            state['should_stop'] = False

    def pause_execution(self, execution_id: str) -> bool:
        """
        Pause the execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True when the execution was RUNNING and is now paused.
        """
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            if state['status'] != ExecutionStatus.RUNNING:
                return False
            state['status'] = ExecutionStatus.PAUSED
            state['should_pause'] = True
            return True

    def resume_execution(self, execution_id: str) -> bool:
        """
        Resume a paused execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True when the execution was PAUSED and is now running.
        """
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            if state['status'] != ExecutionStatus.PAUSED:
                return False
            state['status'] = ExecutionStatus.RUNNING
            state['should_pause'] = False
            return True

    def stop_execution(self, execution_id: str) -> bool:
        """
        Stop the execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True when the execution was active and is now stopped.
        """
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            if state['status'] in [ExecutionStatus.IDLE, ExecutionStatus.STOPPED]:
                return False
            state['status'] = ExecutionStatus.STOPPED
            state['should_stop'] = True
            state['should_pause'] = False
            return True

    def reset(self, execution_id: str):
        """Reset the state of this execution_id back to IDLE."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['status'] = ExecutionStatus.IDLE
            state['goal'] = None
            state['should_pause'] = False
            state['should_stop'] = False

    def cleanup(self, execution_id: str):
        """Drop all state kept for this execution_id."""
        self._cleanup_state(execution_id)

    async def async_check_pause(self, execution_id: str):
        """
        Poll the pause/stop flags, (async-)blocking while paused.

        Call this at the key points of an execution loop. While paused it
        sleeps in 0.1s increments until resumed or stopped.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True means keep executing; False means stop.
        """
        state = self._get_state(execution_id)
        if state is None:
            # No state recorded: default to "keep going"
            return True
        # Polling avoids the cross-thread problems of asyncio events
        while True:
            with self._get_lock(execution_id):
                should_stop = state['should_stop']
                should_pause = state['should_pause']
            # The stop flag always wins
            if should_stop:
                return False
            # Paused: sleep briefly, then re-check until resumed or stopped
            if should_pause:
                await asyncio.sleep(0.1)  # short sleep to avoid hogging the CPU
                # Re-read the flags after sleeping
                with self._get_lock(execution_id):
                    should_pause = state['should_pause']
                    should_stop = state['should_stop']
                if not should_pause:
                    continue
                if should_stop:
                    return False
                # Still paused: keep waiting
                continue
            # Neither stopped nor paused: proceed
            return True

    def is_paused(self, execution_id: str) -> bool:
        """Return True when the execution is currently paused."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.PAUSED

    def is_running(self, execution_id: str) -> bool:
        """Return True when the execution is currently running."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.RUNNING

    def is_stopped(self, execution_id: str) -> bool:
        """Return True when stopped (an unknown execution_id counts as stopped)."""
        state = self._get_state(execution_id)
        if state is None:
            return True
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.STOPPED

    def is_active(self, execution_id: str) -> bool:
        """Return True when the execution is active (running or paused)."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] in [ExecutionStatus.RUNNING, ExecutionStatus.PAUSED]


# Global singleton instance
execution_state_manager = ExecutionStateManager()

View File

@@ -1,220 +0,0 @@
"""
生成阶段状态管理器
用于支持生成任务的暂停、停止功能
使用轮询检查机制,确保线程安全
支持多用户/多generation_id并行管理
"""
import threading
import asyncio
import time
from typing import Optional, Dict
from enum import Enum
class GenerationStatus(Enum):
    """Generation status values."""
    GENERATING = "generating"  # currently generating
    PAUSED = "paused"  # paused
    STOPPED = "stopped"  # stopped
    COMPLETED = "completed"  # finished
    IDLE = "idle"  # idle
class GenerationStateManager:
    """
    Generation-phase state manager.

    Responsibilities:
    - Track state for multiple users / generation ids in parallel (dict-keyed).
    - Manage the state of generation tasks (generating / paused / stopped / completed).
    - Provide thread-safe query and mutation methods.

    Design notes:
    - The manager itself stays a singleton, but all state is stored per
      generation_id, so concurrent generations do not interfere.
    """
    _instance: Optional['GenerationStateManager'] = None
    _lock = threading.Lock()

    def __new__(cls):
        """Singleton: double-checked locking around instance creation."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialize the manager (runs only once; guarded by _initialized)."""
        if self._initialized:
            return
        self._initialized = True
        # State storage: generation_id -> state dict with keys
        #   'status': GenerationStatus
        #   'goal': str
        #   'should_stop': bool
        self._states: Dict[str, Dict] = {}
        # One lock per generation_id (finer-grained than a single global lock)
        self._locks: Dict[str, threading.Lock] = {}
        # Manager-level lock protecting _states and _locks themselves
        self._manager_lock = threading.Lock()

    def _get_lock(self, generation_id: str) -> threading.Lock:
        """Return the lock for generation_id, creating it if needed."""
        with self._manager_lock:
            if generation_id not in self._locks:
                self._locks[generation_id] = threading.Lock()
            return self._locks[generation_id]

    def _ensure_state(self, generation_id: str, goal: str = None) -> Dict:
        """Return the state dict for generation_id, creating a default one if missing."""
        with self._manager_lock:
            if generation_id not in self._states:
                self._states[generation_id] = {
                    'status': GenerationStatus.IDLE,
                    'goal': goal,
                    'should_stop': False
                }
            return self._states[generation_id]

    def _get_state(self, generation_id: str) -> Optional[Dict]:
        """Return the state dict for generation_id, or None when absent."""
        with self._manager_lock:
            return self._states.get(generation_id)

    def _cleanup_state(self, generation_id: str):
        """Remove the state and the lock kept for generation_id."""
        with self._manager_lock:
            self._states.pop(generation_id, None)
            self._locks.pop(generation_id, None)

    def get_status(self, generation_id: str) -> Optional[GenerationStatus]:
        """Return the current generation status, or None when unknown."""
        state = self._get_state(generation_id)
        if state is None:
            return None
        with self._get_lock(generation_id):
            return state['status']

    def start_generation(self, generation_id: str, goal: str):
        """Mark the generation as GENERATING and clear the stop flag."""
        state = self._ensure_state(generation_id, goal)
        with self._get_lock(generation_id):
            state['status'] = GenerationStatus.GENERATING
            state['goal'] = goal
            state['should_stop'] = False

    def stop_generation(self, generation_id: str) -> bool:
        """
        Stop the generation.

        Args:
            generation_id: Generation id.

        Returns:
            bool: True when stopped (COMPLETED also counts as stopped).
        """
        state = self._get_state(generation_id)
        if state is None:
            return True  # an unknown id counts as already stopped
        with self._get_lock(generation_id):
            if state['status'] == GenerationStatus.STOPPED:
                return True  # already stopped counts as success
            if state['status'] == GenerationStatus.COMPLETED:
                return True  # completed also counts as stopped
            if state['status'] == GenerationStatus.IDLE:
                return True  # idle means nothing to stop
            # The only case that actually needs stopping
            state['status'] = GenerationStatus.STOPPED
            state['should_stop'] = True
            return True

    def complete_generation(self, generation_id: str):
        """Mark the generation as completed."""
        state = self._ensure_state(generation_id)
        with self._get_lock(generation_id):
            state['status'] = GenerationStatus.COMPLETED

    def cleanup(self, generation_id: str):
        """Drop all state kept for this generation_id."""
        self._cleanup_state(generation_id)

    def should_stop(self, generation_id: str) -> bool:
        """Return True when a stop has been requested."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state.get('should_stop', False)

    def is_stopped(self, generation_id: str) -> bool:
        """Return True when the generation has been stopped."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.STOPPED

    def is_completed(self, generation_id: str) -> bool:
        """Return True when the generation has completed."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.COMPLETED

    def is_active(self, generation_id: str) -> bool:
        """Return True when the generation is active (currently GENERATING only)."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.GENERATING

    def check_and_set_stop(self, generation_id: str) -> bool:
        """
        Check whether the generation should stop.

        NOTE(review): despite the name, this only reads the should_stop flag —
        it does not set any state.

        Args:
            generation_id: Generation id.

        Returns:
            bool: True means the caller should stop; False means continue.
        """
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            if state['should_stop']:
                return True
            return False

    def generate_id(self, goal: str) -> str:
        """
        Generate a unique generation_id.

        Args:
            goal: The generation goal.

        Returns:
            str: An id of the form ``{goal}_{millisecond timestamp}``.
        """
        return f"{goal}_{int(time.time() * 1000)}"


# Global singleton instance
generation_state_manager = GenerationStateManager()

View File

@@ -20,8 +20,6 @@ def camel_case_to_normal(s):
def generate_template_sentence_for_CollaborationBrief(
input_object_list, output_object, agent_list, step_task
):
# Ensure step_task is not None
step_task = step_task if step_task is not None else "perform the task"
# Check if the names are in camel case (no spaces) and convert them to normal naming convention
input_object_list = (
[
@@ -33,48 +31,29 @@ def generate_template_sentence_for_CollaborationBrief(
)
output_object = (
camel_case_to_normal(output_object)
if output_object is not None and is_camel_case(output_object)
else (output_object if output_object is not None else "unknown output")
if is_camel_case(output_object)
else output_object
)
# Format the agents into a string with proper grammar
if agent_list is None or len(agent_list) == 0:
agent_str = "Unknown agents"
elif all(agent is not None for agent in agent_list):
agent_str = (
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
if len(agent_list) > 1
else agent_list[0]
)
else:
# Filter out None values
filtered_agents = [agent for agent in agent_list if agent is not None]
if filtered_agents:
agent_str = (
" and ".join([", ".join(filtered_agents[:-1]), filtered_agents[-1]])
if len(filtered_agents) > 1
else filtered_agents[0]
)
else:
agent_str = "Unknown agents"
agent_str = (
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
if len(agent_list) > 1
else agent_list[0]
)
if input_object_list is None or len(input_object_list) == 0:
# Combine all the parts into the template sentence
template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
else:
# Format the input objects into a string with proper grammar
# Filter out None values from input_object_list
filtered_input_list = [obj for obj in input_object_list if obj is not None]
if filtered_input_list:
input_str = (
" and ".join(
[", ".join(filtered_input_list[:-1]), filtered_input_list[-1]]
)
if len(filtered_input_list) > 1
else filtered_input_list[0]
input_str = (
" and ".join(
[", ".join(input_object_list[:-1]), input_object_list[-1]]
)
else:
input_str = "unknown inputs"
if len(input_object_list) > 1
else input_object_list[0]
)
# Combine all the parts into the template sentence
template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
@@ -94,7 +73,6 @@ def remove_render_spec(duty_spec):
return duty_spec
def read_LLM_Completion(messages, useGroq=True):
for _ in range(3):
text = LLM_Completion(messages, useGroq=useGroq)
@@ -112,7 +90,7 @@ def read_LLM_Completion(messages, useGroq=True):
return json.loads(match.group(0).strip())
except Exception:
pass
return {} # 返回空对象而不是抛出异常
raise ("bad format!")
def read_json_content(text):
@@ -133,7 +111,7 @@ def read_json_content(text):
if match:
return json.loads(match.group(0).strip())
return {} # 返回空对象而不是抛出异常
raise ("bad format!")
def read_outputObject_content(text, keyword):
@@ -149,4 +127,4 @@ def read_outputObject_content(text, keyword):
if match:
return match.group(1).strip()
else:
return "" # 返回空字符串而不是抛出异常
raise ("bad format!")

View File

@@ -1,116 +1,102 @@
[
{
"Icon": "Hailey_Johnson.png",
"Name": "船舶设计师",
"Profile": "提供船舶制造中的实际需求和约束。",
"Classification": "船舶制造数据空间"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "防护工程专家",
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
"Classification": "船舶制造数据空间"
},
{
"Icon": "Jane_Moreno.png",
"Name": "病理生理学家",
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。",
"Classification": "医药数据空间"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "药物化学家",
"Profile": "负责将靶点概念转化为实际可合成的分子。",
"Classification": "医药数据空间"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "制剂工程师",
"Profile": "负责将活性药物成分API变成稳定、可用、符合战场要求的剂型。",
"Classification": "医药数据空间"
},
{
"Icon": "Maria_Lopez.png",
"Name": "监管事务专家",
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。",
"Classification": "医药数据空间"
},
{
"Icon": "Sam_Moore.png",
"Name": "物理学家",
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。",
"Classification": "科学数据空间"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "实验材料学家",
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。",
"Classification": "科学数据空间"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "计算模拟专家",
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。",
"Classification": "科学数据空间"
},
{
"Icon": "John_Lin.png",
"Name": "腐蚀机理研究员",
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
"Classification": "船舶制造数据空间"
},
{
"Icon": "Arthur_Burton.png",
"Name": "先进材料研发员",
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。",
"Classification": "科学数据空间"
},
{
"Icon": "Eddy_Lin.png",
"Name": "肾脏病学家",
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。",
"Classification": "医药数据空间"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "临床研究协调员",
"Profile": "负责受试者招募和临床试验流程优化。",
"Classification": "医药数据空间"
},
{
"Icon": "Latoya_Williams.png",
"Name": "中医药专家",
"Profile": "理解药物的中药成分和作用机制。",
"Classification": "医药数据空间"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "药物安全专家",
"Profile": "专注于药物不良反应数据收集、分析和报告。",
"Classification": "医药数据空间"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "二维材料科学家",
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。",
"Classification": "科学数据空间"
},
{
"Icon": "Tom_Moreno.png",
"Name": "光电物理学家",
"Profile": "研究材料的光电转换机制和关键影响因素。",
"Classification": "科学数据空间"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "机器学习专家",
"Profile": "专注于开发和应用AI模型用于材料模拟。",
"Classification": "科学数据空间"
},
{
"Icon": "Mei_Lin.png",
"Name": "流体动力学专家",
"Profile": "专注于流体行为理论和模拟。",
"Classification": "科学数据空间"
}
{
"Icon": "Abigail_Chen.png",
"Name": "Abigail",
"Profile": "AI Engineer"
},
{
"Icon": "Jane_Moreno.png",
"Name": "Jane",
"Profile": "Cybersecurity Specialist"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "Giorgio",
"Profile": "Poet"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "Jennifer",
"Profile": "Linguist"
},
{
"Icon": "Maria_Lopez.png",
"Name": "Maria",
"Profile": "Philosopher"
},
{
"Icon": "Sam_Moore.png",
"Name": "Sam",
"Profile": "Ethicist"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "Yuriko",
"Profile": "Futurist"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "Carlos",
"Profile": "Language Expert"
},
{
"Icon": "John_Lin.png",
"Name": "John",
"Profile": "Software Developer"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "Tamara",
"Profile": "Music Composer"
},
{
"Icon": "Arthur_Burton.png",
"Name": "Arthur",
"Profile": "Neuroscientist"
},
{
"Icon": "Eddy_Lin.png",
"Name": "Eddy",
"Profile": "Cognitive Psychologist"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "Isabella",
"Profile": "Science Fiction Writer"
},
{
"Icon": "Latoya_Williams.png",
"Name": "Latoya",
"Profile": "Historian of Technology"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "Carmen",
"Profile": "Robotics Engineer"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "Rajiv",
"Profile": "Science Educator"
},
{
"Icon": "Tom_Moreno.png",
"Name": "Tom",
"Profile": "AI Scientist"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "Ayesha",
"Profile": "Multimedia Artist"
},
{
"Icon": "Mei_Lin.png",
"Name": "Mei",
"Profile": "Graphic Designer"
},
{
"Icon": "Hailey_Johnson.png",
"Name": "Hailey",
"Profile": "Legal Expert on AI Law"
}
]

View File

@@ -1,102 +0,0 @@
[
{
"Icon": "Abigail_Chen.png",
"Name": "Abigail",
"Profile": "AI Engineer"
},
{
"Icon": "Jane_Moreno.png",
"Name": "Jane",
"Profile": "Cybersecurity Specialist"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "Giorgio",
"Profile": "Poet"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "Jennifer",
"Profile": "Linguist"
},
{
"Icon": "Maria_Lopez.png",
"Name": "Maria",
"Profile": "Philosopher"
},
{
"Icon": "Sam_Moore.png",
"Name": "Sam",
"Profile": "Ethicist"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "Yuriko",
"Profile": "Futurist"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "Carlos",
"Profile": "Language Expert"
},
{
"Icon": "John_Lin.png",
"Name": "John",
"Profile": "Software Developer"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "Tamara",
"Profile": "Music Composer"
},
{
"Icon": "Arthur_Burton.png",
"Name": "Arthur",
"Profile": "Neuroscientist"
},
{
"Icon": "Eddy_Lin.png",
"Name": "Eddy",
"Profile": "Cognitive Psychologist"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "Isabella",
"Profile": "Science Fiction Writer"
},
{
"Icon": "Latoya_Williams.png",
"Name": "Latoya",
"Profile": "Historian of Technology"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "Carmen",
"Profile": "Robotics Engineer"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "Rajiv",
"Profile": "Science Educator"
},
{
"Icon": "Tom_Moreno.png",
"Name": "Tom",
"Profile": "AI Scientist"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "Ayesha",
"Profile": "Multimedia Artist"
},
{
"Icon": "Mei_Lin.png",
"Name": "Mei",
"Profile": "Graphic Designer"
},
{
"Icon": "Hailey_Johnson.png",
"Name": "Hailey",
"Profile": "Legal Expert on AI Law"
}
]

View File

@@ -0,0 +1,97 @@
[
{
"Icon": "Hailey_Johnson.png",
"Name": "船舶设计师",
"Profile": "提供船舶制造中的实际需求和约束。"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "防护工程专家",
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
},
{
"Icon": "Jane_Moreno.png",
"Name": "病理生理学家",
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "药物化学家",
"Profile": "负责将靶点概念转化为实际可合成的分子。"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "制剂工程师",
"Profile": "负责将活性药物成分API变成稳定、可用、符合战场要求的剂型。"
},
{
"Icon": "Maria_Lopez.png",
"Name": "监管事务专家",
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。"
},
{
"Icon": "Sam_Moore.png",
"Name": "物理学家",
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "实验材料学家",
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "计算模拟专家",
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。"
},
{
"Icon": "John_Lin.png",
"Name": "腐蚀机理研究员",
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
},
{
"Icon": "Arthur_Burton.png",
"Name": "先进材料研发员",
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。"
},
{
"Icon": "Eddy_Lin.png",
"Name": "肾脏病学家",
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "临床研究协调员",
"Profile": "负责受试者招募和临床试验流程优化。"
},
{
"Icon": "Latoya_Williams.png",
"Name": "中医药专家",
"Profile": "理解药物的中药成分和作用机制。"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "药物安全专家",
"Profile": "专注于药物不良反应数据收集、分析和报告。"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "二维材料科学家",
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。"
},
{
"Icon": "Tom_Moreno.png",
"Name": "光电物理学家",
"Profile": "研究材料的光电转换机制和关键影响因素。"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "机器学习专家",
"Profile": "专注于开发和应用AI模型用于材料模拟。"
},
{
"Icon": "Mei_Lin.png",
"Name": "流体动力学专家",
"Profile": "专注于流体行为理论和模拟。"
}
]

File diff suppressed because one or more lines are too long

View File

@@ -1,29 +1,12 @@
## config for default LLM
OPENAI_API_BASE: "https://ai.gitee.com/v1"
OPENAI_API_KEY: ""  # SECURITY: committed API key removed — supply via environment variable or secret store, never commit credentials
OPENAI_API_MODEL: "DeepSeek-V3"
# OPENAI_API_BASE: "https://qianfan.baidubce.com/v2"
# OPENAI_API_KEY: "<redacted — do not commit credentials>"
# OPENAI_API_MODEL: "deepseek-v3"
#OPENAI_API_BASE: "https://api.yyds168.net/v1"
#OPENAI_API_KEY: "<redacted — do not commit credentials>"
#OPENAI_API_MODEL: "deepseek-v3"
OPENAI_API_BASE: ""
OPENAI_API_KEY: ""
OPENAI_API_MODEL: "gpt-4-turbo-preview"
## config for fast mode
FAST_DESIGN_MODE: False
FAST_DESIGN_MODE: True
GROQ_API_KEY: ""
MISTRAL_API_KEY: ""
## options under experimentation, leave them as Fasle unless you know what it is for
USE_CACHE: False
# PostgreSQL 数据库配置
database:
host: "localhost"
port: 5432
username: "postgres"
password: "123456"
name: "agentcoord"
pool_size: 10
max_overflow: 20
USE_CACHE: True

View File

@@ -1,29 +0,0 @@
"""
AgentCoord 数据库模块
提供 PostgreSQL 数据库连接、模型和 CRUD 操作
基于 DATABASE_DESIGN.md 设计
"""
from .database import get_db, get_db_context, test_connection, engine, text
from .models import MultiAgentTask, UserAgent, ExportRecord, PlanShare, TaskStatus
from .crud import MultiAgentTaskCRUD, UserAgentCRUD, ExportRecordCRUD, PlanShareCRUD
__all__ = [
# 连接管理
"get_db",
"get_db_context",
"test_connection",
"engine",
"text",
# 模型
"MultiAgentTask",
"UserAgent",
"ExportRecord",
"PlanShare",
"TaskStatus",
# CRUD
"MultiAgentTaskCRUD",
"UserAgentCRUD",
"ExportRecordCRUD",
"PlanShareCRUD",
]

View File

@@ -1,577 +0,0 @@
"""
数据库 CRUD 操作
封装所有数据库操作方法 (基于 DATABASE_DESIGN.md)
"""
import copy
import uuid
from datetime import datetime, timezone
from typing import List, Optional
from sqlalchemy.orm import Session
from .models import MultiAgentTask, UserAgent, ExportRecord, PlanShare
class MultiAgentTaskCRUD:
"""多智能体任务 CRUD 操作"""
@staticmethod
def create(
db: Session,
task_id: Optional[str] = None, # 可选,如果为 None 则自动生成
user_id: str = "",
query: str = "",
agents_info: list = [],
task_outline: Optional[dict] = None,
assigned_agents: Optional[list] = None,
agent_scores: Optional[dict] = None,
result: Optional[str] = None,
) -> MultiAgentTask:
"""创建任务记录"""
task = MultiAgentTask(
task_id=task_id or str(uuid.uuid4()), # 如果没传则生成新的
user_id=user_id,
query=query,
agents_info=agents_info,
task_outline=task_outline,
assigned_agents=assigned_agents,
agent_scores=agent_scores,
result=result,
)
db.add(task)
db.commit()
db.refresh(task)
return task
@staticmethod
def get_by_id(db: Session, task_id: str) -> Optional[MultiAgentTask]:
"""根据任务 ID 获取记录"""
return db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
@staticmethod
def get_by_user_id(
db: Session, user_id: str, limit: int = 50, offset: int = 0
) -> List[MultiAgentTask]:
"""根据用户 ID 获取任务记录"""
return (
db.query(MultiAgentTask)
.filter(MultiAgentTask.user_id == user_id)
.order_by(MultiAgentTask.created_at.desc())
.offset(offset)
.limit(limit)
.all()
)
@staticmethod
def get_recent(
db: Session, limit: int = 20, offset: int = 0, user_id: str = None
) -> List[MultiAgentTask]:
"""获取最近的任务记录,置顶的排在最前面"""
query = db.query(MultiAgentTask)
# 按 user_id 过滤
if user_id:
query = query.filter(MultiAgentTask.user_id == user_id)
return (
query
.order_by(MultiAgentTask.is_pinned.desc(), MultiAgentTask.created_at.desc())
.offset(offset)
.limit(limit)
.all()
)
@staticmethod
def update_result(
db: Session, task_id: str, result: list
) -> Optional[MultiAgentTask]:
"""更新任务结果"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.result = result if result else []
db.commit()
db.refresh(task)
return task
@staticmethod
def update_task_outline(
db: Session, task_id: str, task_outline: dict
) -> Optional[MultiAgentTask]:
"""更新任务大纲"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.task_outline = task_outline
db.commit()
db.refresh(task)
return task
@staticmethod
def update_assigned_agents(
db: Session, task_id: str, assigned_agents: dict
) -> Optional[MultiAgentTask]:
"""更新分配的智能体(步骤名 -> agent列表"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.assigned_agents = assigned_agents
db.commit()
db.refresh(task)
return task
@staticmethod
def update_agent_scores(
db: Session, task_id: str, agent_scores: dict
) -> Optional[MultiAgentTask]:
"""更新智能体评分(合并模式,追加新步骤的评分)"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
# 合并现有评分数据和新评分数据
existing_scores = task.agent_scores or {}
merged_scores = {**existing_scores, **agent_scores} # 新数据覆盖/追加旧数据
task.agent_scores = merged_scores
db.commit()
db.refresh(task)
return task
@staticmethod
def update_status(
db: Session, task_id: str, status: str
) -> Optional[MultiAgentTask]:
"""更新任务状态"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.status = status
db.commit()
db.refresh(task)
return task
@staticmethod
def increment_execution_count(db: Session, task_id: str) -> Optional[MultiAgentTask]:
"""增加任务执行次数"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.execution_count = (task.execution_count or 0) + 1
db.commit()
db.refresh(task)
return task
@staticmethod
def update_generation_id(
db: Session, task_id: str, generation_id: str
) -> Optional[MultiAgentTask]:
"""更新生成 ID"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.generation_id = generation_id
db.commit()
db.refresh(task)
return task
@staticmethod
def update_execution_id(
db: Session, task_id: str, execution_id: str
) -> Optional[MultiAgentTask]:
"""更新执行 ID"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.execution_id = execution_id
db.commit()
db.refresh(task)
return task
@staticmethod
def update_rehearsal_log(
db: Session, task_id: str, rehearsal_log: list
) -> Optional[MultiAgentTask]:
"""更新排练日志"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.rehearsal_log = rehearsal_log if rehearsal_log else []
db.commit()
db.refresh(task)
return task
@staticmethod
def update_is_pinned(
db: Session, task_id: str, is_pinned: bool
) -> Optional[MultiAgentTask]:
"""更新任务置顶状态"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
task.is_pinned = is_pinned
db.commit()
db.refresh(task)
return task
@staticmethod
def append_rehearsal_log(
db: Session, task_id: str, log_entry: dict
) -> Optional[MultiAgentTask]:
"""追加排练日志条目"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
current_log = task.rehearsal_log or []
if isinstance(current_log, list):
current_log.append(log_entry)
else:
current_log = [log_entry]
task.rehearsal_log = current_log
db.commit()
db.refresh(task)
return task
@staticmethod
def update_branches(
db: Session, task_id: str, branches
) -> Optional[MultiAgentTask]:
"""更新任务分支数据
支持两种格式:
- list: 旧格式,直接覆盖
- dict: 新格式 { flow_branches: [...], task_process_branches: {...} }
两个 key 独立保存,互不干扰。
"""
import copy
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
if isinstance(branches, dict):
# 新格式:字典,独立保存两个 key互不干扰
# 使用深拷贝避免引用共享问题
existing = copy.deepcopy(task.branches) if task.branches else {}
if isinstance(existing, dict):
# 如果只更新 flow_branches保留已有的 task_process_branches
if 'flow_branches' in branches and 'task_process_branches' not in branches:
branches['task_process_branches'] = existing.get('task_process_branches', {})
# 如果只更新 task_process_branches保留已有的 flow_branches
if 'task_process_branches' in branches and 'flow_branches' not in branches:
branches['flow_branches'] = existing.get('flow_branches', [])
task.branches = branches
else:
# 旧格式:列表
task.branches = branches if branches else []
db.commit()
db.refresh(task)
return task
@staticmethod
def get_branches(db: Session, task_id: str) -> Optional[list]:
"""获取任务分支数据"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
return task.branches or []
return []
@staticmethod
def get_by_status(
db: Session, status: str, limit: int = 50, offset: int = 0
) -> List[MultiAgentTask]:
"""根据状态获取任务记录"""
return (
db.query(MultiAgentTask)
.filter(MultiAgentTask.status == status)
.order_by(MultiAgentTask.created_at.desc())
.offset(offset)
.limit(limit)
.all()
)
@staticmethod
def get_by_generation_id(
db: Session, generation_id: str
) -> List[MultiAgentTask]:
"""根据生成 ID 获取任务记录"""
return (
db.query(MultiAgentTask)
.filter(MultiAgentTask.generation_id == generation_id)
.all()
)
@staticmethod
def get_by_execution_id(
db: Session, execution_id: str
) -> List[MultiAgentTask]:
"""根据执行 ID 获取任务记录"""
return (
db.query(MultiAgentTask)
.filter(MultiAgentTask.execution_id == execution_id)
.all()
)
@staticmethod
def delete(db: Session, task_id: str) -> bool:
"""删除任务记录"""
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
if task:
db.delete(task)
db.commit()
return True
return False
class UserAgentCRUD:
"""用户智能体配置 CRUD 操作"""
@staticmethod
def create(
db: Session,
user_id: str,
agent_name: str,
agent_config: dict,
) -> UserAgent:
"""创建用户智能体配置"""
agent = UserAgent(
id=str(uuid.uuid4()),
user_id=user_id,
agent_name=agent_name,
agent_config=agent_config,
)
db.add(agent)
db.commit()
db.refresh(agent)
return agent
@staticmethod
def get_by_id(db: Session, agent_id: str) -> Optional[UserAgent]:
"""根据 ID 获取配置"""
return db.query(UserAgent).filter(UserAgent.id == agent_id).first()
@staticmethod
def get_by_user_id(
db: Session, user_id: str, limit: int = 50
) -> List[UserAgent]:
"""根据用户 ID 获取所有智能体配置"""
return (
db.query(UserAgent)
.filter(UserAgent.user_id == user_id)
.order_by(UserAgent.created_at.desc())
.limit(limit)
.all()
)
@staticmethod
def get_by_name(
db: Session, user_id: str, agent_name: str
) -> List[UserAgent]:
"""根据用户 ID 和智能体名称获取配置"""
return (
db.query(UserAgent)
.filter(
UserAgent.user_id == user_id,
UserAgent.agent_name == agent_name,
)
.all()
)
@staticmethod
def update_config(
db: Session, agent_id: str, agent_config: dict
) -> Optional[UserAgent]:
"""更新智能体配置"""
agent = db.query(UserAgent).filter(UserAgent.id == agent_id).first()
if agent:
agent.agent_config = agent_config
db.commit()
db.refresh(agent)
return agent
@staticmethod
def delete(db: Session, agent_id: str) -> bool:
"""删除智能体配置"""
agent = db.query(UserAgent).filter(UserAgent.id == agent_id).first()
if agent:
db.delete(agent)
db.commit()
return True
return False
@staticmethod
def upsert(
db: Session,
user_id: str,
agent_name: str,
agent_config: dict,
) -> UserAgent:
"""更新或插入用户智能体配置(根据 user_id + agent_name 判断唯一性)
如果已存在相同 user_id 和 agent_name 的记录,则更新配置;
否则创建新记录。
"""
existing = (
db.query(UserAgent)
.filter(
UserAgent.user_id == user_id,
UserAgent.agent_name == agent_name,
)
.first()
)
if existing:
# 更新现有记录
existing.agent_config = agent_config
db.commit()
db.refresh(existing)
return existing
else:
# 创建新记录
agent = UserAgent(
id=str(uuid.uuid4()),
user_id=user_id,
agent_name=agent_name,
agent_config=agent_config,
)
db.add(agent)
db.commit()
db.refresh(agent)
return agent
class ExportRecordCRUD:
"""导出记录 CRUD 操作"""
@staticmethod
def create(
db: Session,
task_id: str,
user_id: str,
export_type: str,
file_name: str,
file_path: str,
file_url: str = "",
file_size: int = 0,
) -> ExportRecord:
"""创建导出记录"""
record = ExportRecord(
task_id=task_id,
user_id=user_id,
export_type=export_type,
file_name=file_name,
file_path=file_path,
file_url=file_url,
file_size=file_size,
)
db.add(record)
db.commit()
db.refresh(record)
return record
@staticmethod
def get_by_id(db: Session, record_id: int) -> Optional[ExportRecord]:
"""根据 ID 获取记录"""
return db.query(ExportRecord).filter(ExportRecord.id == record_id).first()
@staticmethod
def get_by_task_id(
db: Session, task_id: str, limit: int = 50
) -> List[ExportRecord]:
"""根据任务 ID 获取导出记录列表"""
return (
db.query(ExportRecord)
.filter(ExportRecord.task_id == task_id)
.order_by(ExportRecord.created_at.desc())
.limit(limit)
.all()
)
@staticmethod
def get_by_user_id(
db: Session, user_id: str, limit: int = 50
) -> List[ExportRecord]:
"""根据用户 ID 获取导出记录列表"""
return (
db.query(ExportRecord)
.filter(ExportRecord.user_id == user_id)
.order_by(ExportRecord.created_at.desc())
.limit(limit)
.all()
)
@staticmethod
def delete(db: Session, record_id: int) -> bool:
"""删除导出记录"""
record = db.query(ExportRecord).filter(ExportRecord.id == record_id).first()
if record:
db.delete(record)
db.commit()
return True
return False
@staticmethod
def delete_by_task_id(db: Session, task_id: str) -> bool:
"""删除任务的所有导出记录"""
records = db.query(ExportRecord).filter(ExportRecord.task_id == task_id).all()
if records:
for record in records:
db.delete(record)
db.commit()
return True
return False
class PlanShareCRUD:
    """CRUD operations for task share records (``plan_shares`` table)."""

    @staticmethod
    def create(
        db: Session,
        share_token: str,
        task_id: str,
        task_data: dict,
        expires_at: Optional[datetime] = None,
        extraction_code: Optional[str] = None,
    ) -> PlanShare:
        """Create a share record holding a snapshot of the task payload.

        ``expires_at`` of None means the share does not expire;
        ``extraction_code`` is an optional access code for the link.
        """
        share = PlanShare(
            share_token=share_token,
            extraction_code=extraction_code,
            task_id=task_id,
            task_data=task_data,
            expires_at=expires_at,
        )
        db.add(share)
        db.commit()
        db.refresh(share)
        return share

    @staticmethod
    def get_by_token(db: Session, share_token: str) -> Optional[PlanShare]:
        """Look up a share record by its unique token; None if not found."""
        return db.query(PlanShare).filter(PlanShare.share_token == share_token).first()

    @staticmethod
    def get_by_task_id(
        db: Session, task_id: str, limit: int = 10
    ) -> List[PlanShare]:
        """Return up to ``limit`` share records for a task, newest first."""
        return (
            db.query(PlanShare)
            .filter(PlanShare.task_id == task_id)
            .order_by(PlanShare.created_at.desc())
            .limit(limit)
            .all()
        )

    @staticmethod
    def increment_view_count(db: Session, share_token: str) -> Optional[PlanShare]:
        """Increment the view counter for a share; returns the updated row or None.

        NOTE(review): this is a read-modify-write, not an atomic UPDATE —
        concurrent views may lose counts; confirm whether that matters here.
        """
        share = db.query(PlanShare).filter(PlanShare.share_token == share_token).first()
        if share:
            share.view_count = (share.view_count or 0) + 1
            db.commit()
            db.refresh(share)
        return share

    @staticmethod
    def delete(db: Session, share_token: str) -> bool:
        """Delete one share record by token; True if a row was removed."""
        share = db.query(PlanShare).filter(PlanShare.share_token == share_token).first()
        if share:
            db.delete(share)
            db.commit()
            return True
        return False

    @staticmethod
    def delete_by_task_id(db: Session, task_id: str) -> bool:
        """Delete all share records for a task; True if any rows were removed."""
        shares = db.query(PlanShare).filter(PlanShare.task_id == task_id).all()
        if shares:
            for share in shares:
                db.delete(share)
            db.commit()
            return True
        return False

View File

@@ -1,95 +0,0 @@
"""
数据库连接管理模块
使用 SQLAlchemy ORM支持同步操作
"""
import os
import yaml
from typing import Generator
from contextlib import contextmanager
import json
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker, declarative_base
from sqlalchemy.pool import QueuePool
from sqlalchemy.dialects.postgresql import dialect as pg_dialect
# 读取配置
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
with open(yaml_file, "r", encoding="utf-8") as file:
config = yaml.safe_load(file).get("database", {})
except Exception:
config = {}
def get_database_url() -> str:
    """Build the PostgreSQL connection URL.

    Environment variables (DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME)
    take precedence over the ``database`` section of config.yaml.

    User and password are percent-encoded so credentials containing
    URL-special characters (e.g. '@', ':', '/') do not corrupt the DSN.
    """
    from urllib.parse import quote_plus  # local import keeps the module import block unchanged

    # Prefer environment variables over file config.
    host = os.getenv("DB_HOST", config.get("host", "localhost"))
    port = os.getenv("DB_PORT", config.get("port", "5432"))
    user = os.getenv("DB_USER", config.get("username", "postgres"))
    password = os.getenv("DB_PASSWORD", config.get("password", ""))
    dbname = os.getenv("DB_NAME", config.get("name", "agentcoord"))
    # str() guards against non-string values coming from YAML (e.g. numeric port/user).
    return (
        f"postgresql://{quote_plus(str(user))}:{quote_plus(str(password))}"
        f"@{host}:{port}/{dbname}"
    )
# 创建引擎
DATABASE_URL = get_database_url()
engine = create_engine(
DATABASE_URL,
poolclass=QueuePool,
pool_size=config.get("pool_size", 10),
max_overflow=config.get("max_overflow", 20),
pool_pre_ping=True,
echo=False,
# JSONB 类型处理器配置
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
json_deserializer=lambda s: json.loads(s) if isinstance(s, str) else s
)
# 创建会话工厂
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# 基础类
Base = declarative_base()
def get_db() -> Generator:
    """Yield a database session, guaranteeing it is closed afterwards.

    Usage: ``for db in get_db(): ...`` (or as a dependency injector).
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@contextmanager
def get_db_context() -> Generator:
    """Context-manager database session: commit on success, roll back on error.

    Usage: ``with get_db_context() as db: ...``

    The session is committed only if the ``with`` body completes without
    raising; any exception triggers a rollback and is re-raised. The
    session is always closed on exit.
    """
    db = SessionLocal()
    try:
        yield db
        db.commit()
    except Exception:
        # Roll back the failed transaction, then re-raise so callers see the error.
        db.rollback()
        raise
    finally:
        db.close()
def test_connection() -> bool:
    """Probe database connectivity with a trivial ``SELECT 1``.

    Returns True when the query succeeds, False on any failure.
    The broad ``except`` is intentional: this is a health probe, and any
    driver or network error simply means "not reachable".
    """
    try:
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        return True
    except Exception:
        return False

View File

@@ -1,22 +0,0 @@
"""
数据库初始化脚本
运行此脚本创建所有表结构
基于 DATABASE_DESIGN.md 设计
"""
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from db.database import engine, Base
from db.models import MultiAgentTask, UserAgent, PlanShare
def init_database():
    """Create all tables registered on ``Base`` (idempotent: existing tables are kept)."""
    Base.metadata.create_all(bind=engine)


if __name__ == "__main__":
    init_database()

View File

@@ -1,170 +0,0 @@
"""
SQLAlchemy ORM 数据模型
对应数据库表结构 (基于 DATABASE_DESIGN.md)
"""
import uuid
from datetime import datetime, timezone
from enum import Enum as PyEnum
from sqlalchemy import Column, String, Text, DateTime, Integer, Enum, Index, ForeignKey, Boolean
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship
from .database import Base
class TaskStatus(str, PyEnum):
    """Lifecycle states of a multi-agent task.

    Inherits from ``str`` so values serialize naturally into JSON payloads
    and the DB enum column.
    """
    GENERATING = "generating"  # generating - TaskProcess generation phase
    EXECUTING = "executing"    # executing - task execution phase
    STOPPED = "stopped"        # stopped - user manually stopped execution
    COMPLETED = "completed"    # completed - task finished normally
def utc_now():
    """Return the current time as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
class MultiAgentTask(Base):
    """ORM model for one multi-agent task record (table ``multi_agent_tasks``).

    Holds the user query, the agent roster, the planning outline, execution
    artifacts (rehearsal log, result, per-step scores) and lifecycle status.
    JSONB columns store semi-structured payloads as-is.
    """
    __tablename__ = "multi_agent_tasks"
    task_id = Column(String(64), primary_key=True)  # external task identifier
    user_id = Column(String(64), nullable=False, index=True)  # owning user
    query = Column(Text, nullable=False)  # original user request
    agents_info = Column(JSONB, nullable=False)  # agent roster used for this task
    task_outline = Column(JSONB)  # planning-stage outline
    assigned_agents = Column(JSONB)  # presumably step name -> list of agents; verify against callers
    agent_scores = Column(JSONB)  # per-agent scoring data
    result = Column(JSONB)  # final execution output
    status = Column(
        Enum(TaskStatus, name="task_status_enum", create_type=False),
        default=TaskStatus.GENERATING,
        nullable=False
    )
    execution_count = Column(Integer, default=0, nullable=False)  # number of executions
    generation_id = Column(String(64))  # id of the current generation run
    execution_id = Column(String(64))  # id of the current execution run
    rehearsal_log = Column(JSONB)  # rehearsal/execution log entries
    branches = Column(JSONB)  # task-outline exploration branch data
    is_pinned = Column(Boolean, default=False, nullable=False)  # pin-to-top flag for listings
    created_at = Column(DateTime(timezone=True), default=utc_now)
    updated_at = Column(DateTime(timezone=True), default=utc_now, onupdate=utc_now)
    __table_args__ = (
        Index("idx_multi_agent_tasks_status", "status"),
        Index("idx_multi_agent_tasks_generation_id", "generation_id"),
        Index("idx_multi_agent_tasks_execution_id", "execution_id"),
    )

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict (enum -> value, datetimes -> ISO 8601)."""
        return {
            "task_id": self.task_id,
            "user_id": self.user_id,
            "query": self.query,
            "agents_info": self.agents_info,
            "task_outline": self.task_outline,
            "assigned_agents": self.assigned_agents,
            "agent_scores": self.agent_scores,
            "result": self.result,
            "status": self.status.value if self.status else None,
            "execution_count": self.execution_count,
            "generation_id": self.generation_id,
            "execution_id": self.execution_id,
            "rehearsal_log": self.rehearsal_log,
            "branches": self.branches,
            "is_pinned": self.is_pinned,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
class ExportRecord(Base):
    """ORM model for one exported file (table ``export_records``)."""
    __tablename__ = "export_records"
    id = Column(Integer, primary_key=True, autoincrement=True)
    task_id = Column(String(64), nullable=False, index=True)  # related task id
    user_id = Column(String(64), nullable=False, index=True)  # owning user id
    export_type = Column(String(32), nullable=False)  # export type: doc/markdown/mindmap/infographic/excel/ppt
    file_name = Column(String(256), nullable=False)  # file name
    file_path = Column(String(512), nullable=False)  # server-side storage path
    file_url = Column(String(512))  # access URL
    file_size = Column(Integer, default=0)  # file size in bytes
    created_at = Column(DateTime(timezone=True), default=utc_now)
    __table_args__ = (
        Index("idx_export_records_task_user", "task_id", "user_id"),
    )

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict (datetimes -> ISO 8601)."""
        return {
            "id": self.id,
            "task_id": self.task_id,
            "user_id": self.user_id,
            "export_type": self.export_type,
            "file_name": self.file_name,
            "file_path": self.file_path,
            "file_url": self.file_url,
            "file_size": self.file_size,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }
class UserAgent(Base):
    """ORM model for a user-saved agent configuration (optional table ``user_agents``)."""
    __tablename__ = "user_agents"
    id = Column(String(64), primary_key=True)
    user_id = Column(String(64), nullable=False, index=True)  # owning user
    agent_name = Column(String(100), nullable=False)  # display name of the agent
    agent_config = Column(JSONB, nullable=False)  # full agent configuration payload
    created_at = Column(DateTime(timezone=True), default=utc_now)
    __table_args__ = (
        Index("idx_user_agents_user_created", "user_id", "created_at"),
    )

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict (datetimes -> ISO 8601)."""
        return {
            "id": self.id,
            "user_id": self.user_id,
            "agent_name": self.agent_name,
            "agent_config": self.agent_config,
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }
class PlanShare(Base):
    """ORM model for a task share record (table ``plan_shares``)."""
    __tablename__ = "plan_shares"
    id = Column(Integer, primary_key=True, autoincrement=True)
    share_token = Column(String(64), unique=True, index=True, nullable=False)  # unique share token
    extraction_code = Column(String(8), nullable=True)  # optional extraction code (short alphanumeric)
    task_id = Column(String(64), nullable=False, index=True)  # related task id
    task_data = Column(JSONB, nullable=False)  # full task payload (sanitized snapshot)
    created_at = Column(DateTime(timezone=True), default=utc_now)
    expires_at = Column(DateTime(timezone=True), nullable=True)  # expiry time; NULL means no expiry
    view_count = Column(Integer, default=0)  # number of views
    __table_args__ = (
        Index("idx_plan_shares_token", "share_token"),
    )

    def to_dict(self) -> dict:
        """Serialize to a JSON-friendly dict (datetimes -> ISO 8601)."""
        return {
            "id": self.id,
            "share_token": self.share_token,
            "extraction_code": self.extraction_code,
            "task_id": self.task_id,
            "task_data": self.task_data,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "expires_at": self.expires_at.isoformat() if self.expires_at else None,
            "view_count": self.view_count,
        }

View File

@@ -1,106 +0,0 @@
-- AgentCoord 数据库表结构
-- 基于 DATABASE_DESIGN.md 设计
-- 执行方式: psql -U postgres -d agentcoord -f schema.sql
-- =============================================================================
-- 表1: multi_agent_tasks (多智能体任务记录)
-- 状态枚举: pending/planning/generating/executing/completed/failed
-- =============================================================================
CREATE TABLE IF NOT EXISTS multi_agent_tasks (
task_id VARCHAR(64) PRIMARY KEY,
user_id VARCHAR(64) NOT NULL,
query TEXT NOT NULL,
agents_info JSONB NOT NULL,
task_outline JSONB,
assigned_agents JSONB,
agent_scores JSONB,
result JSONB,
status VARCHAR(20) DEFAULT 'pending',
execution_count INTEGER DEFAULT 0,
generation_id VARCHAR(64),
execution_id VARCHAR(64),
rehearsal_log JSONB,
branches JSONB, -- 任务大纲探索分支数据
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- 索引
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_user_id ON multi_agent_tasks(user_id);
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_created_at ON multi_agent_tasks(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_status ON multi_agent_tasks(status);
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_generation_id ON multi_agent_tasks(generation_id);
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_execution_id ON multi_agent_tasks(execution_id);
-- =============================================================================
-- 表2: user_agents (用户保存的智能体配置) - 可选表
-- =============================================================================
CREATE TABLE IF NOT EXISTS user_agents (
id VARCHAR(64) PRIMARY KEY,
user_id VARCHAR(64) NOT NULL,
agent_name VARCHAR(100) NOT NULL,
agent_config JSONB NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_user_agents_user_id ON user_agents(user_id);
-- =============================================================================
-- 更新时间触发器函数
-- =============================================================================
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
-- 为 multi_agent_tasks 表创建触发器
CREATE TRIGGER update_multi_agent_tasks_updated_at
BEFORE UPDATE ON multi_agent_tasks
FOR EACH ROW
EXECUTE FUNCTION update_updated_at_column();
-- =============================================================================
-- 表3: export_records (导出记录)
-- =============================================================================
CREATE TABLE IF NOT EXISTS export_records (
id SERIAL PRIMARY KEY,
task_id VARCHAR(64) NOT NULL,
user_id VARCHAR(64) NOT NULL,
export_type VARCHAR(32) NOT NULL,
file_name VARCHAR(256) NOT NULL,
file_path VARCHAR(512) NOT NULL,
file_url VARCHAR(512),
file_size INTEGER DEFAULT 0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_export_records_task_user ON export_records(task_id, user_id);
-- =============================================================================
-- 表4: plan_shares (任务分享记录)
-- =============================================================================
CREATE TABLE IF NOT EXISTS plan_shares (
id SERIAL PRIMARY KEY,
share_token VARCHAR(64) NOT NULL UNIQUE,
extraction_code VARCHAR(8),
task_id VARCHAR(64) NOT NULL,
task_data JSONB NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
expires_at TIMESTAMP WITH TIME ZONE,
view_count INTEGER DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_plan_shares_token ON plan_shares(share_token);
CREATE INDEX IF NOT EXISTS idx_plan_shares_task_id ON plan_shares(task_id);
DO $$
BEGIN
RAISE NOTICE '✅ PostgreSQL 数据库表结构创建完成!';
RAISE NOTICE '表: multi_agent_tasks (多智能体任务记录)';
RAISE NOTICE '表: user_agents (用户智能体配置)';
RAISE NOTICE '表: export_records (导出记录)';
RAISE NOTICE '表: plan_shares (任务分享记录)';
END $$;

View File

@@ -1,13 +1,7 @@
Flask==3.0.2
openai==2.8.1
openai==0.28.1
PyYAML==6.0.1
termcolor==2.4.0
groq==0.4.2
mistralai==0.1.6
flask-socketio==5.3.6
python-socketio==5.11.0
simple-websocket==1.0.0
eventlet==0.40.4
python-docx==1.1.2
openpyxl==3.1.5
python-pptx==1.0.2
socksio==1.0.0

File diff suppressed because it is too large Load Diff

View File

@@ -21,11 +21,12 @@ services:
ports:
- "8000:8000"
environment:
      - OPENAI_API_BASE=https://api.openai.com
- OPENAI_API_KEY=
- OPENAI_API_MODEL=gpt-4-turbo-preview
- FAST_DESIGN_MODE=True
- OPENAI_API_BASE=https://api.moleapi.com/v1
      - OPENAI_API_KEY=  # NOTE(review): a live-looking API key was committed on this line — revoke it immediately and supply the key via an env file or secret store, never hard-coded in docker-compose.yml
- OPENAI_API_MODEL=gpt-4.1-mini
- FAST_DESIGN_MODE=False
- GROQ_API_KEY=
- USE_CACHE=True
networks:
- agentcoord-network

View File

@@ -1,31 +0,0 @@
.DS_Store
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,32 +0,0 @@
.DS_Store
.env
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,22 +0,0 @@
FROM node:20-alpine AS base
FROM base AS installer
WORKDIR /app
COPY package.json .npmrc ./
RUN npm install
FROM base AS builder
WORKDIR /app
COPY --from=installer /app/node_modules ./node_modules
COPY . .
RUN npm run build
FROM base AS runner
WORKDIR /app
EXPOSE 8080/tcp
COPY .npmrc ./
RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
COPY modern.config.ts package.json ./
COPY --from=builder /app/dist ./dist
ENV API_BASE=
CMD ["npm", "run", "serve"]

View File

@@ -1,39 +0,0 @@
# AgentCoord Frontend
This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
## Installation
You can launch the frontend by simply using `docker-compose` in the root directory of the project.
Or, you can launch the frontend manually by following the steps below.
1. Install the dependencies by running `npm install`.
```bash
npm install
```
2. Build the frontend by running `npm run build`.
```bash
npm run build
```
3. Start the frontend by running `npm run serve`.
```bash
npm run serve
```
Then you can access the frontend by visiting `http://localhost:8080`.
## Development
You can run the frontend in development mode by running `npm run dev`.
```bash
npm run dev
```
The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.

View File

@@ -1,66 +0,0 @@
{
"name": "agent-coord-frontend",
"version": "0.0.1",
"scripts": {
"reset": "npx rimraf ./**/node_modules",
"dev": "modern dev",
"build": "modern build",
"start": "modern start",
"serve": "modern serve",
"new": "modern new",
"lint": "modern lint",
"prepare": "husky install",
"upgrade": "modern upgrade"
},
"engines": {
"node": ">=16.18.1"
},
"lint-staged": {
"*.{js,jsx,ts,tsx,mjs,cjs}": [
"node --max_old_space_size=8192 ./node_modules/eslint/bin/eslint.js --fix --color --cache --quiet"
]
},
"eslintIgnore": [
"node_modules/",
"dist/"
],
"dependencies": {
"@emotion/react": "^11.11.3",
"@emotion/styled": "^11.11.0",
"@modern-js/runtime": "2.46.1",
"@mui/icons-material": "^5.15.6",
"@mui/material": "^5.15.6",
"@sttot/api-hooks": "^1.2.5",
"d3": "^7.8.5",
"localforage": "^1.10.0",
"lodash": "^4.17.21",
"mobx": "^6.12.0",
"mobx-react-lite": "^4.0.6",
"re-resizable": "^6.9.11",
"react": "~18.2.0",
"react-dom": "~18.2.0",
"react-markdown": "^9.0.1",
"react-rnd": "^10.4.1",
"rehype-highlight": "^7.0.0",
"rehype-katex": "^7.0.0",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0"
},
"devDependencies": {
"@modern-js-app/eslint-config": "2.46.1",
"@modern-js/app-tools": "2.46.1",
"@modern-js/eslint-config": "2.46.1",
"@modern-js/tsconfig": "2.46.1",
"@types/d3": "^7.4.3",
"@types/jest": "~29.2.4",
"@types/lodash": "^4.14.202",
"@types/node": "~16.11.7",
"@types/react": "~18.0.26",
"@types/react-dom": "~18.0.10",
"husky": "~8.0.1",
"lint-staged": "~13.1.0",
"prettier": "~2.8.1",
"rimraf": "~3.0.2",
"typescript": "~5.0.4"
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,170 +0,0 @@
import React from 'react';
import { observer } from 'mobx-react-lite';
import { Divider, SxProps } from '@mui/material';
import Box from '@mui/material/Box';
import UnfoldLessIcon from '@mui/icons-material/UnfoldLess';
import MoreHorizIcon from '@mui/icons-material/MoreHoriz';
import MarkdownBlock from '@/components/MarkdownBlock';
import { globalStorage } from '@/storage';
import type { IExecuteStepHistoryItem } from '@/apis/execute-plan';
import AgentIcon from '@/components/AgentIcon';
import { getAgentActionStyle } from '@/storage/plan';
export interface IStepHistoryItemProps {
item: IExecuteStepHistoryItem;
actionRef: React.RefObject<HTMLDivElement | HTMLElement>;
style?: SxProps;
hoverCallback: (isHovered: boolean) => void;
handleExpand?: () => void;
}
/**
 * Renders a single agent-action entry in the step-history list.
 *
 * Shows the agent icon, agent name and action type; when the item carries a
 * `result`, a collapsible detail section (description + markdown result) can
 * be toggled via the expand button. The parent is notified through
 * `hoverCallback` on mouse enter/leave and polled via `handleExpand` while
 * the expand/collapse height transition runs.
 *
 * Fixes vs. previous revision:
 *  - removed a leftover debug `useEffect` that logged every item to console;
 *  - the `handleExpand` polling interval is now cleared on unmount/re-run,
 *    so it can no longer fire after the component is gone.
 */
export default observer(
  ({
    item,
    actionRef,
    hoverCallback,
    handleExpand,
    style = {},
  }: IStepHistoryItemProps) => {
    // Whether the detail (description + result markdown) section is open.
    const [expand, setExpand] = React.useState(false);
    const refDetail = React.useRef<HTMLDivElement>(null);
    // Animate the detail container's height whenever `expand` flips.
    React.useEffect(() => {
      if (refDetail.current) {
        refDetail.current.style.height = expand
          ? `${refDetail.current.scrollHeight}px`
          : '0px';
      }
      // Poll the parent ~every 10ms for 20 ticks (~200ms) so it can track
      // our height while the 200ms CSS transition below is running.
      if (handleExpand) {
        let count = 0;
        const intervalId = setInterval(() => {
          handleExpand();
          count++;
          if (count >= 20) {
            clearInterval(intervalId);
          }
        }, 10);
        // Stop polling if the component unmounts (or `expand` changes) first.
        return () => clearInterval(intervalId);
      }
      return undefined;
    }, [expand]);
    // Per-action-type colors, overridable by the caller-supplied `style`.
    const s = { ...getAgentActionStyle(item.type), ...style } as SxProps;
    return (
      <Box
        ref={actionRef}
        className="step-history-item"
        sx={{
          userSelect: 'none',
          borderRadius: '10px',
          padding: '4px',
          fontSize: '14px',
          position: 'relative',
          marginTop: '4px',
          backgroundColor: (s as any).backgroundColor,
          border: `2px solid ${(s as any).borderColor}`,
        }}
        onMouseOver={() => hoverCallback(true)}
        onMouseOut={() => hoverCallback(false)}
      >
        {/* Header row: agent icon, agent name, action type. */}
        <Box sx={{ display: 'flex', alignItems: 'center' }}>
          <AgentIcon
            name={globalStorage.agentIconMap.get(item.agent)}
            style={{ height: '36px', width: 'auto', margin: '0px' }}
            tooltipInfo={globalStorage.agentMap.get(item.agent)}
          />
          <Box component="span" sx={{ fontWeight: 500, marginLeft: '4px' }}>
            {item.agent}
          </Box>
          <Box component="span" sx={{ fontWeight: 400 }}>
            : {item.type}
          </Box>
        </Box>
        {item.result ? (
          <Box
            ref={refDetail}
            sx={{
              overflow: 'hidden',
              transition: 'height 200ms ease-out', // height transition
            }}
          >
            {expand ? (
              <>
                <Divider
                  sx={{
                    margin: '4px 0px',
                    borderBottom: '2px dashed', // dashed separator
                    borderColor: '#0003',
                  }}
                />
                <Box
                  sx={{
                    marginLeft: '6px',
                    marginBottom: '4px',
                    color: '#0009',
                    fontWeight: 400,
                  }}
                >
                  {item.description}
                </Box>
                <MarkdownBlock
                  text={item.result}
                  style={{
                    marginTop: '5px',
                    borderRadius: '10px',
                    padding: '6px',
                    background: '#FFF9',
                    fontSize: '12px',
                    maxHeight: '240px',
                    overflowY: 'auto',
                    border: '1px solid #0003',
                    whiteSpace: 'pre-wrap',
                    wordWrap: 'break-word',
                    marginBottom: '5px',
                  }}
                />
              </>
            ) : (
              <></>
            )}
            {/* Expand/collapse toggle pill. stopPropagation keeps the click
                from triggering any ancestor handlers. */}
            <Box
              onClick={e => {
                setExpand(v => !v);
                e.stopPropagation();
              }}
              sx={{
                position: 'absolute',
                right: '8px',
                top: '24px',
                cursor: 'pointer',
                userSelect: 'none',
                height: '14px',
                display: 'flex',
                alignItems: 'center',
                justifyContent: 'center',
                background: '#0002',
                borderRadius: '8px',
                marginLeft: '4px',
                padding: '0 4px',
                '&:hover': {
                  background: '#0003',
                },
              }}
            >
              {expand ? (
                <UnfoldLessIcon
                  sx={{ fontSize: '16px', transform: 'rotate(90deg)' }}
                />
              ) : (
                <MoreHorizIcon sx={{ fontSize: '16px' }} />
              )}
            </Box>
          </Box>
        ) : (
          <></>
        )}
      </Box>
    );
  },
);

View File

@@ -1,101 +0,0 @@
import React from 'react';
import { observer } from 'mobx-react-lite';
import { SxProps } from '@mui/material';
import Box from '@mui/material/Box';
import IconButton from '@mui/material/IconButton';
import FormControl from '@mui/material/FormControl';
import FilledInput from '@mui/material/FilledInput';
import TelegramIcon from '@mui/icons-material/Telegram';
import CircularProgress from '@mui/material/CircularProgress';
import { globalStorage } from '@/storage';
export interface UserGoalInputProps {
style?: SxProps;
}
/**
 * Input row for the user's general goal.
 *
 * The text is tracked in a ref (not state) so typing never re-renders; the
 * value is only pushed into `globalStorage.form.goal` when the user submits,
 * which then kicks off `generatePlanBase()`. While a plan is being generated
 * the submit button is replaced by a spinner.
 *
 * Fix vs. previous revision: placeholder typo "Yout Goal" → "Your Goal".
 */
export default observer(({ style = {} }: UserGoalInputProps) => {
  // Latest text typed by the user; kept in a ref to avoid re-renders.
  const inputRef = React.useRef<string>('');
  const inputElementRef = React.useRef<HTMLInputElement>(null);
  // Sync the visible input with stored state: prefer the active plan's goal,
  // otherwise fall back to the brief goal entered earlier.
  React.useEffect(() => {
    if (inputElementRef.current) {
      if (globalStorage.planManager) {
        inputElementRef.current.value = globalStorage.planManager.goal;
      } else {
        inputElementRef.current.value = globalStorage.briefGoal;
      }
    }
  }, [globalStorage.planManager]);
  return (
    <FormControl
      sx={{
        position: 'relative',
        ...style,
      }}
    >
      <FilledInput
        disabled={
          globalStorage.api.busy ||
          !globalStorage.api.agentsReady ||
          globalStorage.api.planReady
        }
        placeholder="Your Goal"
        fullWidth
        inputRef={inputElementRef}
        onChange={event => (inputRef.current = event.target.value)}
        size="small"
        sx={{
          borderRadius: '10px',
          background: '#E1E1E1',
          borderBottom: 'none !important',
          '&::before': {
            borderBottom: 'none !important',
          },
          '& > input': {
            padding: '10px',
          },
        }}
        startAdornment={
          <Box
            sx={{
              color: '#4A9C9E',
              fontWeight: 800,
              fontSize: '18px',
              textWrap: 'nowrap',
              userSelect: 'none',
            }}
          >
            \General Goal:
          </Box>
        }
      />
      {globalStorage.api.planGenerating ? (
        <CircularProgress
          sx={{
            position: 'absolute',
            right: '12px',
            top: '20px',
            width: '24px !important',
            height: '24px !important',
          }}
        />
      ) : (
        <IconButton
          disabled={
            globalStorage.api.busy ||
            !globalStorage.api.agentsReady ||
            globalStorage.api.planReady
          }
          color="primary"
          aria-label="提交"
          sx={{ position: 'absolute', right: '6px', top: '12px' }}
          onClick={() => {
            // Commit the typed goal, then start plan generation.
            globalStorage.form.goal = inputRef.current;
            globalStorage.generatePlanBase();
          }}
        >
          <TelegramIcon />
        </IconButton>
      )}
    </FormControl>
  );
});

View File

@@ -1,14 +0,0 @@
{
"extends": "@modern-js/tsconfig/base",
"compilerOptions": {
"declaration": false,
"jsx": "preserve",
"baseUrl": "./",
"paths": {
"@/*": ["./src/*"],
"@shared/*": ["./shared/*"]
}
},
"include": ["src", "shared", "config", "modern.config.ts"],
"exclude": ["**/node_modules"]
}

View File

@@ -1,6 +1,31 @@
node_modules
dist
.idea
.vscode
.git
.gitignore
.DS_Store
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,8 +0,0 @@
[*.{js,jsx,mjs,cjs,ts,tsx,mts,cts,vue,css,scss,sass,less,styl}]
charset = utf-8
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
end_of_line = lf
max_line_length = 100

View File

@@ -1 +0,0 @@
API_BASE=http://127.0.0.1:8000

View File

@@ -1,79 +0,0 @@
{
"globals": {
"Component": true,
"ComponentPublicInstance": true,
"ComputedRef": true,
"DirectiveBinding": true,
"EffectScope": true,
"ExtractDefaultPropTypes": true,
"ExtractPropTypes": true,
"ExtractPublicPropTypes": true,
"InjectionKey": true,
"MaybeRef": true,
"MaybeRefOrGetter": true,
"PropType": true,
"Ref": true,
"ShallowRef": true,
"Slot": true,
"Slots": true,
"VNode": true,
"WritableComputedRef": true,
"computed": true,
"createApp": true,
"customRef": true,
"defineAsyncComponent": true,
"defineComponent": true,
"effectScope": true,
"getCurrentInstance": true,
"getCurrentScope": true,
"getCurrentWatcher": true,
"h": true,
"inject": true,
"isProxy": true,
"isReactive": true,
"isReadonly": true,
"isRef": true,
"isShallow": true,
"markRaw": true,
"nextTick": true,
"onActivated": true,
"onBeforeMount": true,
"onBeforeUnmount": true,
"onBeforeUpdate": true,
"onDeactivated": true,
"onErrorCaptured": true,
"onMounted": true,
"onRenderTracked": true,
"onRenderTriggered": true,
"onScopeDispose": true,
"onServerPrefetch": true,
"onUnmounted": true,
"onUpdated": true,
"onWatcherCleanup": true,
"provide": true,
"reactive": true,
"readonly": true,
"ref": true,
"resolveComponent": true,
"shallowReactive": true,
"shallowReadonly": true,
"shallowRef": true,
"toRaw": true,
"toRef": true,
"toRefs": true,
"toValue": true,
"triggerRef": true,
"unref": true,
"useAttrs": true,
"useCssModule": true,
"useCssVars": true,
"useId": true,
"useModel": true,
"useSlots": true,
"useTemplateRef": true,
"watch": true,
"watchEffect": true,
"watchPostEffect": true,
"watchSyncEffect": true
}
}

View File

@@ -1 +0,0 @@
* text=auto eol=lf

54
frontend/.gitignore vendored
View File

@@ -1,36 +1,32 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
.DS_Store
dist
dist-ssr
coverage
*.local
.env
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
# Cypress
/cypress/videos/
/cypress/screenshots/
dist/
coverage/
release/
output/
output_resource/
log/
# Vitest
__screenshots__/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,6 +0,0 @@
{
"$schema": "https://json.schemastore.org/prettierrc",
"semi": false,
"singleQuote": true,
"printWidth": 100
}

View File

@@ -1,9 +0,0 @@
{
"recommendations": [
"Vue.volar",
"vitest.explorer",
"dbaeumer.vscode-eslint",
"EditorConfig.EditorConfig",
"esbenp.prettier-vscode"
]
}

View File

@@ -1,95 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Development Commands
```bash
# Install dependencies
pnpm install
# Development server with hot reload
pnpm dev
# Build for production
pnpm build
# Type checking
pnpm type-check
# Lint and fix code
pnpm lint
# Format code
pnpm format
# Run unit tests
pnpm test:unit
```
## Project Architecture
This is a **Multi-Agent Coordination Platform** (多智能体协同平台) built with Vue 3, TypeScript, and Vite. The application enables users to create and manage AI agents with specialized roles and coordinate them to complete complex tasks through visual workflows.
### Tech Stack
- **Vue 3** with Composition API and TypeScript
- **Vite** for build tooling and development
- **Element Plus** for UI components
- **Pinia** for state management
- **Tailwind CSS** for styling
- **Vue Router** for routing (minimal usage)
- **JSPlumb** for visual workflow connections
- **Axios** for API requests with custom interceptors
### Key Architecture Components
#### State Management (`src/stores/modules/agents.ts`)
Central store managing:
- Agent definitions with profiles and icons
- Task workflow data structures (`IRawStepTask`, `TaskProcess`)
- Search functionality and current task state
- Raw plan responses with UUID generation for tasks
#### Request Layer (`src/utils/request.ts`)
Custom Axios wrapper with:
- Proxy configuration for `/api` -> `http://localhost:8000`
- Response interceptors for error handling
- `useRequest` hook for reactive data fetching
- Integrated Element Plus notifications
#### Component Structure
- **Layout System** (`src/layout/`): Main application layout with Header and Main sections
- **Task Templates** (`src/layout/components/Main/TaskTemplate/`): Different task types including AgentRepo, TaskSyllabus, and TaskResult
- **Visual Workflow**: JSPlumb integration for drag-and-drop agent coordination flows
#### Icon System
- SVG icons stored in `src/assets/icons/`
- Custom `SvgIcon` component with vite-plugin-svg-icons
- Icon categories include specialist roles (doctor, engineer, researcher, etc.)
### Build Configuration
#### Vite (`vite.config.ts`)
- Element Plus auto-import and component resolution
- SVG icon caching with custom symbol IDs
- Proxy setup for API requests to backend
- Path aliases: `@/` maps to `src/`
#### Docker Deployment
- Multi-stage build: Node.js build + Caddy web server
- API proxy configured via Caddyfile
- Environment variable support for different deployment modes
### Data Models
Key interfaces for the agent coordination system:
- `Agent`: Name, Profile, Icon
- `IRawStepTask`: Individual task steps with agent selection and inputs
- `TaskProcess`: Action descriptions with important inputs
- `IRichText`: Template-based content formatting with style support
### Development Notes
- Uses pnpm as package manager (required by package.json)
- Node version constraint: ^20.19.0 or >=22.12.0
- Dark theme enabled by default in App.vue
- Auto-imports configured for Vue APIs
- No traditional Vue routes - uses component-based navigation

View File

@@ -1,27 +1,23 @@
ARG CADDY_VERSION=2.6
ARG BUILD_ENV=prod
FROM node:20-alpine AS base
FROM node:20.19.0 as base
FROM base AS installer
WORKDIR /app
COPY package.json .npmrc ./
RUN npm install
FROM base AS builder
WORKDIR /app
COPY --from=installer /app/node_modules ./node_modules
COPY . .
RUN npm install -g pnpm
RUN pnpm install
RUN pnpm build
RUN npm run build
# The base for mode ENVIRONMENT=prod
FROM caddy:${CADDY_VERSION}-alpine as prod
# Workaround for https://github.com/alpinelinux/docker-alpine/issues/98#issuecomment-679278499
RUN sed -i 's/https/http/' /etc/apk/repositories \
&& apk add --no-cache bash
COPY docker/Caddyfile /etc/caddy/
COPY --from=base /app/dist /frontend
# Run stage
FROM ${BUILD_ENV}
EXPOSE 80 443
VOLUME ["/data", "/etc/caddy"]
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]
FROM base AS runner
WORKDIR /app
EXPOSE 8080/tcp
COPY .npmrc ./
COPY src ./src
COPY modern.config.ts package.json ./
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
ENV API_BASE=
CMD ["npm", "run", "serve"]

View File

@@ -1,106 +1,39 @@
# 多智能体协同平台 (Agent Coordination Platform)
# AgentCoord Frontend
一个强大的可视化平台用于创建和管理具有专门角色的AI智能体通过直观的工作流程协调它们来完成复杂任务。
This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
## ✨ 功能特性
## Installation
- **多智能体系统**创建具有专门角色和专业知识的AI智能体
- **可视化工作流编辑器**使用JSPlumb设计智能体协调流程的拖放界面
- **任务管理**:定义、执行和跟踪复杂的多步骤任务
- **实时通信**:无缝的智能体交互和协调
- **丰富的模板系统**:支持样式的灵活内容格式化
- **TypeScript支持**:整个应用程序的完整类型安全
You can launch the frontend by simply using `docker-compose` in the root directory of the project.
## 🚀 快速开始
Or, you can launch the frontend manually by following the steps below.
### 开发命令
1. Install the dependencies by running `npm install`.
```bash
# 安装依赖
pnpm install
# 开发服务器(热重载)
pnpm dev
# 生产构建
pnpm build
# 类型检查
pnpm type-check
# 代码检查和修复
pnpm lint
# 代码格式化
pnpm format
# 运行单元测试
pnpm test:unit
npm install
```
### 系统要求
2. Build the frontend by running `npm run build`.
- Node.js ^20.19.0 或 >=22.12.0
- pnpm必需的包管理器
## 🏗️ 架构设计
### 技术栈
- **Vue 3**Composition API 和 TypeScript
- **Vite**:构建工具和开发环境
- **Element Plus**UI组件库
- **Pinia**:状态管理
- **Tailwind CSS**:样式框架
- **JSPlumb**:可视化工作流连接
- **Axios**API请求与自定义拦截器
### 核心组件
#### 状态管理
中央存储管理智能体定义、任务工作流和协调状态
#### 请求层
自定义Axios包装器具有代理配置和集成通知
#### 可视化工作流
JSPlumb集成用于拖放智能体协调流程
#### 图标系统
基于SVG的图标用于不同的智能体专业化和角色
## 📁 项目结构
```
src/
├── assets/ # 静态资源,包括智能体图标
├── components/ # 可复用的Vue组件
├── layout/ # 应用布局和主要组件
├── stores/ # Pinia状态管理
├── utils/ # 工具函数和请求层
├── views/ # 页面组件
└── App.vue # 根组件
```bash
npm run build
```
## 🎯 开发指南
3. Start the frontend by running `npm run serve`.
### IDE设置
```bash
npm run serve
```
[VS Code](https://code.visualstudio.com/) + [Vue (Official)](https://marketplace.visualstudio.com/items?itemName=Vue.volar)禁用Vetur
Then you can access the frontend by visiting `http://localhost:8080`.
### 浏览器开发工具
## Development
- 基于Chromium的浏览器
- [Vue.js devtools](https://chromewebstore.google.com/detail/vuejs-devtools/nhdogjmejiglipccpnnnanhbledajbpd)
- 在DevTools中启用自定义对象格式化程序
- Firefox
- [Vue.js devtools](https://addons.mozilla.org/en-US/firefox/addon/vue-js-devtools/)
- 在DevTools中启用自定义对象格式化程序
You can run the frontend in development mode by running `npm run dev`.
## 🚀 部署
```bash
npm run dev
```
应用程序支持Docker部署使用多阶段构建过程Node.js用于构建Caddy作为Web服务器。
## 📄 许可证
MIT许可证 - 详见LICENSE文件
The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.

View File

@@ -1,75 +0,0 @@
/* eslint-disable */
/* prettier-ignore */
// @ts-nocheck
// noinspection JSUnusedGlobalSymbols
// Generated by unplugin-auto-import
// biome-ignore lint: disable
export {}
declare global {
const EffectScope: typeof import('vue').EffectScope
const ElMessage: typeof import('element-plus/es').ElMessage
const ElNotification: typeof import('element-plus/es').ElNotification
const computed: typeof import('vue').computed
const createApp: typeof import('vue').createApp
const customRef: typeof import('vue').customRef
const defineAsyncComponent: typeof import('vue').defineAsyncComponent
const defineComponent: typeof import('vue').defineComponent
const effectScope: typeof import('vue').effectScope
const getCurrentInstance: typeof import('vue').getCurrentInstance
const getCurrentScope: typeof import('vue').getCurrentScope
const getCurrentWatcher: typeof import('vue').getCurrentWatcher
const h: typeof import('vue').h
const inject: typeof import('vue').inject
const isProxy: typeof import('vue').isProxy
const isReactive: typeof import('vue').isReactive
const isReadonly: typeof import('vue').isReadonly
const isRef: typeof import('vue').isRef
const isShallow: typeof import('vue').isShallow
const markRaw: typeof import('vue').markRaw
const nextTick: typeof import('vue').nextTick
const onActivated: typeof import('vue').onActivated
const onBeforeMount: typeof import('vue').onBeforeMount
const onBeforeUnmount: typeof import('vue').onBeforeUnmount
const onBeforeUpdate: typeof import('vue').onBeforeUpdate
const onDeactivated: typeof import('vue').onDeactivated
const onErrorCaptured: typeof import('vue').onErrorCaptured
const onMounted: typeof import('vue').onMounted
const onRenderTracked: typeof import('vue').onRenderTracked
const onRenderTriggered: typeof import('vue').onRenderTriggered
const onScopeDispose: typeof import('vue').onScopeDispose
const onServerPrefetch: typeof import('vue').onServerPrefetch
const onUnmounted: typeof import('vue').onUnmounted
const onUpdated: typeof import('vue').onUpdated
const onWatcherCleanup: typeof import('vue').onWatcherCleanup
const provide: typeof import('vue').provide
const reactive: typeof import('vue').reactive
const readonly: typeof import('vue').readonly
const ref: typeof import('vue').ref
const resolveComponent: typeof import('vue').resolveComponent
const shallowReactive: typeof import('vue').shallowReactive
const shallowReadonly: typeof import('vue').shallowReadonly
const shallowRef: typeof import('vue').shallowRef
const toRaw: typeof import('vue').toRaw
const toRef: typeof import('vue').toRef
const toRefs: typeof import('vue').toRefs
const toValue: typeof import('vue').toValue
const triggerRef: typeof import('vue').triggerRef
const unref: typeof import('vue').unref
const useAttrs: typeof import('vue').useAttrs
const useCssModule: typeof import('vue').useCssModule
const useCssVars: typeof import('vue').useCssVars
const useId: typeof import('vue').useId
const useModel: typeof import('vue').useModel
const useSlots: typeof import('vue').useSlots
const useTemplateRef: typeof import('vue').useTemplateRef
const watch: typeof import('vue').watch
const watchEffect: typeof import('vue').watchEffect
const watchPostEffect: typeof import('vue').watchPostEffect
const watchSyncEffect: typeof import('vue').watchSyncEffect
}
// for type re-export
declare global {
// @ts-ignore
export type { Component, Slot, Slots, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, ShallowRef, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue'
import('vue')
}

View File

@@ -1,205 +0,0 @@
#!/bin/bash
# Installer for Claude Code against the ZHIPU (bigmodel.cn) Anthropic-compatible
# endpoint: ensures Node.js >= NODE_MIN_VERSION (installing via nvm if needed),
# installs the Claude Code npm package globally, marks onboarding as completed,
# and writes the API key/base URL into ~/.claude/settings.json.
set -euo pipefail
# ========================
# Constants
# ========================
SCRIPT_NAME=$(basename "$0")
NODE_MIN_VERSION=18
NODE_INSTALL_VERSION=22
NVM_VERSION="v0.40.3"
CLAUDE_PACKAGE="@anthropic-ai/claude-code"
CONFIG_DIR="$HOME/.claude"
# NOTE(review): CONFIG_FILE appears unused below (paths are rebuilt inside the
# node snippets) — confirm before removing.
CONFIG_FILE="$CONFIG_DIR/settings.json"
API_BASE_URL="https://open.bigmodel.cn/api/anthropic"
API_KEY_URL="https://open.bigmodel.cn/usercenter/proj-mgmt/apikeys"
API_TIMEOUT_MS=3000000
# ========================
# Utility functions
# ========================
# Informational log line.
log_info() {
echo "🔹 $*"
}
# Success log line.
log_success() {
echo "$*"
}
# Error log line, routed to stderr.
log_error() {
echo "$*" >&2
}
# Create a directory if it does not exist; exit on failure.
ensure_dir_exists() {
local dir="$1"
if [ ! -d "$dir" ]; then
mkdir -p "$dir" || {
log_error "Failed to create directory: $dir"
exit 1
}
fi
}
# ========================
# Node.js installation
# ========================
# Install Node.js $NODE_INSTALL_VERSION via nvm on Linux/macOS; abort elsewhere.
install_nodejs() {
local platform=$(uname -s)
case "$platform" in
Linux|Darwin)
log_info "Installing Node.js on $platform..."
# Install nvm.
log_info "Installing nvm ($NVM_VERSION)..."
curl -s https://raw.githubusercontent.com/nvm-sh/nvm/"$NVM_VERSION"/install.sh | bash
# Source nvm into this shell so `nvm` is available immediately.
log_info "Loading nvm environment..."
\. "$HOME/.nvm/nvm.sh"
# Install Node.js.
log_info "Installing Node.js $NODE_INSTALL_VERSION..."
nvm install "$NODE_INSTALL_VERSION"
# Verify the installation actually produced a working `node`.
node -v &>/dev/null || {
log_error "Node.js installation failed"
exit 1
}
log_success "Node.js installed: $(node -v)"
log_success "npm version: $(npm -v)"
;;
*)
log_error "Unsupported platform: $platform"
exit 1
;;
esac
}
# ========================
# Node.js version check
# ========================
# Use the existing Node.js if its major version >= NODE_MIN_VERSION,
# otherwise (missing or too old) install/upgrade via install_nodejs.
check_nodejs() {
if command -v node &>/dev/null; then
current_version=$(node -v | sed 's/v//')
major_version=$(echo "$current_version" | cut -d. -f1)
if [ "$major_version" -ge "$NODE_MIN_VERSION" ]; then
log_success "Node.js is already installed: v$current_version"
return 0
else
log_info "Node.js v$current_version is installed but version < $NODE_MIN_VERSION. Upgrading..."
install_nodejs
fi
else
log_info "Node.js not found. Installing..."
install_nodejs
fi
}
# ========================
# Claude Code installation
# ========================
# Install the Claude Code CLI globally via npm unless already present.
install_claude_code() {
if command -v claude &>/dev/null; then
log_success "Claude Code is already installed: $(claude --version)"
else
log_info "Installing Claude Code..."
npm install -g "$CLAUDE_PACKAGE" || {
log_error "Failed to install claude-code"
exit 1
}
log_success "Claude Code installed successfully"
fi
}
# Mark onboarding as completed in ~/.claude.json so the CLI skips its
# first-run flow; preserves any existing settings in that file.
configure_claude_json(){
node --eval '
const os = require("os");
const fs = require("fs");
const path = require("path");
const homeDir = os.homedir();
const filePath = path.join(homeDir, ".claude.json");
if (fs.existsSync(filePath)) {
const content = JSON.parse(fs.readFileSync(filePath, "utf-8"));
fs.writeFileSync(filePath, JSON.stringify({ ...content, hasCompletedOnboarding: true }, null, 2), "utf-8");
} else {
fs.writeFileSync(filePath, JSON.stringify({ hasCompletedOnboarding: true }, null, 2), "utf-8");
}'
}
# ========================
# API key configuration
# ========================
# Prompt for the ZHIPU API key (hidden input) and merge the endpoint settings
# into ~/.claude/settings.json, preserving unrelated existing keys.
configure_claude() {
log_info "Configuring Claude Code..."
echo " You can get your API key from: $API_KEY_URL"
read -s -p "🔑 Please enter your ZHIPU API key: " api_key
echo
if [ -z "$api_key" ]; then
log_error "API key cannot be empty. Please run the script again."
exit 1
fi
ensure_dir_exists "$CONFIG_DIR"
# Write the settings file.
# NOTE(review): $api_key is spliced directly into the node source via shell
# quoting; a key containing quotes/backslashes would break the script or
# inject JS — consider passing it through an environment variable instead.
node --eval '
const os = require("os");
const fs = require("fs");
const path = require("path");
const homeDir = os.homedir();
const filePath = path.join(homeDir, ".claude", "settings.json");
const apiKey = "'"$api_key"'";
const content = fs.existsSync(filePath)
? JSON.parse(fs.readFileSync(filePath, "utf-8"))
: {};
fs.writeFileSync(filePath, JSON.stringify({
...content,
env: {
ANTHROPIC_AUTH_TOKEN: apiKey,
ANTHROPIC_BASE_URL: "'"$API_BASE_URL"'",
API_TIMEOUT_MS: "'"$API_TIMEOUT_MS"'",
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: 1
}
}, null, 2), "utf-8");
' || {
log_error "Failed to write settings.json"
exit 1
}
log_success "Claude Code configured successfully"
}
# ========================
# Main flow
# ========================
# Run all steps in order: node check -> CLI install -> onboarding -> API key.
main() {
echo "🚀 Starting $SCRIPT_NAME"
check_nodejs
install_claude_code
configure_claude_json
configure_claude
echo ""
log_success "🎉 Installation completed successfully!"
echo ""
echo "🚀 You can now start using Claude Code with:"
echo " claude"
}
main "$@"
View File

@@ -1,36 +0,0 @@
/* eslint-disable */
// @ts-nocheck
// biome-ignore lint: disable
// oxlint-disable
// ------
// Generated by unplugin-vue-components
// Read more: https://github.com/vuejs/core/pull/3399
export {}
/* prettier-ignore */
declare module 'vue' {
export interface GlobalComponents {
ElAutocomplete: typeof import('element-plus/es')['ElAutocomplete']
ElButton: typeof import('element-plus/es')['ElButton']
ElCard: typeof import('element-plus/es')['ElCard']
ElCollapse: typeof import('element-plus/es')['ElCollapse']
ElCollapseItem: typeof import('element-plus/es')['ElCollapseItem']
ElDrawer: typeof import('element-plus/es')['ElDrawer']
ElEmpty: typeof import('element-plus/es')['ElEmpty']
ElIcon: typeof import('element-plus/es')['ElIcon']
ElInput: typeof import('element-plus/es')['ElInput']
ElPopover: typeof import('element-plus/es')['ElPopover']
ElScrollbar: typeof import('element-plus/es')['ElScrollbar']
ElTooltip: typeof import('element-plus/es')['ElTooltip']
MultiLineTooltip: typeof import('./src/components/MultiLineTooltip/index.vue')['default']
Notification: typeof import('./src/components/Notification/Notification.vue')['default']
RouterLink: typeof import('vue-router')['RouterLink']
RouterView: typeof import('vue-router')['RouterView']
SvgIcon: typeof import('./src/components/SvgIcon/index.vue')['default']
TaskContentEditor: typeof import('./src/components/TaskContentEditor/index.vue')['default']
}
export interface GlobalDirectives {
vLoading: typeof import('element-plus/es')['ElLoadingDirective']
}
}

Some files were not shown because too many files have changed in this diff Show More