Compare commits
6 Commits
web
...
9a74d2813e
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9a74d2813e | ||
|
|
f50dcd2fe7 | ||
|
|
744945f68d | ||
|
|
25853d8174 | ||
|
|
ceb57acd23 | ||
|
|
da2318a40c |
@@ -1,434 +0,0 @@
|
||||
"""
|
||||
Word 文档 LLM 报告导出器
|
||||
调用大模型生成专业的任务执行报告
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
|
||||
class DocxLLMExporter:
    """Word-document LLM report exporter.

    Builds a prompt from task execution data, asks an OpenAI-compatible
    model to write a Markdown report, then renders that Markdown into a
    .docx file via python-docx.
    """

    # Default LLM settings; real values are loaded from config.yaml.
    # __init__ copies this dict per instance so loading configuration
    # never mutates this shared class-level default.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template; placeholders are filled in generate().
    PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和报告分析师。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。

## 任务基本信息
- 任务名称:{task_name}

## 任务大纲(规划阶段)
{task_outline}

## 执行结果
{rehearsal_log}

## 参与智能体
{agents}

## 智能体评分
{agent_scores}

---

## 报告要求

请生成一份完整的任务执行报告,包含以下章节:

### 1. 执行摘要
用 2-3 句话概括本次任务的整体执行情况。

### 2. 任务概述
- 任务背景与目标
- 任务范围与边界

### 3. 任务规划分析
- 任务拆解的合理性
- 智能体角色分配的优化建议
- 工作流程设计

### 4. 执行过程回顾
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案

### 5. 成果产出分析
- 产出物的质量评估
- 产出与预期目标的匹配度

### 6. 团队协作分析
- 智能体之间的协作模式
- 信息传递效率

### 7. 质量评估
- 整体完成质量评分(1-10分)
- 各维度的具体评分及理由

### 8. 经验教训与改进建议
- 成功经验
- 存在的问题与不足
- 改进建议

---

## 输出格式要求
- 使用 Markdown 格式输出
- 语言:简体中文
- 适当使用列表、表格增强可读性
- 报告长度必须达到 4000-6000 字,每个章节都要详细展开,不要遗漏任何章节
- 每个章节的内容要充实,提供具体的分析和建议
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
- 不要生成附录章节(如有关键参数对照表、工艺流程图等),如果确实需要附录再生成
- 不要在报告中显示"报告总字数"这样的统计信息
"""

    def __init__(self):
        # Fix: work on an instance-level copy of LLM_CONFIG so that two
        # exporter instances (or repeated config reloads) cannot clobber
        # each other through the shared class attribute.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml that exists."""
        try:
            import yaml
            # Candidate config file locations, probed in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]

            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    # An empty/falsy config file is skipped; keep probing.
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            # Best-effort: a missing yaml module or unreadable file only
            # leaves the defaults (None) in place.
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate a Word report from *task_data* via the LLM.

        Args:
            task_data: dict with 'task_name', 'task_outline',
                'rehearsal_log' and 'agent_scores' entries.
            file_path: destination path for the .docx file.

        Returns:
            True on success, False on any failure (logged, not raised).
        """
        try:
            # 1. Gather raw inputs.
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')

            # 2. Agents that actually took part (from the outline's
            #    Collaboration Process section).
            agents = self._extract_agents(task_outline)

            # 3. Keep only the scores of participating agents.
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)

            # 4. Serialize everything for the prompt.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
            agents_str = ', '.join(agents) if agents else '无'
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'

            # 5. Fill the prompt template.
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )

            # 6. Ask the model for the Markdown report.
            print("正在调用大模型生成报告...")
            report_content = self._call_llm(prompt)

            if not report_content:
                print("LLM 生成报告失败")
                return False

            # 7. Drop a duplicated "任务执行报告" heading, if present.
            report_content = self._clean_report_title(report_content)

            print(f"报告生成成功,长度: {len(report_content)} 字符")

            # 8. Render the Markdown into a Word document.
            self._save_as_word(report_content, file_path)

            return True

        except Exception as e:
            print(f"Word LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _clean_report_title(self, content: str) -> str:
        """Strip a duplicated report title from the start of *content*."""
        lines = content.split('\n')
        if not lines:
            return content

        # Only the exact literal title (plain or H1) is removed.
        first_line = lines[0].strip()
        if first_line == '任务执行报告' or first_line == '# 任务执行报告':
            lines = lines[1:]
            # Also drop any blank lines that followed the title.
            while lines and not lines[0].strip():
                lines.pop(0)

        return '\n'.join(lines)

    def _extract_agents(self, task_outline: Any) -> list:
        """Return the de-duplicated agents named in the outline's
        'Collaboration Process' steps (order not guaranteed)."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []

        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []

        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:
                            agents.add(agent)

        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Keep only the score entries of agents in *agents*.

        Malformed steps and steps left without any matching agent are
        dropped entirely.
        """
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}

        if not agents:
            return {}

        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue

            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})

            if not agent_scores_data:
                continue

            # Keep only scores of agents that took part in this task.
            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores

            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }

        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the configured chat-completion API; return "" on failure."""
        try:
            import openai

            # Refuse to call out with an incomplete configuration.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""

            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )

            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                # Generous cap: the prompt demands a 4000-6000 character report.
                max_tokens=12000,
            )

            if response and response.choices:
                return response.choices[0].message.content

            return ""

        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _save_as_word(self, markdown_content: str, file_path: str):
        """Render *markdown_content* into a Word document at *file_path*.

        Raises on missing python-docx or on any save failure so the caller
        can surface the error.
        """
        try:
            from docx import Document
            from docx.shared import Pt, Inches
            from docx.enum.text import WD_ALIGN_PARAGRAPH

            doc = Document()

            # Use the first '# ' heading as the centered document title.
            lines = markdown_content.split('\n')
            first_title = None
            content_start = 0

            for i, line in enumerate(lines):
                line = line.strip()
                if line.startswith('# '):
                    first_title = line[2:].strip()
                    content_start = i + 1
                    break

            if first_title:
                title = doc.add_heading(first_title, level=0)
                title.alignment = WD_ALIGN_PARAGRAPH.CENTER

            # Render the rest of the Markdown body.
            remaining_content = '\n'.join(lines[content_start:])
            self._parse_markdown_to_doc(remaining_content, doc)

            # Trailing export timestamp.
            doc.add_paragraph(f"\n\n导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

            doc.save(file_path)
            print(f"Word 文档已保存: {file_path}")

        except ImportError:
            print("请安装 python-docx 库: pip install python-docx")
            raise
        except Exception as e:
            print(f"保存 Word 文档失败: {e}")
            raise

    def _parse_markdown_to_doc(self, markdown_content: str, doc):
        """Parse a subset of Markdown (headings, bullets, tables, bold)
        into python-docx elements appended to *doc*."""
        lines = markdown_content.split('\n')
        i = 0
        table_rows = []      # cells of the table currently being collected
        in_table = False

        while i < len(lines):
            line = lines[i].rstrip()

            # A blank line terminates any table in progress.
            if not line:
                in_table = False
                if table_rows:
                    self._add_table_to_doc(table_rows, doc)
                    table_rows = []
                i += 1
                continue

            # Skip Markdown table separator rows (|---|, |:---| ...).
            stripped = line.strip()
            if stripped.startswith('|') and stripped.endswith('|') and '---' in stripped:
                i += 1
                continue

            # Table row: collect its cells and keep scanning.
            if '|' in line and line.strip().startswith('|'):
                cells = [cell.strip() for cell in line.split('|')[1:-1]]
                if cells and any(cells):  # ignore fully empty rows
                    table_rows.append(cells)
                    in_table = True
                i += 1
                continue
            else:
                # Leaving a table without a blank line: flush it first.
                if in_table and table_rows:
                    self._add_table_to_doc(table_rows, doc)
                    table_rows = []
                    in_table = False

            # Headings. NOTE: '## ' maps to Word level 1 (the report's
            # main sections) and '### ' to level 3, matching the prompt's
            # section structure.
            if line.startswith('### '):
                doc.add_heading(line[4:].strip(), level=3)
            elif line.startswith('## '):
                doc.add_heading(line[3:].strip(), level=1)
            elif line.startswith('# '):
                doc.add_heading(line[2:].strip(), level=0)
            # Bullet list items (-, *, or •); the marker is stripped.
            elif line.startswith('- ') or line.startswith('* ') or line.startswith('• '):
                text = line[2:].strip() if line.startswith(('- ', '* ')) else line[1:].strip()
                self._add_formatted_paragraph(text, doc, 'List Bullet')
            # Plain paragraph (with inline **bold** handling).
            else:
                self._add_formatted_paragraph(line, doc)

            i += 1

        # Flush a table that ran to the end of the content.
        if table_rows:
            self._add_table_to_doc(table_rows, doc)

    def _add_table_to_doc(self, table_rows: list, doc):
        """Append the collected Markdown table rows to the document."""
        if not table_rows:
            return

        # Fix: size the table by the widest row so a ragged row cannot
        # raise IndexError when its cells are filled below (the original
        # used only the first row's width).
        cols = max(len(row) for row in table_rows)
        table = doc.add_table(rows=len(table_rows), cols=cols)
        table.style = 'Light Grid Accent 1'

        for i, row_data in enumerate(table_rows):
            row = table.rows[i]
            for j, cell_text in enumerate(row_data):
                cell = row.cells[j]
                cell.text = ''

                # Render inline **bold** spans inside the cell.
                parts = re.split(r'(\*\*.+?\*\*)', cell_text)
                for part in parts:
                    # len guard: a literal '**' or '****' fragment is not a
                    # bold span and must be kept as plain text.
                    if part.startswith('**') and part.endswith('**') and len(part) > 4:
                        run = cell.paragraphs[0].add_run(part[2:-2])
                        run.bold = True
                    elif part:
                        cell.paragraphs[0].add_run(part)

    def _add_formatted_paragraph(self, text: str, doc, style: str = None):
        """Add a paragraph, turning **spans** into bold runs.

        Args:
            text: the paragraph text, possibly containing **bold** markers.
            doc: target python-docx Document.
            style: optional paragraph style name (e.g. 'List Bullet').
        """
        para = doc.add_paragraph(style=style)

        # Split on bold markers; captured groups are the bold spans.
        parts = re.split(r'(\*\*.+?\*\*)', text)
        for part in parts:
            # len guard: a literal '**' or '****' fragment is not a bold
            # span and must be kept as plain text.
            if part.startswith('**') and part.endswith('**') and len(part) > 4:
                run = para.add_run(part[2:-2])
                run.bold = True
            else:
                para.add_run(part)
|
||||
@@ -1,269 +0,0 @@
|
||||
"""
|
||||
信息图 LLM 报告导出器
|
||||
调用大模型生成信息图展示的格式化内容
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
|
||||
class InfographicLLMExporter:
    """Infographic LLM exporter.

    Asks an OpenAI-compatible model to condense task execution data into
    a JSON payload suitable for infographic rendering.
    """

    # Default LLM settings; real values are loaded from config.yaml.
    # __init__ copies this dict per instance so loading configuration
    # never mutates this shared class-level default.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template; double braces are literal braces in the JSON sketch.
    PROMPT_TEMPLATE = """你是一位专业的可视化设计师和数据分析专家。你的任务是将以下任务执行数据生成适合信息图展示的格式化内容。

## 任务基本信息
- 任务名称:{task_name}

## 任务大纲(规划阶段)
{task_outline}

## 执行结果
{rehearsal_log}

## 参与智能体
{agents}

## 智能体评分
{agent_scores}

---

## 信息图内容要求

请生成以下信息图展示内容(JSON 格式输出):

{{
    "summary": "执行摘要 - 2-3句话概括整体执行情况",
    "highlights": [
        "亮点1 - 取得的显著成果",
        "亮点2 - 关键突破或创新",
        "亮点3 - 重要的里程碑"
    ],
    "statistics": {{
        "total_steps": 执行总步骤数,
        "agent_count": 参与智能体数量,
        "completion_rate": 完成率(百分比),
        "quality_score": 质量评分(1-10)
    }},
    "key_insights": [
        "关键洞察1 - 从执行过程中总结的洞见",
        "关键洞察2 - 值得关注的趋势或模式",
        "关键洞察3 - 对未来工作的建议"
    ],
    "timeline": [
        {{"step": "步骤名称", "status": "完成/进行中/未完成", "key_result": "关键产出"}},
        ...
    ],
    "agent_performance": [
        {{"name": "智能体名称", "score": 评分, "contribution": "主要贡献"}},
        ...
    ]
}}

---

## 输出要求
- 输出必须是有效的 JSON 格式
- 语言:简体中文
- 所有字符串值使用中文
- statistics 中的数值必须是整数或浮点数
- 确保 JSON 格式正确,不要有语法错误
- 不要输出 JSON 之外的任何内容
- **重要**:"执行结果"(rehearsal_log)只是参考数据,用于帮助你分析整体执行情况生成摘要、亮点、统计数据等。**不要在任何输出字段中直接复制或输出原始执行结果数据**,而应该对这些数据进行分析和提炼,生成适合信息图展示的格式化内容。
"""

    def __init__(self):
        # Fix: work on an instance-level copy of LLM_CONFIG so that two
        # exporter instances (or repeated config reloads) cannot clobber
        # each other through the shared class attribute.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml that exists."""
        try:
            import yaml
            # Candidate config file locations, probed in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]

            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Generate the infographic payload for *task_data* via the LLM.

        Returns:
            The parsed JSON dict, or None on any failure.
            (Fix: annotated Optional — the original claimed Dict but
            returned None on failure paths.)
        """
        try:
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')

            # Participants, then their scores only.
            agents = self._extract_agents(task_outline)
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)

            # Serialize prompt inputs.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
            agents_str = ', '.join(agents) if agents else '无'
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'

            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )

            print("正在调用大模型生成信息图内容...")
            llm_result = self._call_llm(prompt)

            if not llm_result:
                print("LLM 生成信息图内容失败")
                return None

            infographic_data = self._parse_llm_result(llm_result)
            if not infographic_data:
                print("解析 LLM 结果失败")
                return None

            # Fix: plain string — the original used an f-string with no
            # placeholders.
            print("信息图内容生成成功")
            return infographic_data

        except Exception as e:
            print(f"信息图 LLM 生成失败: {e}")
            import traceback
            traceback.print_exc()
            return None

    def _extract_agents(self, task_outline: Any) -> list:
        """Return the de-duplicated agents named in the outline's
        'Collaboration Process' steps (order not guaranteed)."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []

        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []

        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:
                            agents.add(agent)

        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Keep only the score entries of agents in *agents*.

        Malformed steps and steps left without any matching agent are
        dropped entirely.
        """
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}

        if not agents:
            return {}

        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue

            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})

            if not agent_scores_data:
                continue

            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores

            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }

        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the configured chat-completion API; return "" on failure."""
        try:
            import openai

            # Refuse to call out with an incomplete configuration.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""

            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )

            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                max_tokens=8000,
            )

            if response and response.choices:
                return response.choices[0].message.content

            return ""

        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _parse_llm_result(self, llm_result: str) -> Optional[Dict[str, Any]]:
        """Parse the model's reply as JSON, tolerating ``` fences.

        Returns None (after logging) on any parse failure.
        """
        try:
            json_str = llm_result.strip()
            # Strip a leading ```json or bare ``` fence and a trailing ```.
            if json_str.startswith("```json"):
                json_str = json_str[7:]
            if json_str.startswith("```"):
                json_str = json_str[3:]
            if json_str.endswith("```"):
                json_str = json_str[:-3]
            json_str = json_str.strip()

            return json.loads(json_str)

        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            print(f"原始结果: {llm_result[:500]}...")
            return None
        except Exception as e:
            print(f"解析失败: {e}")
            return None
|
||||
@@ -1,315 +0,0 @@
|
||||
"""
|
||||
Markdown LLM 报告导出器
|
||||
调用大模型生成专业的任务执行报告(Markdown 格式)
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
|
||||
class MarkdownLLMExporter:
    """Markdown LLM report exporter.

    Builds a prompt from task execution data, asks an OpenAI-compatible
    model for a Markdown report, and writes it to a .md file with a
    title and export timestamp.
    """

    # Default LLM settings; real values are loaded from config.yaml.
    # __init__ copies this dict per instance so loading configuration
    # never mutates this shared class-level default.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }

    # Prompt template; placeholders are filled in generate().
    PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和报告分析师。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。

## 任务基本信息
- 任务名称:{task_name}

## 任务大纲(规划阶段)
{task_outline}

## 执行结果
{rehearsal_log}

## 参与智能体
{agents}

## 智能体评分
{agent_scores}

---

## 报告要求

请生成一份完整的任务执行报告,包含以下章节:

### 1. 执行摘要
用 2-3 句话概括本次任务的整体执行情况。

### 2. 任务概述
- 任务背景与目标
- 任务范围与边界

### 3. 任务规划分析
- 任务拆解的合理性
- 智能体角色分配的优化建议
- 工作流程设计

### 4. 执行过程回顾
- 各阶段的完成情况
- 关键决策点
- 遇到的问题及解决方案

### 5. 成果产出分析
- 产出物的质量评估
- 产出与预期目标的匹配度

### 6. 团队协作分析
- 智能体之间的协作模式
- 信息传递效率

### 7. 质量评估
- 整体完成质量评分(1-10分)
- 各维度的具体评分及理由

### 8. 经验教训与改进建议
- 成功经验
- 存在的问题与不足
- 改进建议

---

## 输出格式要求
- 使用 Markdown 格式输出
- 语言:简体中文
- 适当使用列表、表格增强可读性
- 报告长度必须达到 4000-6000 字,每个章节都要详细展开,不要遗漏任何章节
- 每个章节的内容要充实,提供具体的分析和建议
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
- 不要生成附录章节(如有关键参数对照表、工艺流程图等),如果确实需要附录再生成
- 不要在报告中显示"报告总字数"这样的统计信息
"""

    def __init__(self):
        # Fix: work on an instance-level copy of LLM_CONFIG so that two
        # exporter instances (or repeated config reloads) cannot clobber
        # each other through the shared class attribute.
        self.LLM_CONFIG = dict(self.LLM_CONFIG)
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml that exists."""
        try:
            import yaml
            # Candidate config file locations, probed in order.
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]

            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                        return
        except Exception as e:
            print(f"加载 LLM 配置失败: {e}")

    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate a Markdown report from *task_data* via the LLM.

        Args:
            task_data: dict with 'task_name', 'task_outline',
                'rehearsal_log' and 'agent_scores' entries.
            file_path: destination path for the .md file.

        Returns:
            True on success, False on any failure (logged, not raised).
        """
        try:
            # 1. Gather raw inputs.
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')

            # 2. Agents that actually took part.
            agents = self._extract_agents(task_outline)

            # 3. Keep only their scores.
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)

            # 4. Serialize prompt inputs.
            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
            agents_str = ', '.join(agents) if agents else '无'
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'

            # 5. Fill the prompt template.
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )

            # 6. Ask the model for the report.
            print("正在调用大模型生成 Markdown 报告...")
            report_content = self._call_llm(prompt)

            if not report_content:
                print("LLM 生成报告失败")
                return False

            # 7. Drop a duplicated title heading, if present.
            report_content = self._clean_report_title(report_content)

            print(f"Markdown 报告生成成功,长度: {len(report_content)} 字符")

            # 8. Write the .md file.
            self._save_as_markdown(report_content, task_name, file_path)

            return True

        except Exception as e:
            print(f"Markdown LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def _clean_report_title(self, content: str) -> str:
        """Strip a duplicated report title from the start of *content*."""
        lines = content.split('\n')
        if not lines:
            return content

        # Only the exact literal title (plain or H1) is removed.
        first_line = lines[0].strip()
        if first_line == '任务执行报告' or first_line == '# 任务执行报告':
            lines = lines[1:]
            # Also drop any blank lines that followed the title.
            while lines and not lines[0].strip():
                lines.pop(0)

        return '\n'.join(lines)

    def _extract_agents(self, task_outline: Any) -> list:
        """Return the de-duplicated agents named in the outline's
        'Collaboration Process' steps (order not guaranteed)."""
        agents = set()
        if not task_outline or not isinstance(task_outline, dict):
            return []

        collaboration_process = task_outline.get('Collaboration Process', [])
        if not collaboration_process or not isinstance(collaboration_process, list):
            return []

        for step in collaboration_process:
            if isinstance(step, dict):
                agent_selection = step.get('AgentSelection', [])
                if isinstance(agent_selection, list):
                    for agent in agent_selection:
                        if agent:
                            agents.add(agent)

        return list(agents)

    def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
        """Keep only the score entries of agents in *agents*.

        Malformed steps and steps left without any matching agent are
        dropped entirely.
        """
        if not agent_scores or not isinstance(agent_scores, dict):
            return {}

        if not agents:
            return {}

        filtered = {}
        for step_id, step_data in agent_scores.items():
            if not isinstance(step_data, dict):
                continue

            aspect_list = step_data.get('aspectList', [])
            agent_scores_data = step_data.get('agentScores', {})

            if not agent_scores_data:
                continue

            filtered_scores = {}
            for agent_name, scores in agent_scores_data.items():
                if agent_name in agents and isinstance(scores, dict):
                    filtered_scores[agent_name] = scores

            if filtered_scores:
                filtered[step_id] = {
                    'aspectList': aspect_list,
                    'agentScores': filtered_scores
                }

        return filtered

    def _call_llm(self, prompt: str) -> str:
        """Call the configured chat-completion API; return "" on failure."""
        try:
            import openai

            # Refuse to call out with an incomplete configuration.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""

            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )

            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[
                    {"role": "user", "content": prompt}
                ],
                temperature=0.7,
                # Generous cap: the prompt demands a 4000-6000 character report.
                max_tokens=12000,
            )

            if response and response.choices:
                return response.choices[0].message.content

            return ""

        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""

    def _save_as_markdown(self, markdown_content: str, task_name: str, file_path: str):
        """Write the Markdown report to *file_path*.

        Prepends a '# task_name' title when the content has none and
        always appends an export-timestamp footer. Re-raises any write
        failure after logging it.
        """
        try:
            # Prepend a top-level title when the LLM did not emit one.
            if not markdown_content.strip().startswith('# '):
                markdown_content = f"# {task_name}\n\n{markdown_content}"

            # Fix: the original if/else appended the same footer in both
            # branches (the endswith('---') check was dead code), so
            # append unconditionally.
            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            footer = f"\n\n---\n*导出时间: {timestamp}*"
            markdown_content = markdown_content.rstrip() + footer

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)

            print(f"Markdown 文件已保存: {file_path}")

        except Exception as e:
            print(f"保存 Markdown 文件失败: {e}")
            raise
|
||||
@@ -1,374 +0,0 @@
|
||||
"""
|
||||
思维导图 LLM 导出器
|
||||
调用大模型生成专业的任务执行思维导图
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
|
||||
class MindmapLLMExporter:
|
||||
"""思维导图 LLM 导出器 - 调用大模型生成思维导图"""
|
||||
|
||||
LLM_CONFIG = {
|
||||
'OPENAI_API_BASE': None,
|
||||
'OPENAI_API_KEY': None,
|
||||
'OPENAI_API_MODEL': None,
|
||||
}
|
||||
|
||||
PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和可视化设计师。你的任务是将以下任务执行数据生成一份结构清晰、层次分明的思维导图。
|
||||
|
||||
## 任务基本信息
|
||||
- 任务名称:{task_name}
|
||||
|
||||
## 任务大纲(规划阶段)
|
||||
{task_outline}
|
||||
|
||||
## 执行结果
|
||||
{rehearsal_log}
|
||||
|
||||
## 参与智能体
|
||||
{agents}
|
||||
|
||||
## 智能体评分
|
||||
{agent_scores}
|
||||
|
||||
---
|
||||
|
||||
## 思维导图要求
|
||||
|
||||
请生成一份完整的思维导图,包含以下核心分支:
|
||||
|
||||
### 1. 任务概述
|
||||
- 任务背景与目标
|
||||
- 任务范围
|
||||
|
||||
### 2. 任务规划
|
||||
- 任务拆解
|
||||
- 智能体角色分配
|
||||
- 工作流程设计
|
||||
|
||||
### 3. 执行过程
|
||||
- 各阶段的完成情况
|
||||
- 关键决策点
|
||||
- 遇到的问题及解决方案
|
||||
|
||||
### 4. 成果产出
|
||||
- 产出物列表
|
||||
- 质量评估
|
||||
|
||||
### 5. 团队协作
|
||||
- 智能体之间的协作模式
|
||||
- 信息传递效率
|
||||
|
||||
### 6. 质量评估
|
||||
- 整体评分
|
||||
- 各维度评分
|
||||
|
||||
### 7. 经验与改进
|
||||
- 成功经验
|
||||
- 改进建议
|
||||
|
||||
---
|
||||
|
||||
## 输出格式要求
|
||||
请直接输出 JSON 格式,不要包含任何 Markdown 标记。JSON 结构如下:
|
||||
```json
|
||||
{{
|
||||
"title": "思维导图标题",
|
||||
"root": "中心主题",
|
||||
"branches": [
|
||||
{{
|
||||
"name": "分支主题名称",
|
||||
"children": [
|
||||
{{
|
||||
"name": "子主题名称",
|
||||
"children": [
|
||||
{{"name": "具体内容1"}},
|
||||
{{"name": "具体内容2"}}
|
||||
]
|
||||
}}
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"name": "另一个分支主题",
|
||||
"children": [
|
||||
{{"name": "子主题A"}},
|
||||
{{"name": "子主题B"}}
|
||||
]
|
||||
}}
|
||||
]
|
||||
}}
|
||||
```
|
||||
|
||||
注意:
|
||||
- 思维导图层次分明,每个分支至少有 2-3 层深度
|
||||
- 内容简洁,每个节点字数控制在 50-100 字以内
|
||||
- 使用简体中文
|
||||
- 确保 JSON 格式正确,不要有语法错误
|
||||
- 不要生成"报告总字数"这样的统计信息
|
||||
"""
|
||||
|
||||
    def __init__(self):
        """Load the OpenAI-compatible endpoint settings at construction time."""
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found.

        Candidate locations are tried in order: package-relative,
        backend-relative, then the current working directory.  On any
        failure the defaults (None) are left in place and a message is
        printed; nothing is raised.
        """
        try:
            import yaml
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]

            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        # NOTE(review): mutates the class-level dict, i.e.
                        # shared state across instances.
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                    # Stop at the first existing config file.
                    return
        except Exception as e:
            print(f"加载 LLM 配置失败: {e}")
|
||||
|
||||
    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate the mindmap by prompting the LLM.

        Builds the prompt from *task_data*, asks the model for a JSON
        mindmap, and writes a markmap-compatible Markdown file to
        *file_path*.  Returns True on success, False on any failure
        (errors are printed, never raised).
        """
        try:
            task_name = task_data.get('task_name', '未命名任务')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')

            # Restrict the prompt to the agents that actually took part,
            # and to their scores only.
            agents = self._extract_agents(task_outline)
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)

            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
            agents_str = ', '.join(agents) if agents else '无'
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'

            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_outline=task_outline_str,
                rehearsal_log=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str
            )

            print("正在调用大模型生成思维导图...")
            mindmap_json_str = self._call_llm(prompt)

            if not mindmap_json_str:
                print("LLM 生成思维导图失败")
                return False

            print(f"思维导图内容生成成功,长度: {len(mindmap_json_str)} 字符")
            self._create_mindmap_from_json(mindmap_json_str, file_path, task_name)
            return True

        except Exception as e:
            print(f"思维导图 LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False
|
||||
|
||||
def _extract_agents(self, task_outline: Any) -> list:
|
||||
"""从 task_outline 中提取参与智能体列表"""
|
||||
agents = set()
|
||||
if not task_outline or not isinstance(task_outline, dict):
|
||||
return []
|
||||
|
||||
collaboration_process = task_outline.get('Collaboration Process', [])
|
||||
if not collaboration_process or not isinstance(collaboration_process, list):
|
||||
return []
|
||||
|
||||
for step in collaboration_process:
|
||||
if isinstance(step, dict):
|
||||
agent_selection = step.get('AgentSelection', [])
|
||||
if isinstance(agent_selection, list):
|
||||
for agent in agent_selection:
|
||||
if agent:
|
||||
agents.add(agent)
|
||||
return list(agents)
|
||||
|
||||
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
|
||||
"""过滤 agent_scores,只保留参与当前任务的智能体评分"""
|
||||
if not agent_scores or not isinstance(agent_scores, dict):
|
||||
return {}
|
||||
if not agents:
|
||||
return {}
|
||||
|
||||
filtered = {}
|
||||
for step_id, step_data in agent_scores.items():
|
||||
if not isinstance(step_data, dict):
|
||||
continue
|
||||
aspect_list = step_data.get('aspectList', [])
|
||||
agent_scores_data = step_data.get('agentScores', {})
|
||||
if not agent_scores_data:
|
||||
continue
|
||||
filtered_scores = {}
|
||||
for agent_name, scores in agent_scores_data.items():
|
||||
if agent_name in agents and isinstance(scores, dict):
|
||||
filtered_scores[agent_name] = scores
|
||||
if filtered_scores:
|
||||
filtered[step_id] = {'aspectList': aspect_list, 'agentScores': filtered_scores}
|
||||
return filtered
|
||||
|
||||
    def _call_llm(self, prompt: str) -> str:
        """Send *prompt* to the chat-completions API.

        Returns the reply text, or '' on any failure (missing config,
        missing openai package, or API error) — errors are printed, never
        raised.
        """
        try:
            import openai

            # Fail fast with a clear message for each missing setting.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""

            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )

            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=8000,
            )

            if response and response.choices:
                return response.choices[0].message.content
            return ""

        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""
|
||||
|
||||
def _fix_json_string(self, json_str: str) -> str:
|
||||
"""修复常见的 JSON 语法错误"""
|
||||
import re
|
||||
|
||||
json_str = json_str.strip()
|
||||
|
||||
json_str = re.sub(r',\s*}', '}', json_str)
|
||||
json_str = re.sub(r',\s*]', ']', json_str)
|
||||
|
||||
json_str = re.sub(r'""', '"', json_str)
|
||||
|
||||
single_quotes = re.findall(r"'[^']*'", json_str)
|
||||
for sq in single_quotes:
|
||||
if '"' not in sq:
|
||||
json_str = json_str.replace(sq, sq.replace("'", '"'))
|
||||
|
||||
trailing_comma_pattern = re.compile(r',(\s*[}\]])')
|
||||
json_str = trailing_comma_pattern.sub(r'\1', json_str)
|
||||
|
||||
unquoted_key_pattern = re.compile(r'([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:')
|
||||
def fix_unquoted_key(match):
|
||||
prefix = match.group(1)
|
||||
key = match.group(2)
|
||||
return f'{prefix}"{key}":'
|
||||
json_str = unquoted_key_pattern.sub(fix_unquoted_key, json_str)
|
||||
|
||||
unquoted_value_pattern = re.compile(r':\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*([,}\]])')
|
||||
def replace_unquoted(match):
|
||||
value = match.group(1)
|
||||
end = match.group(2)
|
||||
if value.lower() in ('true', 'false', 'null'):
|
||||
return f': {value}{end}'
|
||||
else:
|
||||
return f': "{value}"{end}'
|
||||
json_str = unquoted_value_pattern.sub(replace_unquoted, json_str)
|
||||
|
||||
print(f"JSON 修复后长度: {len(json_str)} 字符")
|
||||
return json_str
|
||||
|
||||
    def _create_mindmap_from_json(self, json_str: str, file_path: str, task_name: str):
        """Parse the LLM's JSON reply and write the mindmap as Markdown.

        Re-raises json.JSONDecodeError (or any other error) after printing
        it, so the caller can report the failure.
        """
        try:
            json_str = json_str.strip()
            # Strip a markdown code fence, if the model wrapped its output.
            if '```json' in json_str:
                json_str = json_str.split('```json')[1].split('```')[0]
            elif '```' in json_str:
                json_str = json_str.split('```')[1].split('```')[0]

            json_str = self._fix_json_string(json_str)

            mindmap_data = json.loads(json_str)

            # Render as a markmap-compatible Markdown bullet list.
            markdown_content = self._generate_markdown(mindmap_data, task_name)
            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)

            print(f"思维导图已保存: {file_path}")

        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            raise
        except Exception as e:
            print(f"创建思维导图失败: {e}")
            raise
|
||||
|
||||
def _generate_markdown(self, mindmap_data: dict, task_name: str) -> str:
|
||||
"""将思维导图数据转换为 Markdown 格式(markmap 兼容)"""
|
||||
lines = []
|
||||
|
||||
# 直接使用无序列表,不需要标题和代码块包裹
|
||||
# 根节点使用 2 个空格缩进
|
||||
lines.append(f" - {task_name}")
|
||||
|
||||
branches = mindmap_data.get('branches', [])
|
||||
for branch in branches:
|
||||
# 一级分支使用 4 个空格缩进(比根节点多 2 个空格,是根节点的子节点)
|
||||
self._add_branch(branch, lines, indent=4)
|
||||
|
||||
return '\n'.join(lines)
|
||||
|
||||
def _add_branch(self, branch: dict, lines: list, indent: int = 2):
|
||||
"""递归添加分支到思维导图"""
|
||||
name = branch.get('name', '')
|
||||
if not name:
|
||||
return
|
||||
|
||||
prefix = ' ' * indent
|
||||
lines.append(f"{prefix}- {name}")
|
||||
|
||||
children = branch.get('children', [])
|
||||
if children:
|
||||
for child in children:
|
||||
if isinstance(child, dict):
|
||||
self._add_child_node(child, lines, indent + 2)
|
||||
else:
|
||||
lines.append(f"{' ' * (indent + 2)}- {child}")
|
||||
|
||||
def _add_child_node(self, child: dict, lines: list, indent: int):
|
||||
"""添加子节点"""
|
||||
name = child.get('name', '')
|
||||
if not name:
|
||||
return
|
||||
|
||||
prefix = ' ' * indent
|
||||
lines.append(f"{prefix}- {name}")
|
||||
|
||||
children = child.get('children', [])
|
||||
if children:
|
||||
for grandchild in children:
|
||||
if isinstance(grandchild, dict):
|
||||
self._add_child_node(grandchild, lines, indent + 2)
|
||||
else:
|
||||
lines.append(f"{' ' * (indent + 2)}- {grandchild}")
|
||||
@@ -1,772 +0,0 @@
|
||||
"""
|
||||
PowerPoint 演示文稿 LLM 导出器
|
||||
调用大模型生成专业的任务执行演示文稿
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
from pptx import Presentation
|
||||
from pptx.util import Inches, Pt
|
||||
from pptx.enum.text import PP_ALIGN
|
||||
from pptx.chart.data import CategoryChartData
|
||||
from pptx.enum.chart import XL_CHART_TYPE
|
||||
|
||||
|
||||
class PptLLMExporter:
    """PowerPoint deck exporter — asks a large language model to draft the
    slide content for a task run, then renders it with python-pptx."""

    # OpenAI-compatible endpoint settings. Class-level dict populated from
    # config.yaml by _load_llm_config(); NOTE(review): being a class
    # attribute, it is shared state across all instances.
    LLM_CONFIG = {
        'OPENAI_API_BASE': None,
        'OPENAI_API_KEY': None,
        'OPENAI_API_MODEL': None,
    }
|
||||
|
||||
# Prompt 模板 - 与 docx_llm.py 内容对齐
|
||||
PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和演示文稿设计师。你的任务是将以下任务执行数据生成一份详细、专业的 PowerPoint 演示文稿内容。
|
||||
|
||||
## 任务基本信息
|
||||
- 任务名称:{task_name}
|
||||
|
||||
## 任务大纲(规划阶段)
|
||||
{task_outline}
|
||||
|
||||
## 执行结果
|
||||
{execution_results}
|
||||
|
||||
## 参与智能体
|
||||
{agents}
|
||||
|
||||
## 智能体评分
|
||||
{agent_scores}
|
||||
|
||||
---
|
||||
|
||||
## 演示文稿要求
|
||||
|
||||
请生成一份完整的演示文稿,包含以下幻灯片(共11页):
|
||||
|
||||
1. 封面页:标题、用户ID、日期
|
||||
2. 目录页:5个主要章节(其他作为小点)
|
||||
3. 执行摘要:任务整体执行情况概述
|
||||
4. 任务概述:任务背景与目标、任务范围与边界
|
||||
5. 任务规划分析:任务拆解的合理性、智能体角色分配的优化建议、工作流程设计
|
||||
6. 执行过程回顾:各阶段的完成情况、关键决策点、遇到的问题及解决方案
|
||||
7. 成果产出分析:产出物的质量评估、产出与预期目标的匹配度
|
||||
8. 团队协作分析:智能体之间的协作模式、信息传递效率
|
||||
9. 质量评估:整体完成质量评分(1-10分)、各维度的具体评分及理由
|
||||
10. 经验教训与改进建议:成功经验、存在的问题与不足、改进建议
|
||||
11. 结束页:感谢语
|
||||
|
||||
---
|
||||
|
||||
## 输出格式要求
|
||||
请直接输出 JSON 格式,不要包含任何 Markdown 标记。JSON 结构如下:
|
||||
```json
|
||||
{{
|
||||
"title": "{task_name}",
|
||||
"user_id": "{user_id}",
|
||||
"date": "{date}",
|
||||
"slides": [
|
||||
{{
|
||||
"type": "title",
|
||||
"title": "主标题",
|
||||
"user_id": "用户ID",
|
||||
"date": "日期"
|
||||
}},
|
||||
{{
|
||||
"type": "toc",
|
||||
"title": "目录",
|
||||
"sections": [
|
||||
{{"title": "一、执行摘要", "subs": []}},
|
||||
{{"title": "二、任务概述与规划", "subs": ["任务背景与目标", "任务范围与边界", "任务拆解", "智能体角色"]}},
|
||||
{{"title": "三、执行过程与成果", "subs": ["阶段完成情况", "关键决策点", "产出质量", "目标匹配度"]}},
|
||||
{{"title": "四、团队协作与质量评估", "subs": ["协作模式", "信息传递", "质量评分"]}},
|
||||
{{"title": "五、经验建议与总结", "subs": ["成功经验", "问题与不足", "改进建议"]}}
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "执行摘要",
|
||||
"bullets": ["要点1", "要点2", "要点3"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "任务概述",
|
||||
"bullets": ["任务背景与目标", "任务范围与边界"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "任务规划分析",
|
||||
"bullets": ["任务拆解的合理性", "智能体角色分配", "工作流程设计"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "执行过程回顾",
|
||||
"bullets": ["各阶段完成情况", "关键决策点", "遇到的问题及解决方案"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "成果产出分析",
|
||||
"bullets": ["产出物质量评估", "与预期目标匹配度"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "团队协作分析",
|
||||
"bullets": ["智能体协作模式", "信息传递效率"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "质量评估",
|
||||
"bullets": ["整体评分(1-10分)", "各维度评分及理由"]
|
||||
}},
|
||||
{{
|
||||
"type": "content",
|
||||
"title": "经验教训与改进建议",
|
||||
"bullets": ["成功经验", "存在的问题与不足", "改进建议"]
|
||||
}},
|
||||
{{
|
||||
"type": "ending",
|
||||
"title": "感谢聆听"
|
||||
}},
|
||||
{{
|
||||
"type": "table",
|
||||
"title": "质量评分表",
|
||||
"headers": ["维度", "评分", "说明"],
|
||||
"rows": [["整体质量", "8.5", "表现优秀"], ["协作效率", "7.5", "有待提升"]]
|
||||
}},
|
||||
{{
|
||||
"type": "chart",
|
||||
"title": "任务完成进度",
|
||||
"chart_type": "bar",
|
||||
"categories": ["规划阶段", "执行阶段", "评估阶段"],
|
||||
"series": [
|
||||
{{"name": "完成度", "values": [100, 85, 90]}}
|
||||
]
|
||||
}}
|
||||
]
|
||||
}}
|
||||
```
|
||||
|
||||
**重要说明:**
|
||||
1. **幻灯片顺序**:slides 数组中的幻灯片顺序必须严格按照以下顺序:
|
||||
- 第1页:type="title" 封面页
|
||||
- 第2页:type="toc" 目录页
|
||||
- 第3-10页:type="content" 内容页(8页左右)
|
||||
- 第11-14页:type="table" 或 "chart" 表格/图表页
|
||||
- 最后一页:type="ending" 结束页
|
||||
|
||||
2. **每张幻灯片必须包含完整的字段**:
|
||||
- `content` 类型必须有 `title` 和 `bullets` 数组
|
||||
- `table` 类型必须有 `title`、`headers` 数组和 `rows` 数组
|
||||
- `chart` 类型必须有 `title`、`chart_type`、`categories` 数组和 `series` 数组
|
||||
- `ending` 类型必须有 `title`
|
||||
|
||||
3. **禁止出现"单击此处添加标题"**:所有幻灯片都必须填写完整内容,不能留空
|
||||
|
||||
注意:
|
||||
- 幻灯片数量为11-15 页
|
||||
- 每页内容详细一点,使用要点式呈现,需要表格或者图表增强可读性
|
||||
- 不要生成"报告总字数"这样的统计信息
|
||||
- 所有文字使用简体中文
|
||||
- 根据实际任务数据生成内容,不是编造
|
||||
- **禁止使用中文引号「」『』""** ,所有引号必须是英文双引号 ""
|
||||
- user_id 填写:{user_id}
|
||||
- date 填写:{date}
|
||||
"""
|
||||
|
||||
    def __init__(self):
        """Load the OpenAI-compatible endpoint settings at construction time."""
        self._load_llm_config()

    def _load_llm_config(self):
        """Load LLM settings from the first config.yaml found.

        Candidate locations are tried in order: package-relative,
        backend-relative, then the current working directory.  On any
        failure the defaults (None) are left in place and a message is
        printed; nothing is raised.
        """
        try:
            import yaml
            possible_paths = [
                os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
                os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
                os.path.join(os.getcwd(), 'config', 'config.yaml'),
            ]

            for config_path in possible_paths:
                if os.path.exists(config_path):
                    with open(config_path, 'r', encoding='utf-8') as f:
                        config = yaml.safe_load(f)
                    if config:
                        # NOTE(review): mutates the class-level dict, i.e.
                        # shared state across instances.
                        self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
                        self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
                        self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
                        print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
                    # Stop at the first existing config file.
                    return
        except Exception as e:
            print(f"加载 LLM 配置失败: {e}")
|
||||
|
||||
    def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
        """Generate the PowerPoint deck by prompting the LLM.

        Builds the prompt from *task_data*, asks the model for a JSON
        slide description, renders it with python-pptx and saves it to
        *file_path*.  Returns True on success, False on any failure
        (errors are printed, never raised).
        """
        try:
            task_name = task_data.get('task_name', '未命名任务')
            task_content = task_data.get('task_content', '')
            task_outline = task_data.get('task_outline')
            rehearsal_log = task_data.get('rehearsal_log')
            agent_scores = task_data.get('agent_scores')
            user_id = task_data.get('user_id', '')
            date = task_data.get('date', '')

            # Restrict the prompt to the agents that actually took part,
            # and to their scores only.
            agents = self._extract_agents(task_outline)
            filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)

            task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
            rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
            agents_str = ', '.join(agents) if agents else '无'
            agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'

            # NOTE(review): task_content is passed but the template has no
            # {task_content} placeholder; str.format ignores unused kwargs,
            # so this is harmless dead data.
            prompt = self.PROMPT_TEMPLATE.format(
                task_name=task_name,
                task_content=task_content,
                task_outline=task_outline_str,
                execution_results=rehearsal_log_str,
                agents=agents_str,
                agent_scores=agent_scores_str,
                user_id=user_id,
                date=date
            )

            print("正在调用大模型生成 PPT 内容...")
            ppt_json_str = self._call_llm(prompt)

            if not ppt_json_str:
                print("LLM 生成 PPT 失败")
                return False

            print(f"PPT 内容生成成功,长度: {len(ppt_json_str)} 字符")
            self._create_ppt_from_json(ppt_json_str, file_path, task_name)
            return True

        except Exception as e:
            print(f"PPT LLM 导出失败: {e}")
            import traceback
            traceback.print_exc()
            return False
|
||||
|
||||
def _extract_agents(self, task_outline: Any) -> list:
|
||||
agents = set()
|
||||
if not task_outline or not isinstance(task_outline, dict):
|
||||
return []
|
||||
|
||||
collaboration_process = task_outline.get('Collaboration Process', [])
|
||||
if not collaboration_process or not isinstance(collaboration_process, list):
|
||||
return []
|
||||
|
||||
for step in collaboration_process:
|
||||
if isinstance(step, dict):
|
||||
agent_selection = step.get('AgentSelection', [])
|
||||
if isinstance(agent_selection, list):
|
||||
for agent in agent_selection:
|
||||
if agent:
|
||||
agents.add(agent)
|
||||
return list(agents)
|
||||
|
||||
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
|
||||
if not agent_scores or not isinstance(agent_scores, dict):
|
||||
return {}
|
||||
if not agents:
|
||||
return {}
|
||||
|
||||
filtered = {}
|
||||
for step_id, step_data in agent_scores.items():
|
||||
if not isinstance(step_data, dict):
|
||||
continue
|
||||
aspect_list = step_data.get('aspectList', [])
|
||||
agent_scores_data = step_data.get('agentScores', {})
|
||||
if not agent_scores_data:
|
||||
continue
|
||||
filtered_scores = {}
|
||||
for agent_name, scores in agent_scores_data.items():
|
||||
if agent_name in agents and isinstance(scores, dict):
|
||||
filtered_scores[agent_name] = scores
|
||||
if filtered_scores:
|
||||
filtered[step_id] = {'aspectList': aspect_list, 'agentScores': filtered_scores}
|
||||
return filtered
|
||||
|
||||
    def _call_llm(self, prompt: str) -> str:
        """Send *prompt* to the chat-completions API.

        Returns the reply text, or '' on any failure (missing config,
        missing openai package, or API error) — errors are printed, never
        raised.
        """
        try:
            import openai
            # Fail fast with a clear message for each missing setting.
            if not self.LLM_CONFIG['OPENAI_API_KEY']:
                print("错误: OPENAI_API_KEY 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_BASE']:
                print("错误: OPENAI_API_BASE 未配置")
                return ""
            if not self.LLM_CONFIG['OPENAI_API_MODEL']:
                print("错误: OPENAI_API_MODEL 未配置")
                return ""

            client = openai.OpenAI(
                api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
                base_url=self.LLM_CONFIG['OPENAI_API_BASE']
            )

            response = client.chat.completions.create(
                model=self.LLM_CONFIG['OPENAI_API_MODEL'],
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
                max_tokens=8000,
            )

            if response and response.choices:
                return response.choices[0].message.content
            return ""

        except ImportError:
            print("请安装 openai 库: pip install openai")
            return ""
        except Exception as e:
            print(f"调用 LLM 失败: {e}")
            return ""
|
||||
|
||||
def _fix_json_string(self, json_str: str) -> str:
|
||||
"""修复常见的 JSON 语法错误"""
|
||||
import re
|
||||
|
||||
json_str = json_str.strip()
|
||||
|
||||
# 首先尝试提取 JSON 代码块
|
||||
if '```json' in json_str:
|
||||
json_str = json_str.split('```json')[1].split('```')[0]
|
||||
elif '```' in json_str:
|
||||
# 检查是否有结束标记
|
||||
parts = json_str.split('```')
|
||||
if len(parts) >= 3:
|
||||
json_str = parts[1]
|
||||
else:
|
||||
# 没有结束标记,可能是代码块内容
|
||||
json_str = parts[1] if len(parts) > 1 else json_str
|
||||
|
||||
json_str = json_str.strip()
|
||||
|
||||
# 关键修复:处理中文双引号包含英文双引号的情况
|
||||
# 例如:"工作流程采用"提出-评估-改进"的模式"
|
||||
# 需要:找到中文引号对,去除它们,同时转义内部的英文引号
|
||||
import re
|
||||
|
||||
# 中文引号 Unicode
|
||||
CHINESE_LEFT = '\u201c' # "
|
||||
CHINESE_RIGHT = '\u201d' # "
|
||||
|
||||
# 使用正则匹配中文引号对之间的内容,并处理内部的英文引号
|
||||
# 匹配 "内容" 格式(中文引号对)
|
||||
pattern = re.compile(r'(\u201c)(.*?)(\u201d)')
|
||||
def replace_chinese_quotes(match):
|
||||
content = match.group(2)
|
||||
# 将内部未转义的英文双引号转义
|
||||
content = re.sub(r'(?<!\\)"', r'\\"', content)
|
||||
return content
|
||||
|
||||
json_str = pattern.sub(replace_chinese_quotes, json_str)
|
||||
|
||||
# 现在移除所有中文引号字符(已经被处理过了)
|
||||
json_str = json_str.replace(CHINESE_LEFT, '')
|
||||
json_str = json_str.replace(CHINESE_RIGHT, '')
|
||||
json_str = json_str.replace('\u2018', "'") # 中文左单引号 '
|
||||
json_str = json_str.replace('\u2019', "'") # 中文右单引号 '
|
||||
|
||||
# 移除行尾的逗号(trailing comma)
|
||||
json_str = re.sub(r',\s*}', '}', json_str)
|
||||
json_str = re.sub(r',\s*]', ']', json_str)
|
||||
|
||||
# 修复空字符串
|
||||
json_str = re.sub(r'""', '"', json_str)
|
||||
|
||||
# 修复单引号
|
||||
single_quotes = re.findall(r"'[^']*'", json_str)
|
||||
for sq in single_quotes:
|
||||
if '"' not in sq:
|
||||
json_str = json_str.replace(sq, sq.replace("'", '"'))
|
||||
|
||||
# 移除 trailing comma
|
||||
trailing_comma_pattern = re.compile(r',(\s*[}\]])')
|
||||
json_str = trailing_comma_pattern.sub(r'\1', json_str)
|
||||
|
||||
# 修复未加引号的 key
|
||||
unquoted_key_pattern = re.compile(r'([{,]\s*)([a-zA-Z_][a-zA-Z0-9_]*)\s*:')
|
||||
def fix_unquoted_key(match):
|
||||
prefix = match.group(1)
|
||||
key = match.group(2)
|
||||
return f'{prefix}"{key}":'
|
||||
json_str = unquoted_key_pattern.sub(fix_unquoted_key, json_str)
|
||||
|
||||
# 修复未加引号的值(但保留 true/false/null)
|
||||
unquoted_value_pattern = re.compile(r':\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*([,}\]])')
|
||||
def replace_unquoted(match):
|
||||
value = match.group(1)
|
||||
end = match.group(2)
|
||||
if value.lower() in ('true', 'false', 'null'):
|
||||
return f': {value}{end}'
|
||||
else:
|
||||
return f': "{value}"{end}'
|
||||
json_str = unquoted_value_pattern.sub(replace_unquoted, json_str)
|
||||
|
||||
# 新增:修复多余的逗号(连续逗号)
|
||||
json_str = re.sub(r',,+,', ',', json_str)
|
||||
|
||||
# 新增:修复缺少冒号的情况(key 后面没有冒号)
|
||||
# 匹配 "key" value 而不是 "key": value
|
||||
json_str = re.sub(r'(\"[^\"]+\")\s+([\[{])', r'\1: \2', json_str)
|
||||
|
||||
# 新增:尝试修复换行符问题,将单个 \n 替换为 \\n
|
||||
# 但这可能会导致问题,所以先注释掉
|
||||
# json_str = json_str.replace('\n', '\\n')
|
||||
|
||||
# 新增:移除可能的 BOM 或不可见字符
|
||||
json_str = json_str.replace('\ufeff', '')
|
||||
|
||||
print(f"JSON 修复后长度: {len(json_str)} 字符")
|
||||
return json_str
|
||||
|
||||
    def _create_ppt_from_json(self, json_str: str, file_path: str, task_name: str):
        """Parse the LLM's JSON reply and render it into a .pptx file.

        Builds a blank (template-free) 16:9 presentation, dispatches each
        slide description to the matching ``_add_*_slide`` helper, and
        saves to *file_path*.  Errors are printed and re-raised.
        """
        try:
            json_str = json_str.strip()
            # Strip a markdown code fence, if the model wrapped its output.
            if '```json' in json_str:
                json_str = json_str.split('```json')[1].split('```')[0]
            elif '```' in json_str:
                json_str = json_str.split('```')[1].split('```')[0]

            json_str = self._fix_json_string(json_str)

            # Parse; on failure print surrounding context to aid debugging.
            try:
                replace_data = json.loads(json_str)
            except json.JSONDecodeError as e:
                print(f"JSON 解析失败: {e}")
                print(f"错误位置: 第 {e.lineno} 行, 第 {e.colno} 列")
                start = max(0, e.pos - 100)
                end = min(len(json_str), e.pos + 100)
                context = json_str[start:end]
                print(f"错误上下文: ...{context}...")
                raise

            # Blank 16:9 presentation, no template file.
            prs = Presentation()
            prs.slide_width = Inches(13.333)
            prs.slide_height = Inches(7.5)

            slides_data = replace_data.get('slides', [])

            # Force any "ending" slide(s) to the end, regardless of the
            # order the LLM emitted them in.
            ending_slides = [s for s in slides_data if s.get('type') == 'ending']
            other_slides = [s for s in slides_data if s.get('type') != 'ending']

            for slide_data in other_slides:
                slide_type = slide_data.get('type', 'content')

                if slide_type == 'title':
                    self._add_title_slide(prs, slide_data)
                elif slide_type == 'toc':
                    self._add_toc_slide(prs, slide_data)
                elif slide_type == 'content':
                    self._add_content_slide(prs, slide_data)
                elif slide_type == 'two_column':
                    self._add_two_column_slide(prs, slide_data)
                elif slide_type == 'table':
                    self._add_table_slide(prs, slide_data)
                elif slide_type == 'chart':
                    self._add_chart_slide(prs, slide_data)

            for slide_data in ending_slides:
                self._add_ending_slide(prs, slide_data)

            prs.save(file_path)
            print(f"PowerPoint 已保存: {file_path}")

        except ImportError:
            print("请安装 python-pptx 库: pip install python-pptx")
            raise
        except json.JSONDecodeError as e:
            print(f"JSON 解析失败: {e}")
            raise
        except Exception as e:
            print(f"创建 PPT 失败: {e}")
            raise
|
||||
|
||||
    def _add_title_slide(self, prs, data):
        """Add the cover slide: centered title, user id and date."""
        slide_layout = prs.slide_layouts[6]  # blank layout
        slide = prs.slides.add_slide(slide_layout)

        # Remove default placeholders so no "click to add" text shows.
        for shape in list(slide.shapes):
            if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
                slide.shapes.remove(shape)

        # Main title
        title_box = slide.shapes.add_textbox(Inches(0.5), Inches(2.5), Inches(12.333), Inches(1.5))
        tf = title_box.text_frame
        p = tf.paragraphs[0]
        p.text = data.get('title', '')
        p.font.size = Pt(44)
        p.font.bold = True
        p.alignment = PP_ALIGN.CENTER

        # User id line
        user_id_box = slide.shapes.add_textbox(Inches(0.5), Inches(4.2), Inches(12.333), Inches(0.5))
        tf = user_id_box.text_frame
        p = tf.paragraphs[0]
        p.text = f"用户ID: {data.get('user_id', '')}"
        p.font.size = Pt(18)
        p.alignment = PP_ALIGN.CENTER

        # Date line
        date_box = slide.shapes.add_textbox(Inches(0.5), Inches(4.8), Inches(12.333), Inches(0.5))
        tf = date_box.text_frame
        p = tf.paragraphs[0]
        p.text = f"日期: {data.get('date', '')}"
        p.font.size = Pt(18)
        p.alignment = PP_ALIGN.CENTER
|
||||
|
||||
    def _add_toc_slide(self, prs, data):
        """Add the table-of-contents slide: section headings with sub-items."""
        slide_layout = prs.slide_layouts[6]  # blank layout
        slide = prs.slides.add_slide(slide_layout)

        # Remove default placeholders so no "click to add" text shows.
        for shape in list(slide.shapes):
            if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
                slide.shapes.remove(shape)

        # Slide heading
        title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.4), Inches(12.333), Inches(0.8))
        tf = title_box.text_frame
        p = tf.paragraphs[0]
        p.text = data.get('title', '目录')
        p.font.size = Pt(36)
        p.font.bold = True

        # TOC body
        content_box = slide.shapes.add_textbox(Inches(0.5), Inches(1.5), Inches(12.333), Inches(5.5))
        tf = content_box.text_frame
        tf.word_wrap = True

        sections = data.get('sections', [])

        for i, section in enumerate(sections):
            section_title = section.get('title', '')
            subs = section.get('subs', [])

            # A fresh text frame already has one empty paragraph; reuse it
            # for the first section instead of adding a new one.
            if i == 0:
                p = tf.paragraphs[0]
            else:
                p = tf.add_paragraph()

            # Section heading
            p.text = section_title
            p.level = 0
            p.font.size = Pt(20)
            p.font.bold = True

            # Sub-items, indented one level
            for sub in subs:
                p_sub = tf.add_paragraph()
                p_sub.text = sub
                p_sub.level = 1
                p_sub.font.size = Pt(16)
|
||||
|
||||
def _add_ending_slide(self, prs, data):
|
||||
"""添加结束页"""
|
||||
slide_layout = prs.slide_layouts[6] # 空白布局
|
||||
slide = prs.slides.add_slide(slide_layout)
|
||||
|
||||
# 删除默认的占位符形状
|
||||
for shape in list(slide.shapes):
|
||||
if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
|
||||
slide.shapes.remove(shape)
|
||||
|
||||
# 结束语
|
||||
title_box = slide.shapes.add_textbox(Inches(0.5), Inches(3), Inches(12.333), Inches(1.5))
|
||||
tf = title_box.text_frame
|
||||
p = tf.paragraphs[0]
|
||||
p.text = data.get('title', '感谢聆听')
|
||||
p.font.size = Pt(44)
|
||||
p.font.bold = True
|
||||
p.alignment = PP_ALIGN.CENTER
|
||||
|
||||
def _add_content_slide(self, prs, data):
|
||||
"""添加内容页"""
|
||||
slide_layout = prs.slide_layouts[6] # 空白布局
|
||||
slide = prs.slides.add_slide(slide_layout)
|
||||
|
||||
# 删除默认的占位符形状(避免出现"单击此处添加标题")
|
||||
for shape in list(slide.shapes):
|
||||
if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
|
||||
slide.shapes.remove(shape)
|
||||
|
||||
# 标题
|
||||
title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.8))
|
||||
tf = title_box.text_frame
|
||||
p = tf.paragraphs[0]
|
||||
p.text = data.get('title', '')
|
||||
p.font.size = Pt(32)
|
||||
p.font.bold = True
|
||||
|
||||
# 内容
|
||||
content_box = slide.shapes.add_textbox(Inches(0.5), Inches(1.3), Inches(12.333), Inches(5.8))
|
||||
tf = content_box.text_frame
|
||||
tf.word_wrap = True
|
||||
|
||||
bullets = data.get('bullets', [])
|
||||
|
||||
for i, bullet in enumerate(bullets):
|
||||
if i == 0:
|
||||
p = tf.paragraphs[0]
|
||||
else:
|
||||
p = tf.add_paragraph()
|
||||
p.text = bullet
|
||||
p.level = 0
|
||||
p.font.size = Pt(18)
|
||||
|
||||
    def _add_two_column_slide(self, prs, data):
        """Add a two-column slide: heading, then left/right titled bullet lists."""
        slide_layout = prs.slide_layouts[6]  # blank layout
        slide = prs.slides.add_slide(slide_layout)

        # Remove default placeholders so no "click to add" text shows.
        for shape in list(slide.shapes):
            if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
                slide.shapes.remove(shape)

        # Slide heading
        title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.8))
        tf = title_box.text_frame
        p = tf.paragraphs[0]
        p.text = data.get('title', '')
        p.font.size = Pt(32)
        p.font.bold = True

        # Left column
        left_box = slide.shapes.add_textbox(Inches(0.5), Inches(1.3), Inches(5.8), Inches(5.8))
        tf = left_box.text_frame
        tf.word_wrap = True

        left_title = data.get('left_title', '')
        p = tf.paragraphs[0]
        p.text = left_title
        p.font.size = Pt(22)
        p.font.bold = True

        left_bullets = data.get('left_bullets', [])
        for bullet in left_bullets:
            p = tf.add_paragraph()
            p.text = bullet
            p.level = 1
            p.font.size = Pt(16)

        # Right column
        right_box = slide.shapes.add_textbox(Inches(7.0), Inches(1.3), Inches(5.8), Inches(5.8))
        tf = right_box.text_frame
        tf.word_wrap = True

        right_title = data.get('right_title', '')
        p = tf.paragraphs[0]
        p.text = right_title
        p.font.size = Pt(22)
        p.font.bold = True

        right_bullets = data.get('right_bullets', [])
        for bullet in right_bullets:
            p = tf.add_paragraph()
            p.text = bullet
            p.level = 1
            p.font.size = Pt(16)
|
||||
|
||||
    def _add_table_slide(self, prs, data):
        """Add a slide containing a heading and a data table."""
        slide_layout = prs.slide_layouts[6]  # blank layout
        slide = prs.slides.add_slide(slide_layout)

        # Remove default placeholders so no "click to add" text shows.
        for shape in list(slide.shapes):
            if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
                slide.shapes.remove(shape)

        # Slide heading
        title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12), Inches(0.6))
        tf = title_box.text_frame
        p = tf.paragraphs[0]
        p.text = data.get('title', '')
        p.font.size = Pt(32)
        p.font.bold = True

        # Table — only drawn when both headers and data rows are present.
        headers = data.get('headers', [])
        rows = data.get('rows', [])

        if headers and rows:
            x, y, cx, cy = Inches(0.5), Inches(1.5), Inches(12), Inches(3)
            table = slide.shapes.add_table(len(rows) + 1, len(headers), x, y, cx, cy).table

            # Header row
            for i, header in enumerate(headers):
                table.cell(0, i).text = header

            # Data rows (offset by 1 for the header row)
            for row_idx, row_data in enumerate(rows):
                for col_idx, cell_data in enumerate(row_data):
                    table.cell(row_idx + 1, col_idx).text = cell_data
|
||||
|
||||
def _add_chart_slide(self, prs, data):
|
||||
"""添加图表页"""
|
||||
slide_layout = prs.slide_layouts[6] # 空白布局
|
||||
slide = prs.slides.add_slide(slide_layout)
|
||||
|
||||
# 删除默认的占位符形状
|
||||
for shape in list(slide.shapes):
|
||||
if shape.has_text_frame and shape.text_frame.text.strip() in ['单击此处添加标题', '单击此处添加内容', 'Click to add title', 'Click to add text']:
|
||||
slide.shapes.remove(shape)
|
||||
|
||||
# 标题
|
||||
title_box = slide.shapes.add_textbox(Inches(0.5), Inches(0.3), Inches(12.333), Inches(0.6))
|
||||
tf = title_box.text_frame
|
||||
p = tf.paragraphs[0]
|
||||
p.text = data.get('title', '')
|
||||
p.font.size = Pt(32)
|
||||
p.font.bold = True
|
||||
|
||||
# 图表数据
|
||||
chart_type_str = data.get('chart_type', 'bar').lower()
|
||||
categories = data.get('categories', [])
|
||||
series_list = data.get('series', [])
|
||||
|
||||
# 映射图表类型
|
||||
chart_type_map = {
|
||||
'bar': XL_CHART_TYPE.COLUMN_CLUSTERED,
|
||||
'column': XL_CHART_TYPE.COLUMN_CLUSTERED,
|
||||
'line': XL_CHART_TYPE.LINE,
|
||||
'pie': XL_CHART_TYPE.PIE,
|
||||
'饼图': XL_CHART_TYPE.PIE,
|
||||
'柱状图': XL_CHART_TYPE.COLUMN_CLUSTERED,
|
||||
'折线图': XL_CHART_TYPE.LINE,
|
||||
}
|
||||
chart_type = chart_type_map.get(chart_type_str, XL_CHART_TYPE.COLUMN_CLUSTERED)
|
||||
|
||||
# 创建图表数据
|
||||
chart_data = CategoryChartData()
|
||||
chart_data.categories = categories
|
||||
|
||||
for series in series_list:
|
||||
series_name = series.get('name', '数据')
|
||||
values = series.get('values', [])
|
||||
chart_data.add_series(series_name, values)
|
||||
|
||||
# 添加图表
|
||||
x, y, cx, cy = Inches(0.5), Inches(1.2), Inches(12.333), Inches(5.5)
|
||||
chart = slide.shapes.add_chart(chart_type, x, y, cx, cy, chart_data).chart
|
||||
|
||||
# 兼容旧版本
|
||||
def _load_llm_config_old(self):
|
||||
self._load_llm_config()
|
||||
@@ -1,481 +0,0 @@
|
||||
"""
|
||||
Excel 文档 LLM 报告导出器
|
||||
调用大模型生成专业的任务执行报告,并保存为Excel格式
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any
|
||||
|
||||
try:
|
||||
from openpyxl import Workbook
|
||||
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
|
||||
from openpyxl.utils import get_column_letter
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class XlsxLLMExporter:
|
||||
"""Excel 文档 LLM 报告导出器 - 调用大模型生成报告"""
|
||||
|
||||
LLM_CONFIG = {
|
||||
'OPENAI_API_BASE': None,
|
||||
'OPENAI_API_KEY': None,
|
||||
'OPENAI_API_MODEL': None,
|
||||
}
|
||||
|
||||
PROMPT_TEMPLATE = """你是一位专业的项目管理顾问和数据分析专家。你的任务是将以下任务执行数据生成一份详细、专业、结构化的执行报告。
|
||||
|
||||
## 任务基本信息
|
||||
- 任务名称:{task_name}
|
||||
|
||||
## 任务大纲(规划阶段)
|
||||
{task_outline}
|
||||
|
||||
## 执行结果
|
||||
{rehearsal_log}
|
||||
|
||||
## 参与智能体
|
||||
{agents}
|
||||
|
||||
## 智能体评分
|
||||
{agent_scores}
|
||||
|
||||
---
|
||||
|
||||
## 报告要求
|
||||
|
||||
请生成一份完整的任务执行报告,包含以下章节:
|
||||
|
||||
### 1. 执行摘要
|
||||
用 2-3 句话概括本次任务的整体执行情况。
|
||||
|
||||
### 2. 任务概述
|
||||
- 任务背景与目标
|
||||
- 任务范围与边界
|
||||
|
||||
### 3. 任务规划分析
|
||||
- 任务拆解的合理性
|
||||
- 智能体角色分配的优化建议
|
||||
- 工作流程设计
|
||||
|
||||
### 4. 执行过程回顾
|
||||
- 各阶段的完成情况
|
||||
- 关键决策点
|
||||
- 遇到的问题及解决方案
|
||||
|
||||
### 5. 成果产出分析
|
||||
- 产出物的质量评估
|
||||
- 产出与预期目标的匹配度
|
||||
|
||||
### 6. 团队协作分析
|
||||
- 智能体之间的协作模式
|
||||
- 信息传递效率
|
||||
|
||||
### 7. 质量评估
|
||||
- 整体完成质量评分(1-10分)
|
||||
- 各维度的具体评分及理由
|
||||
|
||||
### 8. 经验教训与改进建议
|
||||
- 成功经验
|
||||
- 存在的问题与不足
|
||||
- 改进建议
|
||||
|
||||
---
|
||||
|
||||
## 输出格式要求
|
||||
- 使用 Markdown 格式输出
|
||||
- 语言:简体中文
|
||||
- 适当使用列表、表格增强可读性
|
||||
- 报告长度必须达到 3000-5000 字,每个章节都要详细展开,不要遗漏任何章节
|
||||
- 每个章节的内容要充实,提供具体的分析和建议
|
||||
- 注意:所有加粗标记必须成对出现,如 **文本**,不要单独使用 ** 或缺少结束标记
|
||||
- 禁止使用 mermaid、graph TD、flowchart 等图表代码,如果需要描述流程请用纯文字描述
|
||||
- 不要生成附录章节
|
||||
- 不要在报告中显示"报告总字数"这样的统计信息
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._load_llm_config()
|
||||
|
||||
def _load_llm_config(self):
|
||||
"""从配置文件加载 LLM 配置"""
|
||||
try:
|
||||
import yaml
|
||||
possible_paths = [
|
||||
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'config.yaml'),
|
||||
os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'backend', 'config', 'config.yaml'),
|
||||
os.path.join(os.getcwd(), 'config', 'config.yaml'),
|
||||
]
|
||||
|
||||
for config_path in possible_paths:
|
||||
if os.path.exists(config_path):
|
||||
with open(config_path, 'r', encoding='utf-8') as f:
|
||||
config = yaml.safe_load(f)
|
||||
if config:
|
||||
self.LLM_CONFIG['OPENAI_API_BASE'] = config.get('OPENAI_API_BASE')
|
||||
self.LLM_CONFIG['OPENAI_API_KEY'] = config.get('OPENAI_API_KEY')
|
||||
self.LLM_CONFIG['OPENAI_API_MODEL'] = config.get('OPENAI_API_MODEL')
|
||||
print(f"已加载 LLM 配置: {self.LLM_CONFIG['OPENAI_API_MODEL']}")
|
||||
return
|
||||
except Exception as e:
|
||||
print(f"加载 LLM 配置失败: {e}")
|
||||
|
||||
def generate(self, task_data: Dict[str, Any], file_path: str) -> bool:
|
||||
"""生成 Excel 文档(调用 LLM 生成报告)"""
|
||||
try:
|
||||
task_name = task_data.get('task_name', '未命名任务')
|
||||
task_outline = task_data.get('task_outline')
|
||||
rehearsal_log = task_data.get('rehearsal_log')
|
||||
agent_scores = task_data.get('agent_scores')
|
||||
|
||||
agents = self._extract_agents(task_outline)
|
||||
filtered_agent_scores = self._filter_agent_scores(agent_scores, agents)
|
||||
|
||||
task_outline_str = json.dumps(task_outline, ensure_ascii=False, indent=2) if task_outline else '无'
|
||||
rehearsal_log_str = json.dumps(rehearsal_log, ensure_ascii=False, indent=2) if rehearsal_log else '无'
|
||||
agents_str = ', '.join(agents) if agents else '无'
|
||||
agent_scores_str = json.dumps(filtered_agent_scores, ensure_ascii=False, indent=2) if filtered_agent_scores else '无'
|
||||
|
||||
prompt = self.PROMPT_TEMPLATE.format(
|
||||
task_name=task_name,
|
||||
task_outline=task_outline_str,
|
||||
rehearsal_log=rehearsal_log_str,
|
||||
agents=agents_str,
|
||||
agent_scores=agent_scores_str
|
||||
)
|
||||
|
||||
print("正在调用大模型生成 Excel 报告...")
|
||||
report_content = self._call_llm(prompt)
|
||||
|
||||
if not report_content:
|
||||
print("LLM 生成报告失败")
|
||||
return False
|
||||
|
||||
report_content = self._clean_report_title(report_content)
|
||||
print(f"报告生成成功,长度: {len(report_content)} 字符")
|
||||
|
||||
self._save_as_excel(report_content, file_path, task_name)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Excel LLM 导出失败: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return False
|
||||
|
||||
def _clean_report_title(self, content: str) -> str:
|
||||
"""清理报告开头的重复标题"""
|
||||
lines = content.split('\n')
|
||||
if not lines:
|
||||
return content
|
||||
|
||||
first_line = lines[0].strip()
|
||||
if first_line == '任务执行报告' or first_line == '# 任务执行报告':
|
||||
lines = lines[1:]
|
||||
while lines and not lines[0].strip():
|
||||
lines.pop(0)
|
||||
|
||||
return '\n'.join(lines)
|
||||
|
||||
def _extract_agents(self, task_outline: Any) -> list:
|
||||
"""从 task_outline 中提取参与智能体列表"""
|
||||
agents = set()
|
||||
if not task_outline or not isinstance(task_outline, dict):
|
||||
return []
|
||||
|
||||
collaboration_process = task_outline.get('Collaboration Process', [])
|
||||
if not collaboration_process or not isinstance(collaboration_process, list):
|
||||
return []
|
||||
|
||||
for step in collaboration_process:
|
||||
if isinstance(step, dict):
|
||||
agent_selection = step.get('AgentSelection', [])
|
||||
if isinstance(agent_selection, list):
|
||||
for agent in agent_selection:
|
||||
if agent:
|
||||
agents.add(agent)
|
||||
|
||||
return list(agents)
|
||||
|
||||
def _filter_agent_scores(self, agent_scores: Any, agents: list) -> dict:
|
||||
"""过滤 agent_scores,只保留参与当前任务的智能体评分"""
|
||||
if not agent_scores or not isinstance(agent_scores, dict):
|
||||
return {}
|
||||
if not agents:
|
||||
return {}
|
||||
|
||||
filtered = {}
|
||||
for step_id, step_data in agent_scores.items():
|
||||
if not isinstance(step_data, dict):
|
||||
continue
|
||||
|
||||
aspect_list = step_data.get('aspectList', [])
|
||||
agent_scores_data = step_data.get('agentScores', {})
|
||||
|
||||
if not agent_scores_data:
|
||||
continue
|
||||
|
||||
filtered_scores = {}
|
||||
for agent_name, scores in agent_scores_data.items():
|
||||
if agent_name in agents and isinstance(scores, dict):
|
||||
filtered_scores[agent_name] = scores
|
||||
|
||||
if filtered_scores:
|
||||
filtered[step_id] = {
|
||||
'aspectList': aspect_list,
|
||||
'agentScores': filtered_scores
|
||||
}
|
||||
|
||||
return filtered
|
||||
|
||||
def _call_llm(self, prompt: str) -> str:
|
||||
"""调用大模型 API 生成报告"""
|
||||
try:
|
||||
import openai
|
||||
|
||||
if not self.LLM_CONFIG['OPENAI_API_KEY']:
|
||||
print("错误: OPENAI_API_KEY 未配置")
|
||||
return ""
|
||||
if not self.LLM_CONFIG['OPENAI_API_BASE']:
|
||||
print("错误: OPENAI_API_BASE 未配置")
|
||||
return ""
|
||||
if not self.LLM_CONFIG['OPENAI_API_MODEL']:
|
||||
print("错误: OPENAI_API_MODEL 未配置")
|
||||
return ""
|
||||
|
||||
client = openai.OpenAI(
|
||||
api_key=self.LLM_CONFIG['OPENAI_API_KEY'],
|
||||
base_url=self.LLM_CONFIG['OPENAI_API_BASE']
|
||||
)
|
||||
|
||||
response = client.chat.completions.create(
|
||||
model=self.LLM_CONFIG['OPENAI_API_MODEL'],
|
||||
messages=[
|
||||
{"role": "user", "content": prompt}
|
||||
],
|
||||
temperature=0.7,
|
||||
max_tokens=10000,
|
||||
)
|
||||
|
||||
if response and response.choices:
|
||||
return response.choices[0].message.content
|
||||
|
||||
return ""
|
||||
|
||||
except ImportError:
|
||||
print("请安装 openai 库: pip install openai")
|
||||
return ""
|
||||
except Exception as e:
|
||||
print(f"调用 LLM 失败: {e}")
|
||||
return ""
|
||||
|
||||
def _save_as_excel(self, markdown_content: str, file_path: str, task_name: str):
|
||||
"""将 Markdown 内容保存为 Excel 文档"""
|
||||
try:
|
||||
from openpyxl import Workbook
|
||||
from openpyxl.styles import Font, Alignment, PatternFill, Border, Side
|
||||
from openpyxl.utils import get_column_letter
|
||||
|
||||
wb = Workbook()
|
||||
ws = wb.active
|
||||
ws.title = "任务执行报告"
|
||||
|
||||
header_fill = PatternFill(start_color="4472C4", end_color="4472C4", fill_type="solid")
|
||||
header_font = Font(bold=True, color="FFFFFF", size=12)
|
||||
title_font = Font(bold=True, size=14)
|
||||
section_font = Font(bold=True, size=11)
|
||||
normal_font = Font(size=10)
|
||||
|
||||
thin_border = Border(
|
||||
left=Side(style='thin'),
|
||||
right=Side(style='thin'),
|
||||
top=Side(style='thin'),
|
||||
bottom=Side(style='thin')
|
||||
)
|
||||
|
||||
ws.column_dimensions['A'].width = 20
|
||||
ws.column_dimensions['B'].width = 80
|
||||
|
||||
row = 1
|
||||
ws[f'A{row}'] = task_name
|
||||
ws[f'A{row}'].font = Font(bold=True, size=16)
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
|
||||
ws[f'A{row}'] = f"导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
|
||||
ws[f'A{row}'].font = Font(size=9, italic=True)
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 2
|
||||
|
||||
lines = markdown_content.split('\n')
|
||||
current_section = ""
|
||||
table_data = []
|
||||
in_table = False
|
||||
|
||||
for line in lines:
|
||||
line = line.rstrip()
|
||||
|
||||
if not line:
|
||||
if in_table and table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
table_data = []
|
||||
in_table = False
|
||||
continue
|
||||
|
||||
stripped = line.strip()
|
||||
if stripped.startswith('|') and stripped.endswith('|') and '---' in stripped:
|
||||
continue
|
||||
|
||||
if '|' in line and line.strip().startswith('|'):
|
||||
cells = [cell.strip() for cell in line.split('|')[1:-1]]
|
||||
if cells and any(cells):
|
||||
table_data.append(cells)
|
||||
in_table = True
|
||||
continue
|
||||
else:
|
||||
if in_table and table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
table_data = []
|
||||
in_table = False
|
||||
|
||||
if line.startswith('### '):
|
||||
if table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
table_data = []
|
||||
current_section = line[4:].strip()
|
||||
ws[f'A{row}'] = current_section
|
||||
ws[f'A{row}'].font = Font(bold=True, size=12, color="4472C4")
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
elif line.startswith('## '):
|
||||
if table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
table_data = []
|
||||
section_title = line[2:].strip()
|
||||
ws[f'A{row}'] = section_title
|
||||
ws[f'A{row}'].font = Font(bold=True, size=13)
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
elif line.startswith('# '):
|
||||
pass
|
||||
elif line.startswith('#### '):
|
||||
if table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
table_data = []
|
||||
current_section = line[5:].strip()
|
||||
ws[f'A{row}'] = current_section
|
||||
ws[f'A{row}'].font = Font(bold=True, size=11, color="4472C4")
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center')
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
elif line.startswith('- ') or line.startswith('* ') or line.startswith('• '):
|
||||
text = line[2:].strip() if line.startswith(('- ', '* ')) else line[1:].strip()
|
||||
text = self._clean_markdown(text)
|
||||
ws[f'A{row}'] = "• " + text
|
||||
ws[f'A{row}'].font = normal_font
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
elif line.startswith('**') and '**:' in line:
|
||||
parts = line.split(':', 1)
|
||||
if len(parts) == 2:
|
||||
key = self._clean_markdown(parts[0])
|
||||
value = self._clean_markdown(parts[1])
|
||||
ws[f'A{row}'] = f"{key}: {value}"
|
||||
ws[f'A{row}'].font = Font(bold=True, size=10)
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
else:
|
||||
clean_line = self._clean_markdown(line)
|
||||
if clean_line:
|
||||
ws[f'A{row}'] = clean_line
|
||||
ws[f'A{row}'].font = normal_font
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
row += 1
|
||||
|
||||
if table_data:
|
||||
row = self._write_table_to_excel(ws, table_data, row, header_fill, header_font, thin_border, normal_font)
|
||||
|
||||
ws.column_dimensions['B'].width = 80
|
||||
|
||||
# 设置自适应行高
|
||||
for r in range(1, row + 1):
|
||||
ws.row_dimensions[r].bestFit = True
|
||||
|
||||
for col in ['A', 'B']:
|
||||
for r in range(1, row + 1):
|
||||
cell = ws[f'{col}{r}']
|
||||
if cell.border is None or cell.border == Border():
|
||||
cell.border = Border(
|
||||
left=Side(style='none'),
|
||||
right=Side(style='none'),
|
||||
top=Side(style='none'),
|
||||
bottom=Side(style='none')
|
||||
)
|
||||
|
||||
wb.save(file_path)
|
||||
print(f"Excel 文档已保存: {file_path}")
|
||||
|
||||
except ImportError:
|
||||
print("请安装 openpyxl 库: pip install openpyxl")
|
||||
raise
|
||||
except Exception as e:
|
||||
print(f"保存 Excel 文档失败: {e}")
|
||||
raise
|
||||
|
||||
def _write_table_to_excel(self, ws, table_data, row, header_fill, header_font, border, normal_font):
|
||||
"""将表格数据写入 Excel"""
|
||||
if not table_data:
|
||||
return row
|
||||
|
||||
if len(table_data) == 1:
|
||||
ws[f'A{row}'] = table_data[0][0] if table_data[0] else ""
|
||||
ws[f'A{row}'].font = normal_font
|
||||
ws[f'A{row}'].alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
|
||||
ws.merge_cells(f'A{row}:B{row}')
|
||||
return row + 1
|
||||
|
||||
max_cols = max(len(row_data) for row_data in table_data)
|
||||
max_cols = min(max_cols, 2)
|
||||
|
||||
for col_idx in range(max_cols):
|
||||
col_letter = get_column_letter(col_idx + 1)
|
||||
ws.column_dimensions[col_letter].width = 25 if max_cols > 1 else 80
|
||||
|
||||
start_row = row
|
||||
for row_idx, row_data in enumerate(table_data):
|
||||
for col_idx in range(min(len(row_data), max_cols)):
|
||||
col_letter = get_column_letter(col_idx + 1)
|
||||
cell = ws[f'{col_letter}{row + row_idx}']
|
||||
cell.value = self._clean_markdown(row_data[col_idx])
|
||||
cell.font = header_font if row_idx == 0 else normal_font
|
||||
cell.fill = header_fill if row_idx == 0 else PatternFill()
|
||||
cell.alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
|
||||
cell.border = border
|
||||
|
||||
if max_cols > 1:
|
||||
for col_idx in range(len(row_data), max_cols):
|
||||
col_letter = get_column_letter(col_idx + 1)
|
||||
ws[f'{col_letter}{row + row_idx}'].border = border
|
||||
|
||||
return row + len(table_data) + 1
|
||||
|
||||
def _clean_markdown(self, text: str) -> str:
|
||||
"""清理 Markdown 格式标记"""
|
||||
import re
|
||||
text = re.sub(r'\*\*(.+?)\*\*', r'\1', text)
|
||||
text = re.sub(r'\*(.+?)\*', r'\1', text)
|
||||
text = re.sub(r'__(.+?)__', r'\1', text)
|
||||
text = re.sub(r'_(.+?)_', r'\1', text)
|
||||
text = re.sub(r'~~(.+?)~~', r'\1', text)
|
||||
text = re.sub(r'`(.+?)`', r'\1', text)
|
||||
text = text.replace('\\n', '\n').replace('\\t', '\t')
|
||||
return text.strip()
|
||||
@@ -1,35 +1,24 @@
|
||||
import asyncio
|
||||
|
||||
import httpx
|
||||
from openai import OpenAI, AsyncOpenAI, max_retries
|
||||
import openai
|
||||
import yaml
|
||||
from termcolor import colored
|
||||
import os
|
||||
|
||||
# Helper function to avoid circular import
|
||||
def print_colored(text, text_color="green", background="on_white"):
|
||||
print(colored(text, text_color, background))
|
||||
|
||||
# load config (apikey, apibase, model)
|
||||
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
|
||||
try:
|
||||
with open(yaml_file, "r", encoding="utf-8") as file:
|
||||
yaml_data = yaml.safe_load(file)
|
||||
except Exception:
|
||||
yaml_data = {}
|
||||
yaml_file = {}
|
||||
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
|
||||
"OPENAI_API_BASE", "https://api.openai.com"
|
||||
)
|
||||
openai.api_base = OPENAI_API_BASE
|
||||
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
|
||||
"OPENAI_API_KEY", ""
|
||||
)
|
||||
OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
|
||||
"OPENAI_API_MODEL", ""
|
||||
)
|
||||
|
||||
# Initialize OpenAI clients
|
||||
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
|
||||
async_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
|
||||
openai.api_key = OPENAI_API_KEY
|
||||
MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
|
||||
"OPENAI_API_MODEL", "gpt-4-turbo-preview"
|
||||
)
|
||||
@@ -46,10 +35,8 @@ MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get(
|
||||
|
||||
# for LLM completion
|
||||
def LLM_Completion(
|
||||
messages: list[dict], stream: bool = True, useGroq: bool = True,model_config: dict = None
|
||||
messages: list[dict], stream: bool = True, useGroq: bool = True
|
||||
) -> str:
|
||||
if model_config:
|
||||
return _call_with_custom_config(messages,stream,model_config)
|
||||
if not useGroq or not FAST_DESIGN_MODE:
|
||||
force_gpt4 = True
|
||||
useGroq = False
|
||||
@@ -82,106 +69,16 @@ def LLM_Completion(
|
||||
return _chat_completion(messages=messages)
|
||||
|
||||
|
||||
def _call_with_custom_config(messages: list[dict], stream: bool, model_config: dict) ->str:
|
||||
"使用自定义配置调用API"
|
||||
api_url = model_config.get("apiUrl", OPENAI_API_BASE)
|
||||
api_key = model_config.get("apiKey", OPENAI_API_KEY)
|
||||
api_model = model_config.get("apiModel", OPENAI_API_MODEL)
|
||||
|
||||
temp_client = OpenAI(api_key=api_key, base_url=api_url)
|
||||
temp_async_client = AsyncOpenAI(api_key=api_key, base_url=api_url)
|
||||
|
||||
try:
|
||||
if stream:
|
||||
try:
|
||||
loop = asyncio.get_event_loop()
|
||||
except RuntimeError as ex:
|
||||
if "There is no current event loop in thread" in str(ex):
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
return loop.run_until_complete(
|
||||
_achat_completion_stream_custom(messages=messages, temp_async_client=temp_async_client, api_model=api_model)
|
||||
)
|
||||
else:
|
||||
response = temp_client.chat.completions.create(
|
||||
messages=messages,
|
||||
model=api_model,
|
||||
temperature=0.3,
|
||||
max_tokens=4096,
|
||||
timeout=180
|
||||
|
||||
)
|
||||
# 检查响应是否有效
|
||||
if not response.choices or len(response.choices) == 0:
|
||||
raise Exception(f"API returned empty response for model {api_model}")
|
||||
if not response.choices[0] or not response.choices[0].message:
|
||||
raise Exception(f"API returned invalid response format for model {api_model}")
|
||||
|
||||
full_reply_content = response.choices[0].message.content
|
||||
if full_reply_content is None:
|
||||
raise Exception(f"API returned None content for model {api_model}")
|
||||
|
||||
#print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
return full_reply_content
|
||||
except Exception as e:
|
||||
print_colored(f"[API Error] Custom API error: {str(e)}", "red")
|
||||
raise
|
||||
|
||||
|
||||
async def _achat_completion_stream_custom(messages:list[dict], temp_async_client, api_model: str ) -> str:
|
||||
max_retries=3
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
response = await temp_async_client.chat.completions.create(
|
||||
messages=messages,
|
||||
model=api_model,
|
||||
temperature=0.3,
|
||||
max_tokens=4096,
|
||||
stream=True,
|
||||
timeout=180
|
||||
)
|
||||
|
||||
collected_chunks = []
|
||||
collected_messages = []
|
||||
async for chunk in response:
|
||||
collected_chunks.append(chunk)
|
||||
choices = chunk.choices
|
||||
if len(choices) > 0 and choices[0] is not None:
|
||||
chunk_message = choices[0].delta
|
||||
if chunk_message is not None:
|
||||
collected_messages.append(chunk_message)
|
||||
# if chunk_message.content:
|
||||
# print(colored(chunk_message.content, "blue", "on_white"), end="")
|
||||
# print()
|
||||
full_reply_content = "".join(
|
||||
[m.content or "" for m in collected_messages if m is not None]
|
||||
)
|
||||
|
||||
# 检查最终结果是否为空
|
||||
if not full_reply_content or full_reply_content.strip() == "":
|
||||
raise Exception(f"Stream API returned empty content for model {api_model}")
|
||||
|
||||
return full_reply_content
|
||||
except httpx.RemoteProtocolError as e:
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = (attempt + 1) *2
|
||||
print_colored(f"[API Warn] Stream connection interrupted, retrying in {wait_time}s...", "yellow")
|
||||
await asyncio.sleep(wait_time)
|
||||
continue
|
||||
except Exception as e:
|
||||
print_colored(f"[API Error] Custom API stream error: {str(e)}", "red")
|
||||
raise
|
||||
|
||||
|
||||
async def _achat_completion_stream_groq(messages: list[dict]) -> str:
|
||||
from groq import AsyncGroq
|
||||
groq_client = AsyncGroq(api_key=GROQ_API_KEY)
|
||||
client = AsyncGroq(api_key=GROQ_API_KEY)
|
||||
|
||||
max_attempts = 5
|
||||
|
||||
for attempt in range(max_attempts):
|
||||
print("Attempt to use Groq (Fase Design Mode):")
|
||||
try:
|
||||
response = await groq_client.chat.completions.create(
|
||||
stream = await client.chat.completions.create(
|
||||
messages=messages,
|
||||
# model='gemma-7b-it',
|
||||
model="mixtral-8x7b-32768",
|
||||
@@ -195,34 +92,25 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
|
||||
if attempt < max_attempts - 1: # i is zero indexed
|
||||
continue
|
||||
else:
|
||||
raise Exception("failed")
|
||||
raise "failed"
|
||||
|
||||
# 检查响应是否有效
|
||||
if not response.choices or len(response.choices) == 0:
|
||||
raise Exception("Groq API returned empty response")
|
||||
if not response.choices[0] or not response.choices[0].message:
|
||||
raise Exception("Groq API returned invalid response format")
|
||||
|
||||
full_reply_content = response.choices[0].message.content
|
||||
if full_reply_content is None:
|
||||
raise Exception("Groq API returned None content")
|
||||
|
||||
# print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
# print()
|
||||
full_reply_content = stream.choices[0].message.content
|
||||
print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
print()
|
||||
return full_reply_content
|
||||
|
||||
|
||||
async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
|
||||
from mistralai.client import MistralClient
|
||||
from mistralai.models.chat_completion import ChatMessage
|
||||
mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
|
||||
client = MistralClient(api_key=MISTRAL_API_KEY)
|
||||
# client=AsyncGroq(api_key=GROQ_API_KEY)
|
||||
max_attempts = 5
|
||||
|
||||
for attempt in range(max_attempts):
|
||||
try:
|
||||
messages[len(messages) - 1]["role"] = "user"
|
||||
stream = mistral_client.chat(
|
||||
stream = client.chat(
|
||||
messages=[
|
||||
ChatMessage(
|
||||
role=message["role"], content=message["content"]
|
||||
@@ -231,35 +119,31 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
|
||||
],
|
||||
# model = "mistral-small-latest",
|
||||
model="open-mixtral-8x7b",
|
||||
# response_format={"type": "json_object"},
|
||||
)
|
||||
break # If the operation is successful, break the loop
|
||||
except Exception:
|
||||
if attempt < max_attempts - 1: # i is zero indexed
|
||||
continue
|
||||
else:
|
||||
raise Exception("failed")
|
||||
|
||||
# 检查响应是否有效
|
||||
if not stream.choices or len(stream.choices) == 0:
|
||||
raise Exception("Mistral API returned empty response")
|
||||
if not stream.choices[0] or not stream.choices[0].message:
|
||||
raise Exception("Mistral API returned invalid response format")
|
||||
raise "failed"
|
||||
|
||||
full_reply_content = stream.choices[0].message.content
|
||||
if full_reply_content is None:
|
||||
raise Exception("Mistral API returned None content")
|
||||
|
||||
# print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
# print()
|
||||
print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
print()
|
||||
return full_reply_content
|
||||
|
||||
|
||||
async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
|
||||
response = await async_client.chat.completions.create(
|
||||
openai.api_key = OPENAI_API_KEY
|
||||
openai.api_base = OPENAI_API_BASE
|
||||
response = await openai.ChatCompletion.acreate(
|
||||
messages=messages,
|
||||
max_tokens=4096,
|
||||
n=1,
|
||||
stop=None,
|
||||
temperature=0.3,
|
||||
timeout=600,
|
||||
timeout=3,
|
||||
model="gpt-3.5-turbo-16k",
|
||||
stream=True,
|
||||
)
|
||||
@@ -270,38 +154,40 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
|
||||
# iterate through the stream of events
|
||||
async for chunk in response:
|
||||
collected_chunks.append(chunk) # save the event response
|
||||
choices = chunk.choices
|
||||
if len(choices) > 0 and choices[0] is not None:
|
||||
chunk_message = choices[0].delta
|
||||
if chunk_message is not None:
|
||||
collected_messages.append(chunk_message) # save the message
|
||||
# if chunk_message.content:
|
||||
# print(
|
||||
# colored(chunk_message.content, "blue", "on_white"),
|
||||
# end="",
|
||||
# )
|
||||
# print()
|
||||
choices = chunk["choices"]
|
||||
if len(choices) > 0:
|
||||
chunk_message = chunk["choices"][0].get(
|
||||
"delta", {}
|
||||
) # extract the message
|
||||
collected_messages.append(chunk_message) # save the message
|
||||
if "content" in chunk_message:
|
||||
print(
|
||||
colored(chunk_message["content"], "blue", "on_white"),
|
||||
end="",
|
||||
)
|
||||
print()
|
||||
|
||||
full_reply_content = "".join(
|
||||
[m.content or "" for m in collected_messages if m is not None]
|
||||
[m.get("content", "") for m in collected_messages]
|
||||
)
|
||||
|
||||
# 检查最终结果是否为空
|
||||
if not full_reply_content or full_reply_content.strip() == "":
|
||||
raise Exception("Stream API (gpt-3.5) returned empty content")
|
||||
|
||||
return full_reply_content
|
||||
|
||||
|
||||
def _achat_completion_json(messages: list[dict] ) -> str:
|
||||
async def _achat_completion_json(messages: list[dict]) -> str:
|
||||
openai.api_key = OPENAI_API_KEY
|
||||
openai.api_base = OPENAI_API_BASE
|
||||
|
||||
max_attempts = 5
|
||||
|
||||
for attempt in range(max_attempts):
|
||||
try:
|
||||
response = async_client.chat.completions.create(
|
||||
stream = await openai.ChatCompletion.acreate(
|
||||
messages=messages,
|
||||
max_tokens=4096,
|
||||
n=1,
|
||||
stop=None,
|
||||
temperature=0.3,
|
||||
timeout=600,
|
||||
timeout=3,
|
||||
model=MODEL,
|
||||
response_format={"type": "json_object"},
|
||||
)
|
||||
@@ -310,87 +196,62 @@ def _achat_completion_json(messages: list[dict] ) -> str:
|
||||
if attempt < max_attempts - 1: # i is zero indexed
|
||||
continue
|
||||
else:
|
||||
raise Exception("failed")
|
||||
raise "failed"
|
||||
|
||||
# 检查响应是否有效
|
||||
if not response.choices or len(response.choices) == 0:
|
||||
raise Exception("OpenAI API returned empty response")
|
||||
if not response.choices[0] or not response.choices[0].message:
|
||||
raise Exception("OpenAI API returned invalid response format")
|
||||
|
||||
full_reply_content = response.choices[0].message.content
|
||||
if full_reply_content is None:
|
||||
raise Exception("OpenAI API returned None content")
|
||||
|
||||
# print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
# print()
|
||||
full_reply_content = stream.choices[0].message.content
|
||||
print(colored(full_reply_content, "blue", "on_white"), end="")
|
||||
print()
|
||||
return full_reply_content
|
||||
|
||||
|
||||
async def _achat_completion_stream(messages: list[dict]) -> str:
|
||||
try:
|
||||
response = await async_client.chat.completions.create(
|
||||
**_cons_kwargs(messages), stream=True
|
||||
)
|
||||
openai.api_key = OPENAI_API_KEY
|
||||
openai.api_base = OPENAI_API_BASE
|
||||
response = await openai.ChatCompletion.acreate(
|
||||
**_cons_kwargs(messages), stream=True
|
||||
)
|
||||
|
||||
# create variables to collect the stream of chunks
|
||||
collected_chunks = []
|
||||
collected_messages = []
|
||||
# iterate through the stream of events
|
||||
async for chunk in response:
|
||||
collected_chunks.append(chunk) # save the event response
|
||||
choices = chunk.choices
|
||||
if len(choices) > 0 and choices[0] is not None:
|
||||
chunk_message = choices[0].delta
|
||||
if chunk_message is not None:
|
||||
collected_messages.append(chunk_message) # save the message
|
||||
# if chunk_message.content:
|
||||
# print(
|
||||
# colored(chunk_message.content, "blue", "on_white"),
|
||||
# end="",
|
||||
# )
|
||||
# print()
|
||||
# create variables to collect the stream of chunks
|
||||
collected_chunks = []
|
||||
collected_messages = []
|
||||
# iterate through the stream of events
|
||||
async for chunk in response:
|
||||
collected_chunks.append(chunk) # save the event response
|
||||
choices = chunk["choices"]
|
||||
if len(choices) > 0:
|
||||
chunk_message = chunk["choices"][0].get(
|
||||
"delta", {}
|
||||
) # extract the message
|
||||
collected_messages.append(chunk_message) # save the message
|
||||
if "content" in chunk_message:
|
||||
print(
|
||||
colored(chunk_message["content"], "blue", "on_white"),
|
||||
end="",
|
||||
)
|
||||
print()
|
||||
|
||||
full_reply_content = "".join(
|
||||
[m.content or "" for m in collected_messages if m is not None]
|
||||
)
|
||||
|
||||
# 检查最终结果是否为空
|
||||
if not full_reply_content or full_reply_content.strip() == "":
|
||||
raise Exception("Stream API returned empty content")
|
||||
|
||||
return full_reply_content
|
||||
except Exception as e:
|
||||
print_colored(f"[API Error] OpenAI API stream error: {str(e)}", "red")
|
||||
raise
|
||||
full_reply_content = "".join(
|
||||
[m.get("content", "") for m in collected_messages]
|
||||
)
|
||||
return full_reply_content
|
||||
|
||||
|
||||
def _chat_completion(messages: list[dict]) -> str:
|
||||
try:
|
||||
rsp = client.chat.completions.create(**_cons_kwargs(messages))
|
||||
|
||||
# 检查响应是否有效
|
||||
if not rsp.choices or len(rsp.choices) == 0:
|
||||
raise Exception("OpenAI API returned empty response")
|
||||
if not rsp.choices[0] or not rsp.choices[0].message:
|
||||
raise Exception("OpenAI API returned invalid response format")
|
||||
|
||||
content = rsp.choices[0].message.content
|
||||
if content is None:
|
||||
raise Exception("OpenAI API returned None content")
|
||||
|
||||
return content
|
||||
except Exception as e:
|
||||
print_colored(f"[API Error] OpenAI API error: {str(e)}", "red")
|
||||
raise
|
||||
print(messages, flush=True)
|
||||
rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
|
||||
content = rsp["choices"][0]["message"]["content"]
|
||||
print(content, flush=True)
|
||||
return content
|
||||
|
||||
|
||||
def _cons_kwargs(messages: list[dict]) -> dict:
|
||||
kwargs = {
|
||||
"messages": messages,
|
||||
"max_tokens": 2000,
|
||||
"temperature": 0.3,
|
||||
"timeout": 600,
|
||||
"max_tokens": 4096,
|
||||
"n": 1,
|
||||
"stop": None,
|
||||
"temperature": 0.5,
|
||||
"timeout": 3,
|
||||
}
|
||||
kwargs_mode = {"model": MODEL}
|
||||
kwargs.update(kwargs_mode)
|
||||
|
||||
@@ -7,8 +7,6 @@ PROMPT_ABILITY_REQUIREMENT_GENERATION = """
|
||||
## Instruction
|
||||
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirement that is required by the "Current Task". The ability should be summarized concisely within a few words.
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all ability requirements.**
|
||||
|
||||
## General Goal (The general goal for the collaboration plan, "Current Task" is just one of its substep)
|
||||
{General_Goal}
|
||||
|
||||
@@ -43,7 +41,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
|
||||
),
|
||||
},
|
||||
]
|
||||
#print(messages[1]["content"])
|
||||
print(messages[1]["content"])
|
||||
return read_LLM_Completion(messages)["AbilityRequirement"]
|
||||
|
||||
|
||||
@@ -51,8 +49,6 @@ PROMPT_AGENT_ABILITY_SCORING = """
|
||||
## Instruction
|
||||
Based on "Agent Board" and "Ability Requirement", output a score for each agent to estimate the possibility that the agent can fulfil the "Ability Requirement". The score should be 1-5. Provide a concise reason before you assign the score.
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all reasons and explanations.**
|
||||
|
||||
## AgentBoard
|
||||
{Agent_Board}
|
||||
|
||||
@@ -104,7 +100,7 @@ def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
|
||||
),
|
||||
},
|
||||
]
|
||||
#print(messages[1]["content"])
|
||||
print(messages[1]["content"])
|
||||
scoreTable[Ability_Requirement] = read_LLM_Completion(messages)
|
||||
return scoreTable
|
||||
|
||||
@@ -137,6 +133,5 @@ def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
|
||||
|
||||
|
||||
def AgentSelectModify_addAspect(aspectList, Agent_Board):
|
||||
newAspect = aspectList[-1]
|
||||
scoreTable = agentAbilityScoring(Agent_Board, [newAspect])
|
||||
scoreTable = agentAbilityScoring(Agent_Board, aspectList)
|
||||
return scoreTable
|
||||
|
||||
@@ -33,9 +33,7 @@ class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
|
||||
|
||||
PROMPT_AGENT_SELECTION_GENERATION = """
|
||||
## Instruction
|
||||
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all explanations and reasoning, though agent names should remain in their original form.**
|
||||
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board". Agent Selection Plan ranks from high to low according to ability.
|
||||
|
||||
## General Goal (Specify the general goal for the collaboration plan)
|
||||
{General_Goal}
|
||||
@@ -77,14 +75,11 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
|
||||
),
|
||||
},
|
||||
]
|
||||
print(messages[1]["content"])
|
||||
return read_LLM_Completion(messages)["AbilityRequirement"]
|
||||
|
||||
|
||||
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
|
||||
# Check if Agent_Board is None or empty
|
||||
if Agent_Board is None or len(Agent_Board) == 0:
|
||||
raise ValueError("Agent_Board cannot be None or empty. Please ensure agents are set via /setAgents endpoint before generating a plan.")
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
@@ -99,13 +94,12 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
|
||||
),
|
||||
},
|
||||
]
|
||||
#print(messages[1]["content"])
|
||||
print(messages[1]["content"])
|
||||
|
||||
agentboard_set = {agent["Name"] for agent in Agent_Board}
|
||||
|
||||
while True:
|
||||
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
|
||||
# 添加调试打印
|
||||
if len(candidate) > MAX_TEAM_SIZE:
|
||||
teamSize = random.randint(2, MAX_TEAM_SIZE)
|
||||
candidate = candidate[0:teamSize]
|
||||
@@ -113,6 +107,7 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
|
||||
continue
|
||||
AgentSelectionPlan = sorted(candidate)
|
||||
AgentSelectionPlan_set = set(AgentSelectionPlan)
|
||||
|
||||
# Check if every item in AgentSelectionPlan is in agentboard
|
||||
if AgentSelectionPlan_set.issubset(agentboard_set):
|
||||
break # If all items are in agentboard, break the loop
|
||||
|
||||
@@ -1,53 +1,55 @@
|
||||
from AgentCoord.PlanEngine.planOutline_Generator import generate_PlanOutline
|
||||
# from AgentCoord.PlanEngine.AgentSelection_Generator import (
|
||||
# generate_AgentSelection,
|
||||
# )
|
||||
from AgentCoord.PlanEngine.AgentSelection_Generator import (
|
||||
generate_AgentSelection,
|
||||
)
|
||||
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
|
||||
import AgentCoord.util as util
|
||||
|
||||
|
||||
def generate_basePlan(
|
||||
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
|
||||
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List, context
|
||||
):
|
||||
"""
|
||||
优化模式:生成大纲 + 智能体选择,但不生成任务流程
|
||||
优化用户体验:
|
||||
1. 快速生成大纲和分配智能体
|
||||
2. 用户可以看到完整的大纲和智能体图标
|
||||
3. TaskProcess由前端通过 fillStepTask API 异步填充
|
||||
|
||||
"""
|
||||
# 参数保留以保持接口兼容性
|
||||
_ = AgentProfile_Dict
|
||||
PlanOutline = generate_PlanOutline(
|
||||
InitialObject_List=InitialObject_List, General_Goal=General_Goal
|
||||
)
|
||||
|
||||
basePlan = {
|
||||
"General Goal": General_Goal,
|
||||
"Initial Input Object": InitialObject_List,
|
||||
"Collaboration Process": []
|
||||
"Collaboration Process": [],
|
||||
}
|
||||
|
||||
PlanOutline = generate_PlanOutline(
|
||||
InitialObject_List=[], General_Goal=General_Goal + context
|
||||
)
|
||||
for stepItem in PlanOutline:
|
||||
# # 为每个步骤分配智能体
|
||||
# Current_Task = {
|
||||
# "TaskName": stepItem["StepName"],
|
||||
# "InputObject_List": stepItem["InputObject_List"],
|
||||
# "OutputObject": stepItem["OutputObject"],
|
||||
# "TaskContent": stepItem["TaskContent"],
|
||||
# }
|
||||
# AgentSelection = generate_AgentSelection(
|
||||
# General_Goal=General_Goal,
|
||||
# Current_Task=Current_Task,
|
||||
# Agent_Board=Agent_Board,
|
||||
# )
|
||||
|
||||
# 添加智能体选择,但不添加任务流程
|
||||
stepItem["AgentSelection"] = []
|
||||
stepItem["TaskProcess"] = [] # 空数组,由前端异步填充
|
||||
stepItem["Collaboration_Brief_frontEnd"] = {
|
||||
"template": "",
|
||||
"data": {}
|
||||
Current_Task = {
|
||||
"TaskName": stepItem["StepName"],
|
||||
"InputObject_List": stepItem["InputObject_List"],
|
||||
"OutputObject": stepItem["OutputObject"],
|
||||
"TaskContent": stepItem["TaskContent"],
|
||||
}
|
||||
AgentSelection = generate_AgentSelection(
|
||||
General_Goal=General_Goal,
|
||||
Current_Task=Current_Task,
|
||||
Agent_Board=Agent_Board,
|
||||
)
|
||||
Current_Task_Description = {
|
||||
"TaskName": stepItem["StepName"],
|
||||
"AgentInvolved": [
|
||||
{"Name": name, "Profile": AgentProfile_Dict[name]}
|
||||
for name in AgentSelection
|
||||
],
|
||||
"InputObject_List": stepItem["InputObject_List"],
|
||||
"OutputObject": stepItem["OutputObject"],
|
||||
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
|
||||
stepItem["InputObject_List"],
|
||||
stepItem["OutputObject"],
|
||||
AgentSelection,
|
||||
stepItem["TaskContent"],
|
||||
),
|
||||
}
|
||||
TaskProcess = generate_TaskProcess(
|
||||
General_Goal=General_Goal + context,
|
||||
Current_Task_Description=Current_Task_Description,
|
||||
)
|
||||
# add the generated AgentSelection and TaskProcess to the stepItem
|
||||
stepItem["AgentSelection"] = AgentSelection
|
||||
stepItem["TaskProcess"] = TaskProcess
|
||||
basePlan["Collaboration Process"].append(stepItem)
|
||||
|
||||
return basePlan
|
||||
basePlan["General Goal"] = General_Goal
|
||||
return basePlan
|
||||
|
||||
@@ -9,8 +9,6 @@ PROMPT_PLAN_OUTLINE_BRANCHING = """
|
||||
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the plan for "General Goal".
|
||||
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all content, including StepName, TaskContent, and OutputObject fields.**
|
||||
|
||||
## General Goal (Specify the general goal for the plan)
|
||||
{General_Goal}
|
||||
|
||||
@@ -87,7 +85,7 @@ def branch_PlanOutline(
|
||||
InitialObject_List=str(InitialObject_List),
|
||||
General_Goal=General_Goal,
|
||||
)
|
||||
#print(prompt)
|
||||
print(prompt)
|
||||
branch_List = []
|
||||
for _ in range(branch_Number):
|
||||
messages = [
|
||||
|
||||
@@ -29,8 +29,6 @@ PROMPT_TASK_PROCESS_BRANCHING = """
|
||||
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the "Task for Current Step".
|
||||
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for the Description field and all explanations, while keeping ID, ActionType, and AgentName in their original format.**
|
||||
|
||||
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
|
||||
{General_Goal}
|
||||
|
||||
@@ -57,39 +55,26 @@ Note: "Modification Requirement" specifies how to modify the "Baseline Completio
|
||||
"ID": "Action4",
|
||||
"ActionType": "Propose",
|
||||
"AgentName": "Mia",
|
||||
"Description": "提议关于人工智能情感发展的心理学理论,重点关注爱与依恋的概念。",
|
||||
"Description": "Propose psychological theories on love and attachment that could be applied to AI's emotional development.",
|
||||
"ImportantInput": [
|
||||
"InputObject:Story Outline"
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"ID": "Action5",
|
||||
"ActionType": "Critique",
|
||||
"ActionType": "Propose",
|
||||
"AgentName": "Noah",
|
||||
"Description": "对Mia提出的心理学理论进行批判性评估,分析其在AI情感发展场景中的适用性和局限性。",
|
||||
"ImportantInput": [
|
||||
"ActionResult:Action4"
|
||||
]
|
||||
"Description": "Propose ethical considerations and philosophical questions regarding AI's capacity for love.",
|
||||
"ImportantInput": []
|
||||
}},
|
||||
{{
|
||||
"ID": "Action6",
|
||||
"ActionType": "Improve",
|
||||
"AgentName": "Liam",
|
||||
"Description": "基于Noah的批判性反馈,改进和完善心理学理论框架,使其更贴合AI情感发展的实际需求。",
|
||||
"ImportantInput": [
|
||||
"ActionResult:Action4",
|
||||
"ActionResult:Action5"
|
||||
]
|
||||
}},
|
||||
{{
|
||||
"ID": "Action7",
|
||||
"ActionType": "Finalize",
|
||||
"AgentName": "Mia",
|
||||
"Description": "综合所有提议、批判和改进意见,整合并提交最终的AI情感发展心理学理论框架。",
|
||||
"AgentName": "Liam",
|
||||
"Description": "Combine the poetic elements and ethical considerations into a cohesive set of core love elements for the story.",
|
||||
"ImportantInput": [
|
||||
"ActionResult:Action4",
|
||||
"ActionResult:Action5",
|
||||
"ActionResult:Action6"
|
||||
"ActionResult:Action1",
|
||||
"ActionResult:Action5"
|
||||
]
|
||||
}}
|
||||
]
|
||||
@@ -99,12 +84,7 @@ Note: "Modification Requirement" specifies how to modify the "Baseline Completio
|
||||
ImportantInput: Specify if there is any previous result that should be taken special consideration during the execution the action. Should be of format "InputObject:xx" or "ActionResult:xx".
|
||||
InputObject_List: List existing objects that should be utilized in current step.
|
||||
AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
|
||||
ActionType: Specify the type of action. **CRITICAL REQUIREMENTS:**
|
||||
1. The "Remaining Steps" MUST include ALL FOUR action types in the following order: Propose -> Critique -> Improve -> Finalize
|
||||
2. Each action type (Propose, Critique, Improve, Finalize) MUST appear at least once
|
||||
3. The actions must follow the sequence: Propose actions first, then Critique actions, then Improve actions, and Finalize must be the last action
|
||||
4. Even if only one agent is involved in a phase, that phase must still have its corresponding action type
|
||||
5. The last action must ALWAYS be of type "Finalize"
|
||||
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
|
||||
|
||||
"""
|
||||
|
||||
@@ -155,7 +135,7 @@ def branch_TaskProcess(
|
||||
General_Goal=General_Goal,
|
||||
Act_Set=ACT_SET,
|
||||
)
|
||||
#print(prompt)
|
||||
print(prompt)
|
||||
branch_List = []
|
||||
for i in range(branch_Number):
|
||||
messages = [
|
||||
|
||||
@@ -5,9 +5,7 @@ import json
|
||||
|
||||
PROMPT_PLAN_OUTLINE_GENERATION = """
|
||||
## Instruction
|
||||
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all content, including StepName, TaskContent, and OutputObject fields.**
|
||||
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline". The number of steps in the Plan Outline cannot exceed 5.
|
||||
|
||||
## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
|
||||
{InitialObject_List}
|
||||
@@ -53,6 +51,7 @@ TaskContent: Describe the task of the current step.
|
||||
InputObject_List: The list of the input obejects that will be used in current step.
|
||||
OutputObject: The name of the final output object of current step.
|
||||
|
||||
请用中文回答
|
||||
"""
|
||||
|
||||
|
||||
@@ -85,16 +84,4 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
|
||||
),
|
||||
},
|
||||
]
|
||||
result = read_LLM_Completion(messages)
|
||||
if isinstance(result, dict) and "Plan_Outline" in result:
|
||||
return result["Plan_Outline"]
|
||||
else:
|
||||
# 如果格式不正确,返回默认的计划大纲
|
||||
return [
|
||||
{
|
||||
"StepName": "Default Step",
|
||||
"TaskContent": "Generated default plan step due to format error",
|
||||
"InputObject_List": [],
|
||||
"OutputObject": "Default Output"
|
||||
}
|
||||
]
|
||||
return read_LLM_Completion(messages)["Plan_Outline"]
|
||||
|
||||
@@ -25,8 +25,6 @@ PROMPT_TASK_PROCESS_GENERATION = """
|
||||
## Instruction
|
||||
Based on "General Goal", "Task for Current Step", "Action Set" and "Output Format Example", design a plan for "Task for Current Step", output a formatted "Task_Process_Plan".
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for the Description field and all explanations, while keeping ID, ActionType, and AgentName in their original format.**
|
||||
|
||||
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
|
||||
{General_Goal}
|
||||
|
||||
@@ -92,6 +90,7 @@ InputObject_List: List existing objects that should be utilized in current step.
|
||||
AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
|
||||
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
|
||||
|
||||
请用中文回答
|
||||
"""
|
||||
|
||||
|
||||
@@ -124,7 +123,7 @@ def generate_TaskProcess(General_Goal, Current_Task_Description):
|
||||
),
|
||||
},
|
||||
]
|
||||
#print(messages[1]["content"])
|
||||
print(messages[1]["content"])
|
||||
|
||||
# write a callback function, if read_LLM_Completion(messages)["Task_Process_Plan"] dont have the right format, call this function again
|
||||
while True:
|
||||
|
||||
@@ -5,12 +5,10 @@ PROMPT_TEMPLATE_TAKE_ACTION_BASE = '''
|
||||
Your name is {agentName}. You will play the role as the Profile indicates.
|
||||
Profile: {agentProfile}
|
||||
|
||||
You are within a multi-agent collaboration for the "Current Task".
|
||||
Now it's your turn to take action. Read the "Context Information" and take your action following "Instruction for Your Current Action".
|
||||
You are within a multi-agent collaboration for the "Current Task".
|
||||
Now it's your turn to take action. Read the "Context Information" and take your action following "Instruction for Your Current Action".
|
||||
Note: Important Input for your action are marked with *Important Input*
|
||||
|
||||
**IMPORTANT LANGUAGE REQUIREMENT: You must respond in Chinese (中文) for all your answers and outputs.**
|
||||
|
||||
## Context Information
|
||||
|
||||
### General Goal (The "Current Task" is indeed a substep of the general goal)
|
||||
@@ -25,8 +23,8 @@ Note: Important Input for your action are marked with *Important Input*
|
||||
### History Action
|
||||
{History_Action}
|
||||
|
||||
## Instruction for Your Current Action
|
||||
{Action_Description}
|
||||
## Instruction for Your Current Action
|
||||
{Action_Description}
|
||||
|
||||
{Action_Custom_Note}
|
||||
|
||||
@@ -82,33 +80,10 @@ class BaseAction():
|
||||
Important_Mark = ""
|
||||
action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)
|
||||
|
||||
# Handle missing agent profiles gracefully
|
||||
model_config = None
|
||||
if agentName not in AgentProfile_Dict:
|
||||
agentProfile = f"AI Agent named {agentName}"
|
||||
else:
|
||||
# agentProfile = AgentProfile_Dict[agentName]
|
||||
agent_config = AgentProfile_Dict[agentName]
|
||||
agentProfile = agent_config.get("profile",f"AI Agent named {agentName}")
|
||||
if agent_config.get("useCustomAPI",False):
|
||||
model_config = {
|
||||
"apiModel":agent_config.get("apiModel"),
|
||||
"apiUrl":agent_config.get("apiUrl"),
|
||||
"apiKey":agent_config.get("apiKey"),
|
||||
}
|
||||
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(
|
||||
agentName = agentName,
|
||||
agentProfile = agentProfile,
|
||||
General_Goal = General_Goal,
|
||||
Current_Task_Description = TaskDescription,
|
||||
Input_Objects = inputObject_Record,
|
||||
History_Action = action_Record,
|
||||
Action_Description = self.info["Description"],
|
||||
Action_Custom_Note = self.Action_Custom_Note
|
||||
)
|
||||
#print_colored(text = prompt, text_color="red")
|
||||
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
|
||||
print_colored(text = prompt, text_color="red")
|
||||
messages = [{"role":"system", "content": prompt}]
|
||||
ActionResult = LLM_Completion(messages,True,False,model_config=model_config)
|
||||
ActionResult = LLM_Completion(messages,True,False)
|
||||
ActionInfo_with_Result = copy.deepcopy(self.info)
|
||||
ActionInfo_with_Result["Action_Result"] = ActionResult
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
|
||||
|
||||
ACTION_CUSTOM_NOTE = '''
|
||||
注意:由于你在对话中,你的批评必须简洁、清晰且易于阅读,不要让人感到压力过大。如果你要列出一些观点,最多列出2点。
|
||||
Note: Since you are in a conversation, your critique must be concise, clear and easy to read, don't overwhelm others. If you want to list some points, list at most 2 points.
|
||||
|
||||
'''
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@ from AgentCoord.util.converter import read_outputObject_content
|
||||
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
|
||||
|
||||
ACTION_CUSTOM_NOTE = '''
|
||||
注意:你可以在给出{OutputName}的最终内容之前先说一些话。当你决定给出{OutputName}的最终内容时,应该这样包含:
|
||||
Note: You can say something before you give the final content of {OutputName}. When you decide to give the final content of {OutputName}, it should be enclosed like this:
|
||||
```{OutputName}
|
||||
({OutputName}的内容)
|
||||
(the content of {OutputName})
|
||||
```
|
||||
'''
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
|
||||
|
||||
ACTION_CUSTOM_NOTE = '''
|
||||
注意:你可以在提供改进版本的内容之前先说一些话。
|
||||
你提供的改进版本必须是完整的版本(例如,如果你提供改进的故事,你应该给出完整的故事内容,而不仅仅是报告你在哪里改进了)。
|
||||
当你决定提供内容的改进版本时,应该这样开始:
|
||||
Note: You can say something before you provide the improved version of the content.
|
||||
The improved version you provide must be a completed version (e.g. if you provide a improved story, you should give completed story content, rather than just reporting where you have improved).
|
||||
When you decide to give the improved version of the content, it should be start like this:
|
||||
|
||||
## xxx的改进版本
|
||||
(改进版本的内容)
|
||||
(the improved version of the content)
|
||||
```
|
||||
|
||||
'''
|
||||
|
||||
@@ -4,7 +4,7 @@ from termcolor import colored
|
||||
|
||||
|
||||
# Accept inputs: num_StepToRun (the number of step to run, if None, run to the end), plan, RehearsalLog, AgentProfile_Dict
|
||||
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
|
||||
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
|
||||
# Prepare for execution
|
||||
KeyObjects = {}
|
||||
finishedStep_index = -1
|
||||
@@ -83,15 +83,11 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
|
||||
}
|
||||
|
||||
# start the group chat
|
||||
util.print_colored(TaskDescription, text_color="green")
|
||||
ActionHistory = []
|
||||
action_count = 0
|
||||
total_actions = len(TaskProcess)
|
||||
|
||||
for ActionInfo in TaskProcess:
|
||||
action_count += 1
|
||||
actionType = ActionInfo["ActionType"]
|
||||
agentName = ActionInfo["AgentName"]
|
||||
|
||||
if actionType in Action.customAction_Dict:
|
||||
currentAction = Action.customAction_Dict[actionType](
|
||||
info=ActionInfo,
|
||||
@@ -105,7 +101,7 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
|
||||
KeyObjects=KeyObjects,
|
||||
)
|
||||
ActionInfo_with_Result = currentAction.run(
|
||||
General_Goal=plan["General Goal"],
|
||||
General_Goal=plan["General Goal"] + "\n\n### Useful Information (some information can help accomplish the task)\n如果使用该信息,请在回答中显示指出是来自数联网的数据。例如,基于数联网数据搜索结果。" + context,
|
||||
TaskDescription=TaskDescription,
|
||||
agentName=agentName,
|
||||
AgentProfile_Dict=AgentProfile_Dict,
|
||||
@@ -117,9 +113,18 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
|
||||
ActionHistory.append(ActionInfo_with_Result)
|
||||
# post processing for the group chat (finish)
|
||||
objectLogNode["content"] = KeyObjects[OutputName]
|
||||
if StepRun_count == len(plan["Collaboration Process"][(finishedStep_index + 1): run_to]):
|
||||
objectLogNode["content"] += context
|
||||
RehearsalLog.append(stepLogNode)
|
||||
RehearsalLog.append(objectLogNode)
|
||||
stepLogNode["ActionHistory"] = ActionHistory
|
||||
|
||||
# Return Output
|
||||
print(
|
||||
colored(
|
||||
"$Run " + str(StepRun_count) + "step$",
|
||||
color="black",
|
||||
on_color="on_white",
|
||||
)
|
||||
)
|
||||
return RehearsalLog
|
||||
|
||||
@@ -1,619 +0,0 @@
|
||||
"""
|
||||
优化版执行计划 - 支持动态追加步骤
|
||||
在执行过程中可以接收新的步骤并追加到执行队列
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
from typing import List, Dict, Set, Generator, Any
|
||||
import AgentCoord.RehearsalEngine_V2.Action as Action
|
||||
import AgentCoord.util as util
|
||||
from termcolor import colored
|
||||
from AgentCoord.RehearsalEngine_V2.execution_state import execution_state_manager
|
||||
from AgentCoord.RehearsalEngine_V2.dynamic_execution_manager import dynamic_execution_manager
|
||||
|
||||
|
||||
# ==================== 配置参数 ====================
|
||||
# 最大并发请求数
|
||||
MAX_CONCURRENT_REQUESTS = 2
|
||||
|
||||
# 批次之间的延迟
|
||||
BATCH_DELAY = 1.0
|
||||
|
||||
# 429错误重试次数和延迟
|
||||
MAX_RETRIES = 3
|
||||
RETRY_DELAY = 5.0
|
||||
|
||||
|
||||
# ==================== 限流器 ====================
|
||||
class RateLimiter:
|
||||
"""
|
||||
异步限流器,控制并发请求数量
|
||||
"""
|
||||
|
||||
def __init__(self, max_concurrent: int = MAX_CONCURRENT_REQUESTS):
|
||||
self.semaphore = asyncio.Semaphore(max_concurrent)
|
||||
self.max_concurrent = max_concurrent
|
||||
|
||||
async def __aenter__(self):
|
||||
await self.semaphore.acquire()
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *args):
|
||||
self.semaphore.release()
|
||||
|
||||
|
||||
# 全局限流器实例
|
||||
rate_limiter = RateLimiter()
|
||||
|
||||
|
||||
def build_action_dependency_graph(TaskProcess: List[Dict]) -> Dict[int, List[int]]:
|
||||
"""
|
||||
构建动作依赖图
|
||||
|
||||
Args:
|
||||
TaskProcess: 任务流程列表
|
||||
|
||||
Returns:
|
||||
依赖映射字典 {action_index: [dependent_action_indices]}
|
||||
"""
|
||||
dependency_map = {i: [] for i in range(len(TaskProcess))}
|
||||
|
||||
for i, action in enumerate(TaskProcess):
|
||||
important_inputs = action.get('ImportantInput', [])
|
||||
if not important_inputs:
|
||||
continue
|
||||
|
||||
# 检查是否依赖其他动作的ActionResult
|
||||
for j, prev_action in enumerate(TaskProcess):
|
||||
if i == j:
|
||||
continue
|
||||
|
||||
# 判断是否依赖前一个动作的结果
|
||||
if any(
|
||||
inp.startswith('ActionResult:') and
|
||||
inp == f'ActionResult:{prev_action["ID"]}'
|
||||
for inp in important_inputs
|
||||
):
|
||||
dependency_map[i].append(j)
|
||||
|
||||
return dependency_map
|
||||
|
||||
|
||||
def get_parallel_batches(TaskProcess: List[Dict], dependency_map: Dict[int, List[int]]) -> List[List[int]]:
|
||||
"""
|
||||
将动作分为多个批次,每批内部可以并行执行
|
||||
|
||||
Args:
|
||||
TaskProcess: 任务流程列表
|
||||
dependency_map: 依赖图
|
||||
|
||||
Returns:
|
||||
批次列表 [[batch1_indices], [batch2_indices], ...]
|
||||
"""
|
||||
batches = []
|
||||
completed: Set[int] = set()
|
||||
|
||||
while len(completed) < len(TaskProcess):
|
||||
# 找出所有依赖已满足的动作
|
||||
ready_to_run = [
|
||||
i for i in range(len(TaskProcess))
|
||||
if i not in completed and
|
||||
all(dep in completed for dep in dependency_map[i])
|
||||
]
|
||||
|
||||
if not ready_to_run:
|
||||
# 避免死循环
|
||||
remaining = [i for i in range(len(TaskProcess)) if i not in completed]
|
||||
if remaining:
|
||||
ready_to_run = remaining[:1]
|
||||
else:
|
||||
break
|
||||
|
||||
batches.append(ready_to_run)
|
||||
completed.update(ready_to_run)
|
||||
|
||||
return batches
|
||||
|
||||
|
||||
async def execute_single_action_async(
|
||||
ActionInfo: Dict,
|
||||
General_Goal: str,
|
||||
TaskDescription: str,
|
||||
OutputName: str,
|
||||
KeyObjects: Dict,
|
||||
ActionHistory: List,
|
||||
agentName: str,
|
||||
AgentProfile_Dict: Dict,
|
||||
InputName_List: List[str]
|
||||
) -> Dict:
|
||||
"""
|
||||
异步执行单个动作
|
||||
|
||||
Args:
|
||||
ActionInfo: 动作信息
|
||||
General_Goal: 总体目标
|
||||
TaskDescription: 任务描述
|
||||
OutputName: 输出对象名称
|
||||
KeyObjects: 关键对象字典
|
||||
ActionHistory: 动作历史
|
||||
agentName: 智能体名称
|
||||
AgentProfile_Dict: 智能体配置字典
|
||||
InputName_List: 输入名称列表
|
||||
|
||||
Returns:
|
||||
动作执行结果
|
||||
"""
|
||||
actionType = ActionInfo["ActionType"]
|
||||
|
||||
# 创建动作实例
|
||||
if actionType in Action.customAction_Dict:
|
||||
currentAction = Action.customAction_Dict[actionType](
|
||||
info=ActionInfo,
|
||||
OutputName=OutputName,
|
||||
KeyObjects=KeyObjects,
|
||||
)
|
||||
else:
|
||||
currentAction = Action.BaseAction(
|
||||
info=ActionInfo,
|
||||
OutputName=OutputName,
|
||||
KeyObjects=KeyObjects,
|
||||
)
|
||||
|
||||
# 在线程池中运行,避免阻塞事件循环
|
||||
loop = asyncio.get_event_loop()
|
||||
ActionInfo_with_Result = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: currentAction.run(
|
||||
General_Goal=General_Goal,
|
||||
TaskDescription=TaskDescription,
|
||||
agentName=agentName,
|
||||
AgentProfile_Dict=AgentProfile_Dict,
|
||||
InputName_List=InputName_List,
|
||||
OutputName=OutputName,
|
||||
KeyObjects=KeyObjects,
|
||||
ActionHistory=ActionHistory,
|
||||
)
|
||||
)
|
||||
|
||||
return ActionInfo_with_Result
|
||||
|
||||
|
||||
async def execute_step_async_streaming(
    stepDescrip: Dict,
    General_Goal: str,
    AgentProfile_Dict: Dict,
    KeyObjects: Dict,
    step_index: int,
    total_steps: int,
    execution_id: str = None,
    RehearsalLog: List = None  # used to append log nodes into the caller's history
) -> Generator[Dict, None, None]:
    # NOTE(review): this is an async generator; the annotation should arguably
    # be AsyncGenerator[Dict, None], but it is kept unchanged because the
    # file's typing imports are not visible here.
    """
    Execute a single step asynchronously, streaming progress events.

    Args:
        stepDescrip: Step descriptor (StepName, TaskContent, InputObject_List,
            OutputObject, AgentSelection, TaskProcess).
        General_Goal: Overall goal of the plan.
        AgentProfile_Dict: Agent configuration mapping.
        KeyObjects: Mapping of key object names to their contents.
        step_index: Index of this step.
        total_steps: Total number of steps.
        execution_id: Execution id (used for pause/stop checks).
        RehearsalLog: Optional mutable history list; the step and object log
            nodes are appended to it on completion.

    Yields:
        Event dictionaries ("step_start", "action_complete", "step_complete").
    """
    # Prepare step information, normalizing camelCase names for display.
    StepName = (
        util.camel_case_to_normal(stepDescrip["StepName"])
        if util.is_camel_case(stepDescrip["StepName"])
        else stepDescrip["StepName"]
    )
    TaskContent = stepDescrip["TaskContent"]
    InputName_List = (
        [
            (
                util.camel_case_to_normal(obj)
                if util.is_camel_case(obj)
                else obj
            )
            for obj in stepDescrip["InputObject_List"]
        ]
        if stepDescrip["InputObject_List"] is not None
        else None
    )
    OutputName = (
        util.camel_case_to_normal(stepDescrip["OutputObject"])
        if util.is_camel_case(stepDescrip["OutputObject"])
        else stepDescrip["OutputObject"]
    )
    Agent_List = stepDescrip["AgentSelection"]
    TaskProcess = stepDescrip["TaskProcess"]

    TaskDescription = (
        util.converter.generate_template_sentence_for_CollaborationBrief(
            input_object_list=InputName_List,
            output_object=OutputName,
            agent_list=Agent_List,
            step_task=TaskContent,
        )
    )

    # Initialize log nodes.
    # FIX: InputName_List is None when the step has no input objects (see the
    # guard above); the previous comprehension raised TypeError in that case.
    inputObject_Record = (
        [{InputName: KeyObjects[InputName]} for InputName in InputName_List]
        if InputName_List is not None
        else []
    )
    stepLogNode = {
        "LogNodeType": "step",
        "NodeId": StepName,
        "InputName_List": InputName_List,
        "OutputName": OutputName,
        "chatLog": [],
        "inputObject_Record": inputObject_Record,
    }
    objectLogNode = {
        "LogNodeType": "object",
        "NodeId": OutputName,
        "content": None,
    }

    # Emit the step-start event.
    yield {
        "type": "step_start",
        "step_index": step_index,
        "total_steps": total_steps,
        "step_name": StepName,
        "task_description": TaskDescription,
    }

    # Build the action dependency graph and split into parallel batches.
    dependency_map = build_action_dependency_graph(TaskProcess)
    batches = get_parallel_batches(TaskProcess, dependency_map)

    ActionHistory = []
    total_actions = len(TaskProcess)
    completed_actions = 0

    # Step-start console log.
    util.print_colored(
        f"📋 步骤 {step_index + 1}/{total_steps}: {StepName} ({total_actions} 个动作, 分 {len(batches)} 批执行)",
        text_color="cyan"
    )

    # Execute actions batch by batch.
    for batch_index, batch_indices in enumerate(batches):
        # Check the pause/stop state before each batch.
        should_continue = await execution_state_manager.async_check_pause(execution_id)
        if not should_continue:
            return

        batch_size = len(batch_indices)

        # Batch-level console log.
        if batch_size > 1:
            util.print_colored(
                f"🚦 批次 {batch_index + 1}/{len(batches)}: 并行执行 {batch_size} 个动作",
                text_color="blue"
            )
        else:
            util.print_colored(
                f"🔄 批次 {batch_index + 1}/{len(batches)}: 串行执行",
                text_color="yellow"
            )

        # Run every action in the current batch concurrently.
        tasks = [
            execute_single_action_async(
                TaskProcess[i],
                General_Goal=General_Goal,
                TaskDescription=TaskDescription,
                OutputName=OutputName,
                KeyObjects=KeyObjects,
                ActionHistory=ActionHistory,
                agentName=TaskProcess[i]["AgentName"],
                AgentProfile_Dict=AgentProfile_Dict,
                InputName_List=InputName_List
            )
            for i in batch_indices
        ]

        # Wait for the batch to finish.
        batch_results = await asyncio.gather(*tasks)

        # Emit results one by one.
        for i, result in enumerate(batch_results):
            action_index_in_batch = batch_indices[i]
            completed_actions += 1

            util.print_colored(
                f"✅ 动作 {completed_actions}/{total_actions} 完成: {result['ActionType']} by {result['AgentName']}",
                text_color="green"
            )

            ActionHistory.append(result)

            # Stream this action's result immediately.
            yield {
                "type": "action_complete",
                "step_index": step_index,
                "step_name": StepName,
                "action_index": action_index_in_batch,
                "total_actions": total_actions,
                "completed_actions": completed_actions,
                "action_result": result,
                "batch_info": {
                    "batch_index": batch_index,
                    "batch_size": batch_size,
                    "is_parallel": batch_size > 1
                }
            }

    # Step finished: record the produced object and the action history.
    objectLogNode["content"] = KeyObjects[OutputName]
    stepLogNode["ActionHistory"] = ActionHistory

    # Collect the agents used by this step (deduplicated).
    assigned_agents_in_step = list(set(Agent_List)) if Agent_List else []

    # Append to RehearsalLog (it is a mutable list, so the caller sees this).
    if RehearsalLog is not None:
        RehearsalLog.append(stepLogNode)
        RehearsalLog.append(objectLogNode)

    yield {
        "type": "step_complete",
        "step_index": step_index,
        "step_name": StepName,
        "step_log_node": stepLogNode,
        "object_log_node": objectLogNode,
        "assigned_agents": {StepName: assigned_agents_in_step},  # agents used by this step
    }
|
||||
|
||||
|
||||
def executePlan_streaming_dynamic(
    plan: Dict,
    num_StepToRun: int,
    RehearsalLog: List,
    AgentProfile_Dict: Dict,
    existingKeyObjects: Dict = None,
    execution_id: str = None
) -> Generator[str, None, None]:
    """
    Execute a plan dynamically, supporting new steps appended while running.

    Drives the async step executor from a synchronous generator: a private
    event loop runs an async producer that pushes events into a queue, and
    this generator drains the queue, converting each event to an SSE string.

    Args:
        plan: The execution plan (expects "General Goal" and
            "Collaboration Process").
        num_StepToRun: Number of steps to run; None means run to the end.
        RehearsalLog: History of already-executed log nodes (mutated in place).
        AgentProfile_Dict: Agent configuration mapping.
        existingKeyObjects: Pre-existing KeyObjects to start from.
        execution_id: Execution id (used for dynamic step appends and
            pause/stop control); auto-generated when omitted.

    Yields:
        SSE-formatted event strings ("data: {...}\\n\\n").
    """
    # Initialize execution state.
    general_goal = plan.get("General Goal", "")

    # Make sure we have an execution_id.
    if execution_id is None:
        import time
        execution_id = f"{general_goal}_{int(time.time() * 1000)}"

    execution_state_manager.start_execution(execution_id, general_goal)

    # Prepare execution: rebuild KeyObjects and count finished steps from the
    # existing log.
    KeyObjects = existingKeyObjects.copy() if existingKeyObjects else {}
    finishedStep_index = -1

    for logNode in RehearsalLog:
        if logNode["LogNodeType"] == "step":
            finishedStep_index += 1
        if logNode["LogNodeType"] == "object":
            KeyObjects[logNode["NodeId"]] = logNode["content"]

    # Determine the range of steps to run.
    if num_StepToRun is None:
        run_to = len(plan["Collaboration Process"])
    else:
        run_to = (finishedStep_index + 1) + num_StepToRun

    steps_to_run = plan["Collaboration Process"][(finishedStep_index + 1): run_to]

    # Register with the dynamic execution manager.
    # NOTE(review): execution_id is always truthy here (it was generated above
    # if missing), so this branch always runs and the `else` producer branch
    # below is effectively dead; actual_execution_id is also unused — verify.
    if execution_id:
        # Initialize the manager with the supplied execution_id.
        actual_execution_id = dynamic_execution_manager.start_execution(general_goal, steps_to_run, execution_id)

    total_steps = len(steps_to_run)

    # Use a queue to stream events from the async producer.
    async def produce_events(queue: asyncio.Queue):
        """Async producer: runs steps and pushes events into the queue.

        Pushes None as a sentinel when finished (see `finally`)."""
        try:
            step_index = 0

            if execution_id:
                # Dynamic mode: loop, pulling the next step from the manager.
                # Cap the number of empty-wait cycles to avoid waiting forever.
                max_empty_wait_cycles = 5  # wait at most 5 cycles of 1 second each
                empty_wait_count = 0

                while True:
                    # Check pause/stop state.
                    should_continue = await execution_state_manager.async_check_pause(execution_id)
                    if not should_continue:
                        util.print_colored("🛑 用户请求停止执行", "red")
                        await queue.put({
                            "type": "error",
                            "message": "执行已被用户停止"
                        })
                        break

                    # Fetch the next step.
                    stepDescrip = dynamic_execution_manager.get_next_step(execution_id)

                    if stepDescrip is None:
                        # No step available; decide whether to keep waiting.
                        empty_wait_count += 1

                        # Query execution info.
                        execution_info = dynamic_execution_manager.get_execution_info(execution_id)

                        if execution_info:
                            queue_total_steps = execution_info.get("total_steps", 0)
                            completed_steps = execution_info.get("completed_steps", 0)

                            # Nothing ever queued (total 0): exit immediately.
                            if queue_total_steps == 0:
                                break

                            # All queued steps done: wait for possible appends.
                            if completed_steps >= queue_total_steps:
                                if empty_wait_count >= max_empty_wait_cycles:
                                    # Waited long enough; finish the execution.
                                    break
                                else:
                                    # Wait for newly appended steps.
                                    await asyncio.sleep(1)
                                    continue
                            else:
                                # Steps still in flight; retry shortly.
                                await asyncio.sleep(0.5)
                                empty_wait_count = 0  # reset the wait counter
                                continue
                        else:
                            # No execution info: exit.
                            break

                    # Got a step: reset the wait counter.
                    empty_wait_count = 0

                    # Refresh the total step count (for display).
                    execution_info = dynamic_execution_manager.get_execution_info(execution_id)
                    current_total_steps = execution_info.get("total_steps", total_steps) if execution_info else total_steps

                    # Run the step, forwarding its events.
                    async for event in execute_step_async_streaming(
                        stepDescrip,
                        plan["General Goal"],
                        AgentProfile_Dict,
                        KeyObjects,
                        step_index,
                        current_total_steps,  # dynamically updated total
                        execution_id,
                        RehearsalLog  # pass RehearsalLog so logs are appended
                    ):
                        if execution_state_manager.is_stopped(execution_id):
                            await queue.put({
                                "type": "error",
                                "message": "执行已被用户停止"
                            })
                            return

                        await queue.put(event)

                    # Mark the step as completed.
                    dynamic_execution_manager.mark_step_completed(execution_id)

                    # Update KeyObjects.
                    # NOTE(review): this block is a no-op — the object log node
                    # is already emitted in step_complete; verify intent.
                    OutputName = stepDescrip.get("OutputObject", "")
                    if OutputName and OutputName in KeyObjects:
                        # The object log node is sent in step_complete.
                        pass

                    step_index += 1

            else:
                # Non-dynamic mode: run all steps in order.
                for step_index, stepDescrip in enumerate(steps_to_run):
                    should_continue = await execution_state_manager.async_check_pause(execution_id)
                    if not should_continue:
                        util.print_colored("🛑 用户请求停止执行", "red")
                        await queue.put({
                            "type": "error",
                            "message": "执行已被用户停止"
                        })
                        return

                    async for event in execute_step_async_streaming(
                        stepDescrip,
                        plan["General Goal"],
                        AgentProfile_Dict,
                        KeyObjects,
                        step_index,
                        total_steps,
                        execution_id,
                        RehearsalLog  # pass RehearsalLog so logs are appended
                    ):
                        if execution_state_manager.is_stopped(execution_id):
                            await queue.put({
                                "type": "error",
                                "message": "执行已被用户停止"
                            })
                            return

                        await queue.put(event)

        except Exception as e:
            await queue.put({
                "type": "error",
                "message": f"执行出错: {str(e)}"
            })
        finally:
            await queue.put(None)

    # Run the async producer and yield events in real time.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    try:
        queue = asyncio.Queue(maxsize=10)
        producer_task = loop.create_task(produce_events(queue))

        while True:
            event = loop.run_until_complete(queue.get())
            if event is None:
                break

            # Convert to SSE format and send immediately.
            event_str = json.dumps(event, ensure_ascii=False)
            yield f"data: {event_str}\n\n"

        loop.run_until_complete(producer_task)

        if not execution_state_manager.is_stopped(execution_id):
            complete_event = json.dumps({
                "type": "execution_complete",
                "total_steps": total_steps
            }, ensure_ascii=False)
            yield f"data: {complete_event}\n\n"

    finally:
        # Clean up execution records before closing the event loop.
        if execution_id:
            # Remove the dynamic-execution record.
            dynamic_execution_manager.cleanup(execution_id)
            # Remove the pause/stop state.
            execution_state_manager.cleanup(execution_id)

        if 'producer_task' in locals():
            if not producer_task.done():
                producer_task.cancel()

        # Make sure all tasks are finished before closing the loop.
        try:
            pending = asyncio.all_tasks(loop)
            for task in pending:
                task.cancel()
            loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
        except Exception:
            pass  # ignore errors raised during cleanup

        loop.close()
|
||||
|
||||
|
||||
# Keep the old-version name as an alias for backward compatibility.
executePlan_streaming = executePlan_streaming_dynamic
|
||||
@@ -1,226 +0,0 @@
|
||||
"""
|
||||
动态执行管理器
|
||||
用于在任务执行过程中动态追加新步骤
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import Dict, List, Optional, Any
|
||||
from threading import Lock
|
||||
|
||||
|
||||
class DynamicExecutionManager:
    """
    Dynamic execution manager.

    Tracks running executions and supports appending new steps to an
    execution while it is in flight. All public methods are thread-safe:
    every access to the internal dictionaries is guarded by one lock.
    """

    def __init__(self):
        # Execution state: execution_id -> execution_info dict
        # ({"goal", "status", "total_steps", "completed_steps"}).
        self._executions: Dict[str, Dict] = {}

        # Lock guarding all internal state.
        self._lock = Lock()

        # Step queue: execution_id -> List[step]
        self._step_queues: Dict[str, List] = {}

        # Indices of steps already handed out: execution_id -> set[int]
        self._executed_steps: Dict[str, set] = {}

        # Indices of steps waiting to run: execution_id -> List[int]
        self._pending_steps: Dict[str, List[int]] = {}

    def start_execution(self, goal: str, initial_steps: List[Dict], execution_id: str = None) -> str:
        """
        Start tracking a new execution.

        Args:
            goal: Task goal.
            initial_steps: Initial list of steps.
            execution_id: Execution id; auto-generated when not provided.

        Returns:
            The execution id.
        """
        with self._lock:
            # Generate an id when the caller did not provide one.
            if execution_id is None:
                # FIX: the previous code called asyncio.get_event_loop().time()
                # from this synchronous method, which is deprecated and raises
                # when the calling thread has no event loop. A wall-clock
                # timestamp is safe from any context and just as unique here.
                import time
                execution_id = f"{goal}_{time.time()}"

            self._executions[execution_id] = {
                "goal": goal,
                "status": "running",
                "total_steps": len(initial_steps),
                "completed_steps": 0
            }

            # Copy so later caller-side mutations do not leak in.
            self._step_queues[execution_id] = initial_steps.copy()

            # Initialize the executed-step set.
            self._executed_steps[execution_id] = set()

            # Initialize the pending-step index list.
            self._pending_steps[execution_id] = list(range(len(initial_steps)))

            print(f"[Execution] 启动执行: {execution_id}, 步骤数={len(initial_steps)}")
            return execution_id

    def add_steps(self, execution_id: str, new_steps: List[Dict]) -> int:
        """
        Append new steps to a running execution.

        Args:
            execution_id: Execution id.
            new_steps: List of new steps.

        Returns:
            The number of steps appended (0 if the execution is unknown).
        """
        with self._lock:
            if execution_id not in self._step_queues:
                return 0

            current_count = len(self._step_queues[execution_id])

            # Append the new steps to the queue.
            self._step_queues[execution_id].extend(new_steps)

            # Register their indices as pending.
            new_indices = list(range(current_count, current_count + len(new_steps)))
            self._pending_steps[execution_id].extend(new_indices)

            # Update the total step count.
            self._executions[execution_id]["total_steps"] = len(self._step_queues[execution_id])

            print(f"[Execution] 追加步骤: +{len(new_steps)}, 总计={self._executions[execution_id]['total_steps']}")
            return len(new_steps)

    def get_next_step(self, execution_id: str) -> Optional[Dict]:
        """
        Pop the next pending step.

        Args:
            execution_id: Execution id.

        Returns:
            The next step, or None when nothing is pending.
        """
        with self._lock:
            if execution_id not in self._pending_steps:
                return None

            # Nothing pending.
            if not self._pending_steps[execution_id]:
                return None

            step_index = self._pending_steps[execution_id].pop(0)

            # Guard against a stale index (should not normally happen).
            if step_index >= len(self._step_queues[execution_id]):
                return None

            step = self._step_queues[execution_id][step_index]

            # Mark it as handed out.
            self._executed_steps[execution_id].add(step_index)

            step_name = step.get("StepName", "未知")
            print(f"[Execution] 获取步骤: {step_name} (索引: {step_index})")
            return step

    def mark_step_completed(self, execution_id: str):
        """
        Mark one step as completed.

        Args:
            execution_id: Execution id.
        """
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["completed_steps"] += 1
                completed = self._executions[execution_id]["completed_steps"]
                total = self._executions[execution_id]["total_steps"]
                print(f"[Execution] 步骤完成: {completed}/{total}")

    def get_execution_info(self, execution_id: str) -> Optional[Dict]:
        """
        Fetch the execution info dictionary.

        Args:
            execution_id: Execution id.

        Returns:
            The execution info dict, or None when unknown.
        """
        with self._lock:
            return self._executions.get(execution_id)

    def get_pending_count(self, execution_id: str) -> int:
        """
        Count the steps still pending.

        Args:
            execution_id: Execution id.

        Returns:
            The number of pending steps (0 when unknown).
        """
        with self._lock:
            if execution_id not in self._pending_steps:
                return 0
            return len(self._pending_steps[execution_id])

    def has_more_steps(self, execution_id: str) -> bool:
        """
        Check whether any step is still waiting to run.

        Args:
            execution_id: Execution id.

        Returns:
            True when at least one step is pending.
        """
        with self._lock:
            if execution_id not in self._pending_steps:
                return False
            return len(self._pending_steps[execution_id]) > 0

    def finish_execution(self, execution_id: str):
        """
        Mark an execution as completed.

        Args:
            execution_id: Execution id.
        """
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["status"] = "completed"

    def cancel_execution(self, execution_id: str):
        """
        Mark an execution as cancelled.

        Args:
            execution_id: Execution id.
        """
        with self._lock:
            if execution_id in self._executions:
                self._executions[execution_id]["status"] = "cancelled"

    def cleanup(self, execution_id: str):
        """
        Drop all records for an execution.

        Args:
            execution_id: Execution id.
        """
        with self._lock:
            self._executions.pop(execution_id, None)
            self._step_queues.pop(execution_id, None)
            self._executed_steps.pop(execution_id, None)
            self._pending_steps.pop(execution_id, None)
|
||||
|
||||
|
||||
# Module-level singleton instance.
dynamic_execution_manager = DynamicExecutionManager()
|
||||
@@ -1,292 +0,0 @@
|
||||
"""
|
||||
全局执行状态管理器
|
||||
用于支持任务的暂停、恢复和停止功能
|
||||
使用轮询检查机制,确保线程安全
|
||||
支持多用户/多执行ID并行管理
|
||||
"""
|
||||
|
||||
import threading
|
||||
import asyncio
|
||||
from typing import Optional, Dict
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class ExecutionStatus(Enum):
    """Execution status values."""
    RUNNING = "running"  # currently running
    PAUSED = "paused"    # paused
    STOPPED = "stopped"  # stopped
    IDLE = "idle"        # idle
|
||||
|
||||
|
||||
class ExecutionStateManager:
    """
    Global execution state manager.

    Features:
    - Manages parallel state for multiple users / execution ids (dict storage)
    - Manages task execution status (running / paused / stopped)
    - Uses a polling check mechanism, avoiding cross-thread async-event issues
    - Provides thread-safe query and mutation APIs

    Design notes:
    - The manager itself is a singleton
    - Internal state is isolated per execution_id
    - This avoids interference between concurrent users
    """

    _instance: Optional['ExecutionStateManager'] = None
    _lock = threading.Lock()

    def __new__(cls):
        """Singleton: double-checked locking around instance creation."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialize the state manager (only once for the singleton)."""
        if self._initialized:
            return

        self._initialized = True

        # State storage: execution_id -> state dict
        # Shape: {
        #   'status': ExecutionStatus,
        #   'goal': str,
        #   'should_pause': bool,
        #   'should_stop': bool
        # }
        self._states: Dict[str, Dict] = {}

        # Per-execution_id locks (finer-grained locking).
        self._locks: Dict[str, threading.Lock] = {}

        # Global lock guarding _states and _locks themselves.
        self._manager_lock = threading.Lock()

    def _get_lock(self, execution_id: str) -> threading.Lock:
        """Return the lock for execution_id, creating it if needed."""
        with self._manager_lock:
            if execution_id not in self._locks:
                self._locks[execution_id] = threading.Lock()
            return self._locks[execution_id]

    def _ensure_state(self, execution_id: str) -> Dict:
        """Return the state dict for execution_id, creating a default one if absent."""
        with self._manager_lock:
            if execution_id not in self._states:
                self._states[execution_id] = {
                    'status': ExecutionStatus.IDLE,
                    'goal': None,
                    'should_pause': False,
                    'should_stop': False
                }
            return self._states[execution_id]

    def _get_state(self, execution_id: str) -> Optional[Dict]:
        """Return the state dict for execution_id, or None if absent."""
        with self._manager_lock:
            return self._states.get(execution_id)

    def _cleanup_state(self, execution_id: str):
        """Remove the state and lock for execution_id."""
        with self._manager_lock:
            self._states.pop(execution_id, None)
            self._locks.pop(execution_id, None)

    def get_status(self, execution_id: str) -> Optional[ExecutionStatus]:
        """Return the current execution status, or None if unknown."""
        state = self._get_state(execution_id)
        if state is None:
            return None
        with self._get_lock(execution_id):
            return state['status']

    def set_goal(self, execution_id: str, goal: str):
        """Set the goal of the current execution."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['goal'] = goal

    def get_goal(self, execution_id: str) -> Optional[str]:
        """Return the goal of the current execution, or None if unknown."""
        state = self._get_state(execution_id)
        if state is None:
            return None
        with self._get_lock(execution_id):
            return state['goal']

    def start_execution(self, execution_id: str, goal: str):
        """Mark the execution as RUNNING and clear pause/stop flags."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['status'] = ExecutionStatus.RUNNING
            state['goal'] = goal
            state['should_pause'] = False
            state['should_stop'] = False

    def pause_execution(self, execution_id: str) -> bool:
        """
        Pause the execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True if the execution was paused (only possible from RUNNING).
        """
        state = self._get_state(execution_id)
        if state is None:
            return False

        with self._get_lock(execution_id):
            if state['status'] != ExecutionStatus.RUNNING:
                return False
            state['status'] = ExecutionStatus.PAUSED
            state['should_pause'] = True
            return True

    def resume_execution(self, execution_id: str) -> bool:
        """
        Resume the execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True if the execution was resumed (only possible from PAUSED).
        """
        state = self._get_state(execution_id)
        if state is None:
            return False

        with self._get_lock(execution_id):
            if state['status'] != ExecutionStatus.PAUSED:
                return False
            state['status'] = ExecutionStatus.RUNNING
            state['should_pause'] = False
            return True

    def stop_execution(self, execution_id: str) -> bool:
        """
        Stop the execution.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True if the execution was stopped (fails from IDLE/STOPPED).
        """
        state = self._get_state(execution_id)
        if state is None:
            return False

        with self._get_lock(execution_id):
            if state['status'] in [ExecutionStatus.IDLE, ExecutionStatus.STOPPED]:
                return False
            state['status'] = ExecutionStatus.STOPPED
            state['should_stop'] = True
            state['should_pause'] = False
            return True

    def reset(self, execution_id: str):
        """Reset the state for execution_id back to IDLE."""
        state = self._ensure_state(execution_id)
        with self._get_lock(execution_id):
            state['status'] = ExecutionStatus.IDLE
            state['goal'] = None
            state['should_pause'] = False
            state['should_stop'] = False

    def cleanup(self, execution_id: str):
        """Remove all state for execution_id."""
        self._cleanup_state(execution_id)

    async def async_check_pause(self, execution_id: str):
        """
        Asynchronously check whether the execution should pause (polling).

        Blocks the current coroutine while paused, until resumed or stopped.
        Call this at the key checkpoints of an execution loop.

        Args:
            execution_id: Execution id.

        Returns:
            bool: True means continue executing; False means stop.
        """
        state = self._get_state(execution_id)
        if state is None:
            # No state recorded: default to continuing.
            return True

        # Poll the flags instead of awaiting an asyncio event, which avoids
        # cross-thread event-loop issues.
        while True:
            with self._get_lock(execution_id):
                should_stop = state['should_stop']
                should_pause = state['should_pause']

            # Stop flag wins.
            if should_stop:
                return False

            # Paused: wait for resume.
            if should_pause:
                # Short sleep to avoid spinning the CPU.
                await asyncio.sleep(0.1)

                # Re-read the flags after sleeping.
                with self._get_lock(execution_id):
                    should_pause = state['should_pause']
                    should_stop = state['should_stop']

                if not should_pause:
                    continue
                if should_stop:
                    return False
                # Still paused: keep waiting.
                continue

            # Neither stopped nor paused: continue executing.
            return True

    def is_paused(self, execution_id: str) -> bool:
        """Return True when the execution is PAUSED."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.PAUSED

    def is_running(self, execution_id: str) -> bool:
        """Return True when the execution is RUNNING."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.RUNNING

    def is_stopped(self, execution_id: str) -> bool:
        """Return True when the execution is STOPPED (or unknown)."""
        state = self._get_state(execution_id)
        if state is None:
            # Unknown executions count as stopped.
            return True
        with self._get_lock(execution_id):
            return state['status'] == ExecutionStatus.STOPPED

    def is_active(self, execution_id: str) -> bool:
        """Return True when the execution is active (RUNNING or PAUSED)."""
        state = self._get_state(execution_id)
        if state is None:
            return False
        with self._get_lock(execution_id):
            return state['status'] in [ExecutionStatus.RUNNING, ExecutionStatus.PAUSED]
|
||||
|
||||
|
||||
# Module-level singleton instance.
execution_state_manager = ExecutionStateManager()
|
||||
@@ -1,220 +0,0 @@
|
||||
"""
|
||||
生成阶段状态管理器
|
||||
用于支持生成任务的暂停、停止功能
|
||||
使用轮询检查机制,确保线程安全
|
||||
支持多用户/多generation_id并行管理
|
||||
"""
|
||||
|
||||
import threading
|
||||
import asyncio
|
||||
import time
|
||||
from typing import Optional, Dict
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class GenerationStatus(Enum):
    """Generation status values."""
    GENERATING = "generating"  # currently generating
    PAUSED = "paused"          # paused
    STOPPED = "stopped"        # stopped
    COMPLETED = "completed"    # completed
    IDLE = "idle"              # idle
|
||||
|
||||
|
||||
class GenerationStateManager:
    """
    Generation-phase state manager.

    Features:
    - Manages parallel state for multiple users / generation ids (dict storage)
    - Manages generation task status (generating / paused / stopped / completed)
    - Provides thread-safe query and mutation APIs

    Design notes:
    - The manager itself is a singleton
    - Internal state is isolated per generation_id
    - This avoids interference between concurrent generations
    """

    _instance: Optional['GenerationStateManager'] = None
    _lock = threading.Lock()

    def __new__(cls):
        """Singleton: double-checked locking around instance creation."""
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        """Initialize the state manager (only once for the singleton)."""
        if self._initialized:
            return

        self._initialized = True

        # State storage: generation_id -> state dict
        # Shape: {
        #   'status': GenerationStatus,
        #   'goal': str,
        #   'should_stop': bool
        # }
        self._states: Dict[str, Dict] = {}

        # Per-generation_id locks (finer-grained locking).
        self._locks: Dict[str, threading.Lock] = {}

        # Global lock guarding _states and _locks themselves.
        self._manager_lock = threading.Lock()

    def _get_lock(self, generation_id: str) -> threading.Lock:
        """Return the lock for generation_id, creating it if needed."""
        with self._manager_lock:
            if generation_id not in self._locks:
                self._locks[generation_id] = threading.Lock()
            return self._locks[generation_id]

    def _ensure_state(self, generation_id: str, goal: str = None) -> Dict:
        """Return the state dict for generation_id, creating a default one if absent.

        Note: goal is only used when the state is first created."""
        with self._manager_lock:
            if generation_id not in self._states:
                self._states[generation_id] = {
                    'status': GenerationStatus.IDLE,
                    'goal': goal,
                    'should_stop': False
                }
            return self._states[generation_id]

    def _get_state(self, generation_id: str) -> Optional[Dict]:
        """Return the state dict for generation_id, or None if absent."""
        with self._manager_lock:
            return self._states.get(generation_id)

    def _cleanup_state(self, generation_id: str):
        """Remove the state and lock for generation_id."""
        with self._manager_lock:
            self._states.pop(generation_id, None)
            self._locks.pop(generation_id, None)

    def get_status(self, generation_id: str) -> Optional[GenerationStatus]:
        """Return the current generation status, or None if unknown."""
        state = self._get_state(generation_id)
        if state is None:
            return None
        with self._get_lock(generation_id):
            return state['status']

    def start_generation(self, generation_id: str, goal: str):
        """Mark the generation as GENERATING and clear the stop flag."""
        state = self._ensure_state(generation_id, goal)
        with self._get_lock(generation_id):
            state['status'] = GenerationStatus.GENERATING
            state['goal'] = goal
            state['should_stop'] = False

    def stop_generation(self, generation_id: str) -> bool:
        """
        Stop the generation.

        Args:
            generation_id: Generation id.

        Returns:
            bool: Whether the stop succeeded (unknown, STOPPED, COMPLETED and
            IDLE states all count as already stopped and return True).
        """
        state = self._get_state(generation_id)
        if state is None:
            return True  # unknown id counts as already stopped

        with self._get_lock(generation_id):
            if state['status'] == GenerationStatus.STOPPED:
                return True  # already stopped

            if state['status'] == GenerationStatus.COMPLETED:
                return True  # completed counts as stopped

            if state['status'] == GenerationStatus.IDLE:
                return True  # idle: nothing to stop

            # Actually generating: flag it stopped.
            state['status'] = GenerationStatus.STOPPED
            state['should_stop'] = True
            return True

    def complete_generation(self, generation_id: str):
        """Mark the generation as COMPLETED."""
        state = self._ensure_state(generation_id)
        with self._get_lock(generation_id):
            state['status'] = GenerationStatus.COMPLETED

    def cleanup(self, generation_id: str):
        """Remove all state for generation_id."""
        self._cleanup_state(generation_id)

    def should_stop(self, generation_id: str) -> bool:
        """Return True when a stop has been requested."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state.get('should_stop', False)

    def is_stopped(self, generation_id: str) -> bool:
        """Return True when the generation is STOPPED."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.STOPPED

    def is_completed(self, generation_id: str) -> bool:
        """Return True when the generation is COMPLETED."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.COMPLETED

    def is_active(self, generation_id: str) -> bool:
        """Return True when the generation is active (GENERATING only;
        despite the original wording, PAUSED is not considered active here)."""
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            return state['status'] == GenerationStatus.GENERATING

    def check_and_set_stop(self, generation_id: str) -> bool:
        """
        Check whether the generation should stop.

        Note: despite the name, this method only reads the stop flag — it
        does not modify any state.

        Args:
            generation_id: Generation id.

        Returns:
            bool: True means stop was requested; False means keep going.
        """
        state = self._get_state(generation_id)
        if state is None:
            return False
        with self._get_lock(generation_id):
            if state['should_stop']:
                return True
            return False

    def generate_id(self, goal: str) -> str:
        """
        Generate a unique generation_id.

        Args:
            goal: Generation goal.

        Returns:
            str: Formatted as {goal}_{millisecond timestamp}.
        """
        return f"{goal}_{int(time.time() * 1000)}"
|
||||
|
||||
|
||||
# 全局单例实例
|
||||
generation_state_manager = GenerationStateManager()
|
||||
@@ -20,8 +20,6 @@ def camel_case_to_normal(s):
|
||||
def generate_template_sentence_for_CollaborationBrief(
|
||||
input_object_list, output_object, agent_list, step_task
|
||||
):
|
||||
# Ensure step_task is not None
|
||||
step_task = step_task if step_task is not None else "perform the task"
|
||||
# Check if the names are in camel case (no spaces) and convert them to normal naming convention
|
||||
input_object_list = (
|
||||
[
|
||||
@@ -33,48 +31,29 @@ def generate_template_sentence_for_CollaborationBrief(
|
||||
)
|
||||
output_object = (
|
||||
camel_case_to_normal(output_object)
|
||||
if output_object is not None and is_camel_case(output_object)
|
||||
else (output_object if output_object is not None else "unknown output")
|
||||
if is_camel_case(output_object)
|
||||
else output_object
|
||||
)
|
||||
|
||||
# Format the agents into a string with proper grammar
|
||||
if agent_list is None or len(agent_list) == 0:
|
||||
agent_str = "Unknown agents"
|
||||
elif all(agent is not None for agent in agent_list):
|
||||
agent_str = (
|
||||
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
|
||||
if len(agent_list) > 1
|
||||
else agent_list[0]
|
||||
)
|
||||
else:
|
||||
# Filter out None values
|
||||
filtered_agents = [agent for agent in agent_list if agent is not None]
|
||||
if filtered_agents:
|
||||
agent_str = (
|
||||
" and ".join([", ".join(filtered_agents[:-1]), filtered_agents[-1]])
|
||||
if len(filtered_agents) > 1
|
||||
else filtered_agents[0]
|
||||
)
|
||||
else:
|
||||
agent_str = "Unknown agents"
|
||||
agent_str = (
|
||||
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
|
||||
if len(agent_list) > 1
|
||||
else agent_list[0]
|
||||
)
|
||||
|
||||
if input_object_list is None or len(input_object_list) == 0:
|
||||
# Combine all the parts into the template sentence
|
||||
template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
|
||||
else:
|
||||
# Format the input objects into a string with proper grammar
|
||||
# Filter out None values from input_object_list
|
||||
filtered_input_list = [obj for obj in input_object_list if obj is not None]
|
||||
if filtered_input_list:
|
||||
input_str = (
|
||||
" and ".join(
|
||||
[", ".join(filtered_input_list[:-1]), filtered_input_list[-1]]
|
||||
)
|
||||
if len(filtered_input_list) > 1
|
||||
else filtered_input_list[0]
|
||||
input_str = (
|
||||
" and ".join(
|
||||
[", ".join(input_object_list[:-1]), input_object_list[-1]]
|
||||
)
|
||||
else:
|
||||
input_str = "unknown inputs"
|
||||
if len(input_object_list) > 1
|
||||
else input_object_list[0]
|
||||
)
|
||||
# Combine all the parts into the template sentence
|
||||
template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
|
||||
|
||||
@@ -94,7 +73,6 @@ def remove_render_spec(duty_spec):
|
||||
return duty_spec
|
||||
|
||||
|
||||
|
||||
def read_LLM_Completion(messages, useGroq=True):
|
||||
for _ in range(3):
|
||||
text = LLM_Completion(messages, useGroq=useGroq)
|
||||
@@ -112,7 +90,7 @@ def read_LLM_Completion(messages, useGroq=True):
|
||||
return json.loads(match.group(0).strip())
|
||||
except Exception:
|
||||
pass
|
||||
return {} # 返回空对象而不是抛出异常
|
||||
raise ("bad format!")
|
||||
|
||||
|
||||
def read_json_content(text):
|
||||
@@ -133,7 +111,7 @@ def read_json_content(text):
|
||||
if match:
|
||||
return json.loads(match.group(0).strip())
|
||||
|
||||
return {} # 返回空对象而不是抛出异常
|
||||
raise ("bad format!")
|
||||
|
||||
|
||||
def read_outputObject_content(text, keyword):
|
||||
@@ -149,4 +127,4 @@ def read_outputObject_content(text, keyword):
|
||||
if match:
|
||||
return match.group(1).strip()
|
||||
else:
|
||||
return "" # 返回空字符串而不是抛出异常
|
||||
raise ("bad format!")
|
||||
|
||||
@@ -1,116 +1,102 @@
|
||||
[
|
||||
{
|
||||
"Icon": "Hailey_Johnson.png",
|
||||
"Name": "船舶设计师",
|
||||
"Profile": "提供船舶制造中的实际需求和约束。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Jennifer_Moore.png",
|
||||
"Name": "防护工程专家",
|
||||
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Jane_Moreno.png",
|
||||
"Name": "病理生理学家",
|
||||
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Giorgio_Rossi.png",
|
||||
"Name": "药物化学家",
|
||||
"Profile": "负责将靶点概念转化为实际可合成的分子。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Tamara_Taylor.png",
|
||||
"Name": "制剂工程师",
|
||||
"Profile": "负责将活性药物成分(API)变成稳定、可用、符合战场要求的剂型。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Maria_Lopez.png",
|
||||
"Name": "监管事务专家",
|
||||
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Sam_Moore.png",
|
||||
"Name": "物理学家",
|
||||
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Yuriko_Yamamoto.png",
|
||||
"Name": "实验材料学家",
|
||||
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Carlos_Gomez.png",
|
||||
"Name": "计算模拟专家",
|
||||
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "John_Lin.png",
|
||||
"Name": "腐蚀机理研究员",
|
||||
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Arthur_Burton.png",
|
||||
"Name": "先进材料研发员",
|
||||
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Eddy_Lin.png",
|
||||
"Name": "肾脏病学家",
|
||||
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Isabella_Rodriguez.png",
|
||||
"Name": "临床研究协调员",
|
||||
"Profile": "负责受试者招募和临床试验流程优化。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Latoya_Williams.png",
|
||||
"Name": "中医药专家",
|
||||
"Profile": "理解药物的中药成分和作用机制。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Carmen_Ortiz.png",
|
||||
"Name": "药物安全专家",
|
||||
"Profile": "专注于药物不良反应数据收集、分析和报告。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Rajiv_Patel.png",
|
||||
"Name": "二维材料科学家",
|
||||
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Tom_Moreno.png",
|
||||
"Name": "光电物理学家",
|
||||
"Profile": "研究材料的光电转换机制和关键影响因素。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Ayesha_Khan.png",
|
||||
"Name": "机器学习专家",
|
||||
"Profile": "专注于开发和应用AI模型用于材料模拟。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Mei_Lin.png",
|
||||
"Name": "流体动力学专家",
|
||||
"Profile": "专注于流体行为理论和模拟。",
|
||||
"Classification": "科学数据空间"
|
||||
}
|
||||
]
|
||||
{
|
||||
"Icon": "Abigail_Chen.png",
|
||||
"Name": "Abigail",
|
||||
"Profile": "AI Engineer"
|
||||
},
|
||||
{
|
||||
"Icon": "Jane_Moreno.png",
|
||||
"Name": "Jane",
|
||||
"Profile": "Cybersecurity Specialist"
|
||||
},
|
||||
{
|
||||
"Icon": "Giorgio_Rossi.png",
|
||||
"Name": "Giorgio",
|
||||
"Profile": "Poet"
|
||||
},
|
||||
{
|
||||
"Icon": "Jennifer_Moore.png",
|
||||
"Name": "Jennifer",
|
||||
"Profile": "Linguist"
|
||||
},
|
||||
{
|
||||
"Icon": "Maria_Lopez.png",
|
||||
"Name": "Maria",
|
||||
"Profile": "Philosopher"
|
||||
},
|
||||
{
|
||||
"Icon": "Sam_Moore.png",
|
||||
"Name": "Sam",
|
||||
"Profile": "Ethicist"
|
||||
},
|
||||
{
|
||||
"Icon": "Yuriko_Yamamoto.png",
|
||||
"Name": "Yuriko",
|
||||
"Profile": "Futurist"
|
||||
},
|
||||
{
|
||||
"Icon": "Carlos_Gomez.png",
|
||||
"Name": "Carlos",
|
||||
"Profile": "Language Expert"
|
||||
},
|
||||
{
|
||||
"Icon": "John_Lin.png",
|
||||
"Name": "John",
|
||||
"Profile": "Software Developer"
|
||||
},
|
||||
{
|
||||
"Icon": "Tamara_Taylor.png",
|
||||
"Name": "Tamara",
|
||||
"Profile": "Music Composer"
|
||||
},
|
||||
{
|
||||
"Icon": "Arthur_Burton.png",
|
||||
"Name": "Arthur",
|
||||
"Profile": "Neuroscientist"
|
||||
},
|
||||
{
|
||||
"Icon": "Eddy_Lin.png",
|
||||
"Name": "Eddy",
|
||||
"Profile": "Cognitive Psychologist"
|
||||
},
|
||||
{
|
||||
"Icon": "Isabella_Rodriguez.png",
|
||||
"Name": "Isabella",
|
||||
"Profile": "Science Fiction Writer"
|
||||
},
|
||||
{
|
||||
"Icon": "Latoya_Williams.png",
|
||||
"Name": "Latoya",
|
||||
"Profile": "Historian of Technology"
|
||||
},
|
||||
{
|
||||
"Icon": "Carmen_Ortiz.png",
|
||||
"Name": "Carmen",
|
||||
"Profile": "Robotics Engineer"
|
||||
},
|
||||
{
|
||||
"Icon": "Rajiv_Patel.png",
|
||||
"Name": "Rajiv",
|
||||
"Profile": "Science Educator"
|
||||
},
|
||||
{
|
||||
"Icon": "Tom_Moreno.png",
|
||||
"Name": "Tom",
|
||||
"Profile": "AI Scientist"
|
||||
},
|
||||
{
|
||||
"Icon": "Ayesha_Khan.png",
|
||||
"Name": "Ayesha",
|
||||
"Profile": "Multimedia Artist"
|
||||
},
|
||||
{
|
||||
"Icon": "Mei_Lin.png",
|
||||
"Name": "Mei",
|
||||
"Profile": "Graphic Designer"
|
||||
},
|
||||
{
|
||||
"Icon": "Hailey_Johnson.png",
|
||||
"Name": "Hailey",
|
||||
"Profile": "Legal Expert on AI Law"
|
||||
}
|
||||
]
|
||||
@@ -1,102 +0,0 @@
|
||||
[
|
||||
{
|
||||
"Icon": "Abigail_Chen.png",
|
||||
"Name": "Abigail",
|
||||
"Profile": "AI Engineer"
|
||||
},
|
||||
{
|
||||
"Icon": "Jane_Moreno.png",
|
||||
"Name": "Jane",
|
||||
"Profile": "Cybersecurity Specialist"
|
||||
},
|
||||
{
|
||||
"Icon": "Giorgio_Rossi.png",
|
||||
"Name": "Giorgio",
|
||||
"Profile": "Poet"
|
||||
},
|
||||
{
|
||||
"Icon": "Jennifer_Moore.png",
|
||||
"Name": "Jennifer",
|
||||
"Profile": "Linguist"
|
||||
},
|
||||
{
|
||||
"Icon": "Maria_Lopez.png",
|
||||
"Name": "Maria",
|
||||
"Profile": "Philosopher"
|
||||
},
|
||||
{
|
||||
"Icon": "Sam_Moore.png",
|
||||
"Name": "Sam",
|
||||
"Profile": "Ethicist"
|
||||
},
|
||||
{
|
||||
"Icon": "Yuriko_Yamamoto.png",
|
||||
"Name": "Yuriko",
|
||||
"Profile": "Futurist"
|
||||
},
|
||||
{
|
||||
"Icon": "Carlos_Gomez.png",
|
||||
"Name": "Carlos",
|
||||
"Profile": "Language Expert"
|
||||
},
|
||||
{
|
||||
"Icon": "John_Lin.png",
|
||||
"Name": "John",
|
||||
"Profile": "Software Developer"
|
||||
},
|
||||
{
|
||||
"Icon": "Tamara_Taylor.png",
|
||||
"Name": "Tamara",
|
||||
"Profile": "Music Composer"
|
||||
},
|
||||
{
|
||||
"Icon": "Arthur_Burton.png",
|
||||
"Name": "Arthur",
|
||||
"Profile": "Neuroscientist"
|
||||
},
|
||||
{
|
||||
"Icon": "Eddy_Lin.png",
|
||||
"Name": "Eddy",
|
||||
"Profile": "Cognitive Psychologist"
|
||||
},
|
||||
{
|
||||
"Icon": "Isabella_Rodriguez.png",
|
||||
"Name": "Isabella",
|
||||
"Profile": "Science Fiction Writer"
|
||||
},
|
||||
{
|
||||
"Icon": "Latoya_Williams.png",
|
||||
"Name": "Latoya",
|
||||
"Profile": "Historian of Technology"
|
||||
},
|
||||
{
|
||||
"Icon": "Carmen_Ortiz.png",
|
||||
"Name": "Carmen",
|
||||
"Profile": "Robotics Engineer"
|
||||
},
|
||||
{
|
||||
"Icon": "Rajiv_Patel.png",
|
||||
"Name": "Rajiv",
|
||||
"Profile": "Science Educator"
|
||||
},
|
||||
{
|
||||
"Icon": "Tom_Moreno.png",
|
||||
"Name": "Tom",
|
||||
"Profile": "AI Scientist"
|
||||
},
|
||||
{
|
||||
"Icon": "Ayesha_Khan.png",
|
||||
"Name": "Ayesha",
|
||||
"Profile": "Multimedia Artist"
|
||||
},
|
||||
{
|
||||
"Icon": "Mei_Lin.png",
|
||||
"Name": "Mei",
|
||||
"Profile": "Graphic Designer"
|
||||
},
|
||||
{
|
||||
"Icon": "Hailey_Johnson.png",
|
||||
"Name": "Hailey",
|
||||
"Profile": "Legal Expert on AI Law"
|
||||
}
|
||||
]
|
||||
97
backend/AgentRepo/科创.json
Normal file
@@ -0,0 +1,97 @@
|
||||
[
|
||||
{
|
||||
"Icon": "Hailey_Johnson.png",
|
||||
"Name": "船舶设计师",
|
||||
"Profile": "提供船舶制造中的实际需求和约束。"
|
||||
},
|
||||
{
|
||||
"Icon": "Jennifer_Moore.png",
|
||||
"Name": "防护工程专家",
|
||||
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
|
||||
},
|
||||
{
|
||||
"Icon": "Jane_Moreno.png",
|
||||
"Name": "病理生理学家",
|
||||
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。"
|
||||
},
|
||||
{
|
||||
"Icon": "Giorgio_Rossi.png",
|
||||
"Name": "药物化学家",
|
||||
"Profile": "负责将靶点概念转化为实际可合成的分子。"
|
||||
},
|
||||
{
|
||||
"Icon": "Tamara_Taylor.png",
|
||||
"Name": "制剂工程师",
|
||||
"Profile": "负责将活性药物成分(API)变成稳定、可用、符合战场要求的剂型。"
|
||||
},
|
||||
{
|
||||
"Icon": "Maria_Lopez.png",
|
||||
"Name": "监管事务专家",
|
||||
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。"
|
||||
},
|
||||
{
|
||||
"Icon": "Sam_Moore.png",
|
||||
"Name": "物理学家",
|
||||
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。"
|
||||
},
|
||||
{
|
||||
"Icon": "Yuriko_Yamamoto.png",
|
||||
"Name": "实验材料学家",
|
||||
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。"
|
||||
},
|
||||
{
|
||||
"Icon": "Carlos_Gomez.png",
|
||||
"Name": "计算模拟专家",
|
||||
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。"
|
||||
},
|
||||
{
|
||||
"Icon": "John_Lin.png",
|
||||
"Name": "腐蚀机理研究员",
|
||||
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
|
||||
},
|
||||
{
|
||||
"Icon": "Arthur_Burton.png",
|
||||
"Name": "先进材料研发员",
|
||||
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。"
|
||||
},
|
||||
{
|
||||
"Icon": "Eddy_Lin.png",
|
||||
"Name": "肾脏病学家",
|
||||
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。"
|
||||
},
|
||||
{
|
||||
"Icon": "Isabella_Rodriguez.png",
|
||||
"Name": "临床研究协调员",
|
||||
"Profile": "负责受试者招募和临床试验流程优化。"
|
||||
},
|
||||
{
|
||||
"Icon": "Latoya_Williams.png",
|
||||
"Name": "中医药专家",
|
||||
"Profile": "理解药物的中药成分和作用机制。"
|
||||
},
|
||||
{
|
||||
"Icon": "Carmen_Ortiz.png",
|
||||
"Name": "药物安全专家",
|
||||
"Profile": "专注于药物不良反应数据收集、分析和报告。"
|
||||
},
|
||||
{
|
||||
"Icon": "Rajiv_Patel.png",
|
||||
"Name": "二维材料科学家",
|
||||
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。"
|
||||
},
|
||||
{
|
||||
"Icon": "Tom_Moreno.png",
|
||||
"Name": "光电物理学家",
|
||||
"Profile": "研究材料的光电转换机制和关键影响因素。"
|
||||
},
|
||||
{
|
||||
"Icon": "Ayesha_Khan.png",
|
||||
"Name": "机器学习专家",
|
||||
"Profile": "专注于开发和应用AI模型用于材料模拟。"
|
||||
},
|
||||
{
|
||||
"Icon": "Mei_Lin.png",
|
||||
"Name": "流体动力学专家",
|
||||
"Profile": "专注于流体行为理论和模拟。"
|
||||
}
|
||||
]
|
||||
@@ -1,29 +1,12 @@
|
||||
## config for default LLM
|
||||
OPENAI_API_BASE: "https://ai.gitee.com/v1"
|
||||
OPENAI_API_KEY: "HYCNGM39GGFNSB1F8MBBMI9QYJR3P1CRSYS2PV1A"
|
||||
OPENAI_API_MODEL: "DeepSeek-V3"
|
||||
# OPENAI_API_BASE: "https://qianfan.baidubce.com/v2"
|
||||
# OPENAI_API_KEY: "bce-v3/ALTAK-Rp1HuLgqIdXM1rywQHRxr/96c5a0e99ccddf91193157e7d42d89979981bf05"
|
||||
# OPENAI_API_MODEL: "deepseek-v3"
|
||||
#OPENAI_API_BASE: "https://api.yyds168.net/v1"
|
||||
#OPENAI_API_KEY: "sk-5SJms7yWhGtb0YuwayqaDPjU95s4gapMqkaX5q9opWn3Ati3"
|
||||
#OPENAI_API_MODEL: "deepseek-v3"
|
||||
|
||||
OPENAI_API_BASE: ""
|
||||
OPENAI_API_KEY: ""
|
||||
OPENAI_API_MODEL: "gpt-4-turbo-preview"
|
||||
|
||||
## config for fast mode
|
||||
FAST_DESIGN_MODE: False
|
||||
FAST_DESIGN_MODE: True
|
||||
GROQ_API_KEY: ""
|
||||
MISTRAL_API_KEY: ""
|
||||
|
||||
## options under experimentation, leave them as Fasle unless you know what it is for
|
||||
USE_CACHE: False
|
||||
|
||||
# PostgreSQL 数据库配置
|
||||
database:
|
||||
host: "localhost"
|
||||
port: 5432
|
||||
username: "postgres"
|
||||
password: "123456"
|
||||
name: "agentcoord"
|
||||
pool_size: 10
|
||||
max_overflow: 20
|
||||
USE_CACHE: True
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
"""
|
||||
AgentCoord 数据库模块
|
||||
提供 PostgreSQL 数据库连接、模型和 CRUD 操作
|
||||
基于 DATABASE_DESIGN.md 设计
|
||||
"""
|
||||
|
||||
from .database import get_db, get_db_context, test_connection, engine, text
|
||||
from .models import MultiAgentTask, UserAgent, ExportRecord, PlanShare, TaskStatus
|
||||
from .crud import MultiAgentTaskCRUD, UserAgentCRUD, ExportRecordCRUD, PlanShareCRUD
|
||||
|
||||
__all__ = [
|
||||
# 连接管理
|
||||
"get_db",
|
||||
"get_db_context",
|
||||
"test_connection",
|
||||
"engine",
|
||||
"text",
|
||||
# 模型
|
||||
"MultiAgentTask",
|
||||
"UserAgent",
|
||||
"ExportRecord",
|
||||
"PlanShare",
|
||||
"TaskStatus",
|
||||
# CRUD
|
||||
"MultiAgentTaskCRUD",
|
||||
"UserAgentCRUD",
|
||||
"ExportRecordCRUD",
|
||||
"PlanShareCRUD",
|
||||
]
|
||||
@@ -1,577 +0,0 @@
|
||||
"""
|
||||
数据库 CRUD 操作
|
||||
封装所有数据库操作方法 (基于 DATABASE_DESIGN.md)
|
||||
"""
|
||||
|
||||
import copy
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from typing import List, Optional
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from .models import MultiAgentTask, UserAgent, ExportRecord, PlanShare
|
||||
|
||||
|
||||
class MultiAgentTaskCRUD:
|
||||
"""多智能体任务 CRUD 操作"""
|
||||
|
||||
@staticmethod
|
||||
def create(
|
||||
db: Session,
|
||||
task_id: Optional[str] = None, # 可选,如果为 None 则自动生成
|
||||
user_id: str = "",
|
||||
query: str = "",
|
||||
agents_info: list = [],
|
||||
task_outline: Optional[dict] = None,
|
||||
assigned_agents: Optional[list] = None,
|
||||
agent_scores: Optional[dict] = None,
|
||||
result: Optional[str] = None,
|
||||
) -> MultiAgentTask:
|
||||
"""创建任务记录"""
|
||||
task = MultiAgentTask(
|
||||
task_id=task_id or str(uuid.uuid4()), # 如果没传则生成新的
|
||||
user_id=user_id,
|
||||
query=query,
|
||||
agents_info=agents_info,
|
||||
task_outline=task_outline,
|
||||
assigned_agents=assigned_agents,
|
||||
agent_scores=agent_scores,
|
||||
result=result,
|
||||
)
|
||||
db.add(task)
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def get_by_id(db: Session, task_id: str) -> Optional[MultiAgentTask]:
|
||||
"""根据任务 ID 获取记录"""
|
||||
return db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
|
||||
@staticmethod
|
||||
def get_by_user_id(
|
||||
db: Session, user_id: str, limit: int = 50, offset: int = 0
|
||||
) -> List[MultiAgentTask]:
|
||||
"""根据用户 ID 获取任务记录"""
|
||||
return (
|
||||
db.query(MultiAgentTask)
|
||||
.filter(MultiAgentTask.user_id == user_id)
|
||||
.order_by(MultiAgentTask.created_at.desc())
|
||||
.offset(offset)
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_recent(
|
||||
db: Session, limit: int = 20, offset: int = 0, user_id: str = None
|
||||
) -> List[MultiAgentTask]:
|
||||
"""获取最近的任务记录,置顶的排在最前面"""
|
||||
query = db.query(MultiAgentTask)
|
||||
# 按 user_id 过滤
|
||||
if user_id:
|
||||
query = query.filter(MultiAgentTask.user_id == user_id)
|
||||
return (
|
||||
query
|
||||
.order_by(MultiAgentTask.is_pinned.desc(), MultiAgentTask.created_at.desc())
|
||||
.offset(offset)
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def update_result(
|
||||
db: Session, task_id: str, result: list
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新任务结果"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.result = result if result else []
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_task_outline(
|
||||
db: Session, task_id: str, task_outline: dict
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新任务大纲"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.task_outline = task_outline
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_assigned_agents(
|
||||
db: Session, task_id: str, assigned_agents: dict
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新分配的智能体(步骤名 -> agent列表)"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.assigned_agents = assigned_agents
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_agent_scores(
|
||||
db: Session, task_id: str, agent_scores: dict
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新智能体评分(合并模式,追加新步骤的评分)"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
# 合并现有评分数据和新评分数据
|
||||
existing_scores = task.agent_scores or {}
|
||||
merged_scores = {**existing_scores, **agent_scores} # 新数据覆盖/追加旧数据
|
||||
task.agent_scores = merged_scores
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_status(
|
||||
db: Session, task_id: str, status: str
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新任务状态"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.status = status
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def increment_execution_count(db: Session, task_id: str) -> Optional[MultiAgentTask]:
|
||||
"""增加任务执行次数"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.execution_count = (task.execution_count or 0) + 1
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_generation_id(
|
||||
db: Session, task_id: str, generation_id: str
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新生成 ID"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.generation_id = generation_id
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_execution_id(
|
||||
db: Session, task_id: str, execution_id: str
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新执行 ID"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.execution_id = execution_id
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_rehearsal_log(
|
||||
db: Session, task_id: str, rehearsal_log: list
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新排练日志"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.rehearsal_log = rehearsal_log if rehearsal_log else []
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_is_pinned(
|
||||
db: Session, task_id: str, is_pinned: bool
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新任务置顶状态"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
task.is_pinned = is_pinned
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def append_rehearsal_log(
|
||||
db: Session, task_id: str, log_entry: dict
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""追加排练日志条目"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
current_log = task.rehearsal_log or []
|
||||
if isinstance(current_log, list):
|
||||
current_log.append(log_entry)
|
||||
else:
|
||||
current_log = [log_entry]
|
||||
task.rehearsal_log = current_log
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def update_branches(
|
||||
db: Session, task_id: str, branches
|
||||
) -> Optional[MultiAgentTask]:
|
||||
"""更新任务分支数据
|
||||
支持两种格式:
|
||||
- list: 旧格式,直接覆盖
|
||||
- dict: 新格式 { flow_branches: [...], task_process_branches: {...} }
|
||||
两个 key 独立保存,互不干扰。
|
||||
"""
|
||||
import copy
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
if isinstance(branches, dict):
|
||||
# 新格式:字典,独立保存两个 key,互不干扰
|
||||
# 使用深拷贝避免引用共享问题
|
||||
existing = copy.deepcopy(task.branches) if task.branches else {}
|
||||
if isinstance(existing, dict):
|
||||
# 如果只更新 flow_branches,保留已有的 task_process_branches
|
||||
if 'flow_branches' in branches and 'task_process_branches' not in branches:
|
||||
branches['task_process_branches'] = existing.get('task_process_branches', {})
|
||||
# 如果只更新 task_process_branches,保留已有的 flow_branches
|
||||
if 'task_process_branches' in branches and 'flow_branches' not in branches:
|
||||
branches['flow_branches'] = existing.get('flow_branches', [])
|
||||
task.branches = branches
|
||||
else:
|
||||
# 旧格式:列表
|
||||
task.branches = branches if branches else []
|
||||
db.commit()
|
||||
db.refresh(task)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
def get_branches(db: Session, task_id: str) -> Optional[list]:
|
||||
"""获取任务分支数据"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
return task.branches or []
|
||||
return []
|
||||
|
||||
@staticmethod
|
||||
def get_by_status(
|
||||
db: Session, status: str, limit: int = 50, offset: int = 0
|
||||
) -> List[MultiAgentTask]:
|
||||
"""根据状态获取任务记录"""
|
||||
return (
|
||||
db.query(MultiAgentTask)
|
||||
.filter(MultiAgentTask.status == status)
|
||||
.order_by(MultiAgentTask.created_at.desc())
|
||||
.offset(offset)
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_by_generation_id(
|
||||
db: Session, generation_id: str
|
||||
) -> List[MultiAgentTask]:
|
||||
"""根据生成 ID 获取任务记录"""
|
||||
return (
|
||||
db.query(MultiAgentTask)
|
||||
.filter(MultiAgentTask.generation_id == generation_id)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_by_execution_id(
|
||||
db: Session, execution_id: str
|
||||
) -> List[MultiAgentTask]:
|
||||
"""根据执行 ID 获取任务记录"""
|
||||
return (
|
||||
db.query(MultiAgentTask)
|
||||
.filter(MultiAgentTask.execution_id == execution_id)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def delete(db: Session, task_id: str) -> bool:
|
||||
"""删除任务记录"""
|
||||
task = db.query(MultiAgentTask).filter(MultiAgentTask.task_id == task_id).first()
|
||||
if task:
|
||||
db.delete(task)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class UserAgentCRUD:
|
||||
"""用户智能体配置 CRUD 操作"""
|
||||
|
||||
@staticmethod
|
||||
def create(
|
||||
db: Session,
|
||||
user_id: str,
|
||||
agent_name: str,
|
||||
agent_config: dict,
|
||||
) -> UserAgent:
|
||||
"""创建用户智能体配置"""
|
||||
agent = UserAgent(
|
||||
id=str(uuid.uuid4()),
|
||||
user_id=user_id,
|
||||
agent_name=agent_name,
|
||||
agent_config=agent_config,
|
||||
)
|
||||
db.add(agent)
|
||||
db.commit()
|
||||
db.refresh(agent)
|
||||
return agent
|
||||
|
||||
@staticmethod
|
||||
def get_by_id(db: Session, agent_id: str) -> Optional[UserAgent]:
|
||||
"""根据 ID 获取配置"""
|
||||
return db.query(UserAgent).filter(UserAgent.id == agent_id).first()
|
||||
|
||||
@staticmethod
|
||||
def get_by_user_id(
|
||||
db: Session, user_id: str, limit: int = 50
|
||||
) -> List[UserAgent]:
|
||||
"""根据用户 ID 获取所有智能体配置"""
|
||||
return (
|
||||
db.query(UserAgent)
|
||||
.filter(UserAgent.user_id == user_id)
|
||||
.order_by(UserAgent.created_at.desc())
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_by_name(
|
||||
db: Session, user_id: str, agent_name: str
|
||||
) -> List[UserAgent]:
|
||||
"""根据用户 ID 和智能体名称获取配置"""
|
||||
return (
|
||||
db.query(UserAgent)
|
||||
.filter(
|
||||
UserAgent.user_id == user_id,
|
||||
UserAgent.agent_name == agent_name,
|
||||
)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def update_config(
|
||||
db: Session, agent_id: str, agent_config: dict
|
||||
) -> Optional[UserAgent]:
|
||||
"""更新智能体配置"""
|
||||
agent = db.query(UserAgent).filter(UserAgent.id == agent_id).first()
|
||||
if agent:
|
||||
agent.agent_config = agent_config
|
||||
db.commit()
|
||||
db.refresh(agent)
|
||||
return agent
|
||||
|
||||
@staticmethod
|
||||
def delete(db: Session, agent_id: str) -> bool:
|
||||
"""删除智能体配置"""
|
||||
agent = db.query(UserAgent).filter(UserAgent.id == agent_id).first()
|
||||
if agent:
|
||||
db.delete(agent)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def upsert(
|
||||
db: Session,
|
||||
user_id: str,
|
||||
agent_name: str,
|
||||
agent_config: dict,
|
||||
) -> UserAgent:
|
||||
"""更新或插入用户智能体配置(根据 user_id + agent_name 判断唯一性)
|
||||
|
||||
如果已存在相同 user_id 和 agent_name 的记录,则更新配置;
|
||||
否则创建新记录。
|
||||
"""
|
||||
existing = (
|
||||
db.query(UserAgent)
|
||||
.filter(
|
||||
UserAgent.user_id == user_id,
|
||||
UserAgent.agent_name == agent_name,
|
||||
)
|
||||
.first()
|
||||
)
|
||||
if existing:
|
||||
# 更新现有记录
|
||||
existing.agent_config = agent_config
|
||||
db.commit()
|
||||
db.refresh(existing)
|
||||
return existing
|
||||
else:
|
||||
# 创建新记录
|
||||
agent = UserAgent(
|
||||
id=str(uuid.uuid4()),
|
||||
user_id=user_id,
|
||||
agent_name=agent_name,
|
||||
agent_config=agent_config,
|
||||
)
|
||||
db.add(agent)
|
||||
db.commit()
|
||||
db.refresh(agent)
|
||||
return agent
|
||||
|
||||
|
||||
class ExportRecordCRUD:
|
||||
"""导出记录 CRUD 操作"""
|
||||
|
||||
@staticmethod
|
||||
def create(
|
||||
db: Session,
|
||||
task_id: str,
|
||||
user_id: str,
|
||||
export_type: str,
|
||||
file_name: str,
|
||||
file_path: str,
|
||||
file_url: str = "",
|
||||
file_size: int = 0,
|
||||
) -> ExportRecord:
|
||||
"""创建导出记录"""
|
||||
record = ExportRecord(
|
||||
task_id=task_id,
|
||||
user_id=user_id,
|
||||
export_type=export_type,
|
||||
file_name=file_name,
|
||||
file_path=file_path,
|
||||
file_url=file_url,
|
||||
file_size=file_size,
|
||||
)
|
||||
db.add(record)
|
||||
db.commit()
|
||||
db.refresh(record)
|
||||
return record
|
||||
|
||||
@staticmethod
|
||||
def get_by_id(db: Session, record_id: int) -> Optional[ExportRecord]:
|
||||
"""根据 ID 获取记录"""
|
||||
return db.query(ExportRecord).filter(ExportRecord.id == record_id).first()
|
||||
|
||||
@staticmethod
|
||||
def get_by_task_id(
|
||||
db: Session, task_id: str, limit: int = 50
|
||||
) -> List[ExportRecord]:
|
||||
"""根据任务 ID 获取导出记录列表"""
|
||||
return (
|
||||
db.query(ExportRecord)
|
||||
.filter(ExportRecord.task_id == task_id)
|
||||
.order_by(ExportRecord.created_at.desc())
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def get_by_user_id(
|
||||
db: Session, user_id: str, limit: int = 50
|
||||
) -> List[ExportRecord]:
|
||||
"""根据用户 ID 获取导出记录列表"""
|
||||
return (
|
||||
db.query(ExportRecord)
|
||||
.filter(ExportRecord.user_id == user_id)
|
||||
.order_by(ExportRecord.created_at.desc())
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def delete(db: Session, record_id: int) -> bool:
|
||||
"""删除导出记录"""
|
||||
record = db.query(ExportRecord).filter(ExportRecord.id == record_id).first()
|
||||
if record:
|
||||
db.delete(record)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def delete_by_task_id(db: Session, task_id: str) -> bool:
|
||||
"""删除任务的所有导出记录"""
|
||||
records = db.query(ExportRecord).filter(ExportRecord.task_id == task_id).all()
|
||||
if records:
|
||||
for record in records:
|
||||
db.delete(record)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class PlanShareCRUD:
|
||||
"""任务分享 CRUD 操作"""
|
||||
|
||||
@staticmethod
|
||||
def create(
|
||||
db: Session,
|
||||
share_token: str,
|
||||
task_id: str,
|
||||
task_data: dict,
|
||||
expires_at: Optional[datetime] = None,
|
||||
extraction_code: Optional[str] = None,
|
||||
) -> PlanShare:
|
||||
"""创建分享记录"""
|
||||
share = PlanShare(
|
||||
share_token=share_token,
|
||||
extraction_code=extraction_code,
|
||||
task_id=task_id,
|
||||
task_data=task_data,
|
||||
expires_at=expires_at,
|
||||
)
|
||||
db.add(share)
|
||||
db.commit()
|
||||
db.refresh(share)
|
||||
return share
|
||||
|
||||
@staticmethod
|
||||
def get_by_token(db: Session, share_token: str) -> Optional[PlanShare]:
|
||||
"""根据 token 获取分享记录"""
|
||||
return db.query(PlanShare).filter(PlanShare.share_token == share_token).first()
|
||||
|
||||
@staticmethod
|
||||
def get_by_task_id(
|
||||
db: Session, task_id: str, limit: int = 10
|
||||
) -> List[PlanShare]:
|
||||
"""根据任务 ID 获取分享记录列表"""
|
||||
return (
|
||||
db.query(PlanShare)
|
||||
.filter(PlanShare.task_id == task_id)
|
||||
.order_by(PlanShare.created_at.desc())
|
||||
.limit(limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def increment_view_count(db: Session, share_token: str) -> Optional[PlanShare]:
|
||||
"""增加查看次数"""
|
||||
share = db.query(PlanShare).filter(PlanShare.share_token == share_token).first()
|
||||
if share:
|
||||
share.view_count = (share.view_count or 0) + 1
|
||||
db.commit()
|
||||
db.refresh(share)
|
||||
return share
|
||||
|
||||
@staticmethod
|
||||
def delete(db: Session, share_token: str) -> bool:
|
||||
"""删除分享记录"""
|
||||
share = db.query(PlanShare).filter(PlanShare.share_token == share_token).first()
|
||||
if share:
|
||||
db.delete(share)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def delete_by_task_id(db: Session, task_id: str) -> bool:
|
||||
"""删除任务的所有分享记录"""
|
||||
shares = db.query(PlanShare).filter(PlanShare.task_id == task_id).all()
|
||||
if shares:
|
||||
for share in shares:
|
||||
db.delete(share)
|
||||
db.commit()
|
||||
return True
|
||||
return False
|
||||
@@ -1,95 +0,0 @@
|
||||
"""
|
||||
数据库连接管理模块
|
||||
使用 SQLAlchemy ORM,支持同步操作
|
||||
"""
|
||||
|
||||
import os
|
||||
import yaml
|
||||
from typing import Generator
|
||||
from contextlib import contextmanager
|
||||
import json
|
||||
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.orm import sessionmaker, declarative_base
|
||||
from sqlalchemy.pool import QueuePool
|
||||
from sqlalchemy.dialects.postgresql import dialect as pg_dialect
|
||||
|
||||
# 读取配置
|
||||
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
|
||||
try:
|
||||
with open(yaml_file, "r", encoding="utf-8") as file:
|
||||
config = yaml.safe_load(file).get("database", {})
|
||||
except Exception:
|
||||
config = {}
|
||||
|
||||
|
||||
def get_database_url() -> str:
|
||||
"""获取数据库连接 URL"""
|
||||
# 优先使用环境变量
|
||||
host = os.getenv("DB_HOST", config.get("host", "localhost"))
|
||||
port = os.getenv("DB_PORT", config.get("port", "5432"))
|
||||
user = os.getenv("DB_USER", config.get("username", "postgres"))
|
||||
password = os.getenv("DB_PASSWORD", config.get("password", ""))
|
||||
dbname = os.getenv("DB_NAME", config.get("name", "agentcoord"))
|
||||
|
||||
return f"postgresql://{user}:{password}@{host}:{port}/{dbname}"
|
||||
|
||||
|
||||
# 创建引擎
|
||||
DATABASE_URL = get_database_url()
|
||||
engine = create_engine(
|
||||
DATABASE_URL,
|
||||
poolclass=QueuePool,
|
||||
pool_size=config.get("pool_size", 10),
|
||||
max_overflow=config.get("max_overflow", 20),
|
||||
pool_pre_ping=True,
|
||||
echo=False,
|
||||
# JSONB 类型处理器配置
|
||||
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
|
||||
json_deserializer=lambda s: json.loads(s) if isinstance(s, str) else s
|
||||
)
|
||||
|
||||
# 创建会话工厂
|
||||
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
||||
|
||||
# 基础类
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
def get_db() -> Generator:
|
||||
"""
|
||||
获取数据库会话
|
||||
用法: for db in get_db(): ...
|
||||
"""
|
||||
db = SessionLocal()
|
||||
try:
|
||||
yield db
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def get_db_context() -> Generator:
|
||||
"""
|
||||
上下文管理器方式获取数据库会话
|
||||
用法: with get_db_context() as db: ...
|
||||
"""
|
||||
db = SessionLocal()
|
||||
try:
|
||||
yield db
|
||||
db.commit()
|
||||
except Exception as e:
|
||||
db.rollback()
|
||||
raise
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
|
||||
def test_connection() -> bool:
|
||||
"""测试数据库连接"""
|
||||
try:
|
||||
with engine.connect() as conn:
|
||||
conn.execute(text("SELECT 1"))
|
||||
return True
|
||||
except Exception as e:
|
||||
return False
|
||||
@@ -1,22 +0,0 @@
|
||||
"""
|
||||
数据库初始化脚本
|
||||
运行此脚本创建所有表结构
|
||||
基于 DATABASE_DESIGN.md 设计
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from db.database import engine, Base
|
||||
from db.models import MultiAgentTask, UserAgent, PlanShare
|
||||
|
||||
|
||||
def init_database():
|
||||
"""初始化数据库表结构"""
|
||||
Base.metadata.create_all(bind=engine)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
init_database()
|
||||
@@ -1,170 +0,0 @@
|
||||
"""
|
||||
SQLAlchemy ORM 数据模型
|
||||
对应数据库表结构 (基于 DATABASE_DESIGN.md)
|
||||
"""
|
||||
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum as PyEnum
|
||||
from sqlalchemy import Column, String, Text, DateTime, Integer, Enum, Index, ForeignKey, Boolean
|
||||
from sqlalchemy.dialects.postgresql import JSONB
|
||||
from sqlalchemy.orm import relationship
|
||||
|
||||
from .database import Base
|
||||
|
||||
|
||||
class TaskStatus(str, PyEnum):
|
||||
"""任务状态枚举"""
|
||||
GENERATING = "generating" # 生成中 - TaskProcess 生成阶段
|
||||
EXECUTING = "executing" # 执行中 - 任务执行阶段
|
||||
STOPPED = "stopped" # 已停止 - 用户手动停止执行
|
||||
COMPLETED = "completed" # 已完成 - 任务正常完成
|
||||
|
||||
|
||||
def utc_now():
|
||||
"""获取当前 UTC 时间"""
|
||||
return datetime.now(timezone.utc)
|
||||
|
||||
|
||||
class MultiAgentTask(Base):
|
||||
"""多智能体任务记录模型"""
|
||||
__tablename__ = "multi_agent_tasks"
|
||||
|
||||
task_id = Column(String(64), primary_key=True)
|
||||
user_id = Column(String(64), nullable=False, index=True)
|
||||
query = Column(Text, nullable=False)
|
||||
agents_info = Column(JSONB, nullable=False)
|
||||
task_outline = Column(JSONB)
|
||||
assigned_agents = Column(JSONB)
|
||||
agent_scores = Column(JSONB)
|
||||
result = Column(JSONB)
|
||||
status = Column(
|
||||
Enum(TaskStatus, name="task_status_enum", create_type=False),
|
||||
default=TaskStatus.GENERATING,
|
||||
nullable=False
|
||||
)
|
||||
execution_count = Column(Integer, default=0, nullable=False)
|
||||
generation_id = Column(String(64))
|
||||
execution_id = Column(String(64))
|
||||
rehearsal_log = Column(JSONB)
|
||||
branches = Column(JSONB) # 任务大纲探索分支数据
|
||||
is_pinned = Column(Boolean, default=False, nullable=False) # 置顶标志
|
||||
created_at = Column(DateTime(timezone=True), default=utc_now)
|
||||
updated_at = Column(DateTime(timezone=True), default=utc_now, onupdate=utc_now)
|
||||
|
||||
__table_args__ = (
|
||||
Index("idx_multi_agent_tasks_status", "status"),
|
||||
Index("idx_multi_agent_tasks_generation_id", "generation_id"),
|
||||
Index("idx_multi_agent_tasks_execution_id", "execution_id"),
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""转换为字典"""
|
||||
return {
|
||||
"task_id": self.task_id,
|
||||
"user_id": self.user_id,
|
||||
"query": self.query,
|
||||
"agents_info": self.agents_info,
|
||||
"task_outline": self.task_outline,
|
||||
"assigned_agents": self.assigned_agents,
|
||||
"agent_scores": self.agent_scores,
|
||||
"result": self.result,
|
||||
"status": self.status.value if self.status else None,
|
||||
"execution_count": self.execution_count,
|
||||
"generation_id": self.generation_id,
|
||||
"execution_id": self.execution_id,
|
||||
"rehearsal_log": self.rehearsal_log,
|
||||
"branches": self.branches,
|
||||
"is_pinned": self.is_pinned,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
|
||||
}
|
||||
|
||||
|
||||
class ExportRecord(Base):
|
||||
"""导出记录模型"""
|
||||
__tablename__ = "export_records"
|
||||
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
task_id = Column(String(64), nullable=False, index=True) # 关联任务ID
|
||||
user_id = Column(String(64), nullable=False, index=True) # 用户ID
|
||||
export_type = Column(String(32), nullable=False) # 导出类型: doc/markdown/mindmap/infographic/excel/ppt
|
||||
file_name = Column(String(256), nullable=False) # 文件名
|
||||
file_path = Column(String(512), nullable=False) # 服务器存储路径
|
||||
file_url = Column(String(512)) # 访问URL
|
||||
file_size = Column(Integer, default=0) # 文件大小(字节)
|
||||
created_at = Column(DateTime(timezone=True), default=utc_now)
|
||||
|
||||
__table_args__ = (
|
||||
Index("idx_export_records_task_user", "task_id", "user_id"),
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""转换为字典"""
|
||||
return {
|
||||
"id": self.id,
|
||||
"task_id": self.task_id,
|
||||
"user_id": self.user_id,
|
||||
"export_type": self.export_type,
|
||||
"file_name": self.file_name,
|
||||
"file_path": self.file_path,
|
||||
"file_url": self.file_url,
|
||||
"file_size": self.file_size,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
}
|
||||
|
||||
|
||||
class UserAgent(Base):
|
||||
"""用户保存的智能体配置模型 (可选表)"""
|
||||
__tablename__ = "user_agents"
|
||||
|
||||
id = Column(String(64), primary_key=True)
|
||||
user_id = Column(String(64), nullable=False, index=True)
|
||||
agent_name = Column(String(100), nullable=False)
|
||||
agent_config = Column(JSONB, nullable=False)
|
||||
created_at = Column(DateTime(timezone=True), default=utc_now)
|
||||
|
||||
__table_args__ = (
|
||||
Index("idx_user_agents_user_created", "user_id", "created_at"),
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""转换为字典"""
|
||||
return {
|
||||
"id": self.id,
|
||||
"user_id": self.user_id,
|
||||
"agent_name": self.agent_name,
|
||||
"agent_config": self.agent_config,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
}
|
||||
|
||||
|
||||
class PlanShare(Base):
|
||||
"""任务分享记录模型"""
|
||||
__tablename__ = "plan_shares"
|
||||
|
||||
id = Column(Integer, primary_key=True, autoincrement=True)
|
||||
share_token = Column(String(64), unique=True, index=True, nullable=False) # 唯一分享码
|
||||
extraction_code = Column(String(8), nullable=True) # 提取码(4位字母数字)
|
||||
task_id = Column(String(64), nullable=False, index=True) # 关联的任务ID
|
||||
task_data = Column(JSONB, nullable=False) # 完整的任务数据(脱敏后)
|
||||
created_at = Column(DateTime(timezone=True), default=utc_now)
|
||||
expires_at = Column(DateTime(timezone=True), nullable=True) # 过期时间
|
||||
view_count = Column(Integer, default=0) # 查看次数
|
||||
|
||||
__table_args__ = (
|
||||
Index("idx_plan_shares_token", "share_token"),
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""转换为字典"""
|
||||
return {
|
||||
"id": self.id,
|
||||
"share_token": self.share_token,
|
||||
"extraction_code": self.extraction_code,
|
||||
"task_id": self.task_id,
|
||||
"task_data": self.task_data,
|
||||
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||
"expires_at": self.expires_at.isoformat() if self.expires_at else None,
|
||||
"view_count": self.view_count,
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
-- AgentCoord 数据库表结构
|
||||
-- 基于 DATABASE_DESIGN.md 设计
|
||||
-- 执行方式: psql -U postgres -d agentcoord -f schema.sql
|
||||
|
||||
-- =============================================================================
|
||||
-- 表1: multi_agent_tasks (多智能体任务记录)
|
||||
-- 状态枚举: pending/planning/generating/executing/completed/failed
|
||||
-- =============================================================================
|
||||
CREATE TABLE IF NOT EXISTS multi_agent_tasks (
|
||||
task_id VARCHAR(64) PRIMARY KEY,
|
||||
user_id VARCHAR(64) NOT NULL,
|
||||
query TEXT NOT NULL,
|
||||
agents_info JSONB NOT NULL,
|
||||
task_outline JSONB,
|
||||
assigned_agents JSONB,
|
||||
agent_scores JSONB,
|
||||
result JSONB,
|
||||
status VARCHAR(20) DEFAULT 'pending',
|
||||
execution_count INTEGER DEFAULT 0,
|
||||
generation_id VARCHAR(64),
|
||||
execution_id VARCHAR(64),
|
||||
rehearsal_log JSONB,
|
||||
branches JSONB, -- 任务大纲探索分支数据
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
-- 索引
|
||||
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_user_id ON multi_agent_tasks(user_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_created_at ON multi_agent_tasks(created_at DESC);
|
||||
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_status ON multi_agent_tasks(status);
|
||||
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_generation_id ON multi_agent_tasks(generation_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_multi_agent_tasks_execution_id ON multi_agent_tasks(execution_id);
|
||||
|
||||
-- =============================================================================
|
||||
-- 表2: user_agents (用户保存的智能体配置) - 可选表
|
||||
-- =============================================================================
|
||||
CREATE TABLE IF NOT EXISTS user_agents (
|
||||
id VARCHAR(64) PRIMARY KEY,
|
||||
user_id VARCHAR(64) NOT NULL,
|
||||
agent_name VARCHAR(100) NOT NULL,
|
||||
agent_config JSONB NOT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_user_agents_user_id ON user_agents(user_id);
|
||||
|
||||
-- =============================================================================
|
||||
-- 更新时间触发器函数
|
||||
-- =============================================================================
|
||||
CREATE OR REPLACE FUNCTION update_updated_at_column()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
NEW.updated_at = CURRENT_TIMESTAMP;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ language 'plpgsql';
|
||||
|
||||
-- 为 multi_agent_tasks 表创建触发器
|
||||
CREATE TRIGGER update_multi_agent_tasks_updated_at
|
||||
BEFORE UPDATE ON multi_agent_tasks
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION update_updated_at_column();
|
||||
|
||||
-- =============================================================================
|
||||
-- 表3: export_records (导出记录)
|
||||
-- =============================================================================
|
||||
CREATE TABLE IF NOT EXISTS export_records (
|
||||
id SERIAL PRIMARY KEY,
|
||||
task_id VARCHAR(64) NOT NULL,
|
||||
user_id VARCHAR(64) NOT NULL,
|
||||
export_type VARCHAR(32) NOT NULL,
|
||||
file_name VARCHAR(256) NOT NULL,
|
||||
file_path VARCHAR(512) NOT NULL,
|
||||
file_url VARCHAR(512),
|
||||
file_size INTEGER DEFAULT 0,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_export_records_task_user ON export_records(task_id, user_id);
|
||||
|
||||
-- =============================================================================
|
||||
-- 表4: plan_shares (任务分享记录)
|
||||
-- =============================================================================
|
||||
CREATE TABLE IF NOT EXISTS plan_shares (
|
||||
id SERIAL PRIMARY KEY,
|
||||
share_token VARCHAR(64) NOT NULL UNIQUE,
|
||||
extraction_code VARCHAR(8),
|
||||
task_id VARCHAR(64) NOT NULL,
|
||||
task_data JSONB NOT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
|
||||
expires_at TIMESTAMP WITH TIME ZONE,
|
||||
view_count INTEGER DEFAULT 0
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_plan_shares_token ON plan_shares(share_token);
|
||||
CREATE INDEX IF NOT EXISTS idx_plan_shares_task_id ON plan_shares(task_id);
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
RAISE NOTICE '✅ PostgreSQL 数据库表结构创建完成!';
|
||||
RAISE NOTICE '表: multi_agent_tasks (多智能体任务记录)';
|
||||
RAISE NOTICE '表: user_agents (用户智能体配置)';
|
||||
RAISE NOTICE '表: export_records (导出记录)';
|
||||
RAISE NOTICE '表: plan_shares (任务分享记录)';
|
||||
END $$;
|
||||
@@ -1,13 +1,7 @@
|
||||
Flask==3.0.2
|
||||
openai==2.8.1
|
||||
openai==0.28.1
|
||||
PyYAML==6.0.1
|
||||
termcolor==2.4.0
|
||||
groq==0.4.2
|
||||
mistralai==0.1.6
|
||||
flask-socketio==5.3.6
|
||||
python-socketio==5.11.0
|
||||
simple-websocket==1.0.0
|
||||
eventlet==0.40.4
|
||||
python-docx==1.1.2
|
||||
openpyxl==3.1.5
|
||||
python-pptx==1.0.2
|
||||
socksio==1.0.0
|
||||
|
||||
3821
backend/server.py
@@ -21,11 +21,12 @@ services:
|
||||
ports:
|
||||
- "8000:8000"
|
||||
environment:
|
||||
- OPENAI_API_BASE=htts://api.openai.com
|
||||
- OPENAI_API_KEY=
|
||||
- OPENAI_API_MODEL=gpt-4-turbo-preview
|
||||
- FAST_DESIGN_MODE=True
|
||||
- OPENAI_API_BASE=https://api.moleapi.com/v1
|
||||
- OPENAI_API_KEY=sk-sps7FBCbEvu85DfPoS8SdnPwYLEoW7u5Dd8vCDTXqPLpHuyb
|
||||
- OPENAI_API_MODEL=gpt-4.1-mini
|
||||
- FAST_DESIGN_MODE=False
|
||||
- GROQ_API_KEY=
|
||||
- USE_CACHE=True
|
||||
networks:
|
||||
- agentcoord-network
|
||||
|
||||
|
||||
@@ -15,8 +15,9 @@ FROM base AS runner
|
||||
WORKDIR /app
|
||||
EXPOSE 8080/tcp
|
||||
COPY .npmrc ./
|
||||
RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
|
||||
COPY src ./src
|
||||
COPY modern.config.ts package.json ./
|
||||
COPY --from=builder /app/dist ./dist
|
||||
COPY --from=builder /app/node_modules ./node_modules
|
||||
ENV API_BASE=
|
||||
CMD ["npm", "run", "serve"]
|
||||
|
||||
@@ -351,7 +351,7 @@ export default observer(({ style = {} }: IPlanModification) => {
|
||||
overflowY: 'auto',
|
||||
}}
|
||||
>
|
||||
<Box sx={{ marginBottom: '6px', fontWeight: '600' }}>Assignment</Box>
|
||||
<Box sx={{ marginBottom: '6px', fontWeight: '600' }}>已选智能体</Box>
|
||||
<Box>
|
||||
{Object.keys(agentSelections).map(selectionId => (
|
||||
<Box
|
||||
@@ -405,7 +405,7 @@ export default observer(({ style = {} }: IPlanModification) => {
|
||||
overflowY: 'auto',
|
||||
}}
|
||||
>
|
||||
<Box sx={{ marginBottom: '4px', fontWeight: '600' }}>Comparison</Box>
|
||||
<Box sx={{ marginBottom: '4px', fontWeight: '600' }}>候选智能体</Box>
|
||||
|
||||
<Box
|
||||
sx={{
|
||||
|
||||
@@ -65,7 +65,6 @@ export default observer(({ agent, style = {} }: IAgentCardProps) => (
|
||||
userSelect: 'none',
|
||||
}}
|
||||
>
|
||||
{agent.profile}
|
||||
</Box>
|
||||
</Box>
|
||||
<Box
|
||||
@@ -105,7 +104,7 @@ export default observer(({ agent, style = {} }: IAgentCardProps) => (
|
||||
marginLeft: '2px',
|
||||
}}
|
||||
>
|
||||
Current Duty:
|
||||
当前职责:
|
||||
</Box>
|
||||
<Box>
|
||||
{agent.actions.map((action, index) => (
|
||||
|
||||
@@ -4,6 +4,7 @@ import Box from '@mui/material/Box';
|
||||
import MoreHorizIcon from '@mui/icons-material/MoreHoriz';
|
||||
import UnfoldLessIcon from '@mui/icons-material/UnfoldLess';
|
||||
import { IAgentAction, globalStorage } from '@/storage';
|
||||
import { getActionTypeDisplayText } from '@/storage/plan/action'; // 导入映射函数
|
||||
|
||||
export interface IDutyItem {
|
||||
action: IAgentAction;
|
||||
@@ -38,7 +39,7 @@ export default React.memo<IDutyItem>(({ action, style = {} }) => {
|
||||
..._style,
|
||||
}}
|
||||
>
|
||||
{action.type}
|
||||
{getActionTypeDisplayText(action.type)}
|
||||
<Box
|
||||
onClick={() => setExpand(true)}
|
||||
sx={{
|
||||
@@ -83,7 +84,7 @@ export default React.memo<IDutyItem>(({ action, style = {} }) => {
|
||||
marginLeft: '2px',
|
||||
}}
|
||||
>
|
||||
{action.type}
|
||||
{getActionTypeDisplayText(action.type)}
|
||||
</Box>
|
||||
<Divider
|
||||
sx={{
|
||||
|
||||
@@ -88,7 +88,7 @@ export default observer(({ style = {} }: IAgentBoardProps) => {
|
||||
'& > input': { display: 'none' },
|
||||
}}
|
||||
>
|
||||
<Title title="Agent Board" />
|
||||
<Title title="智能体库" />
|
||||
<Box
|
||||
sx={{
|
||||
flexGrow: 1,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import AbigailChen from '@/static/AgentIcons/Abigail_Chen.png';
|
||||
import AbigailChenMilitary from '@/static/AgentIcons/Abigail_Chen_military.png';
|
||||
import AdamSmith from '@/static/AgentIcons/Adam_Smith.png';
|
||||
import ArthurBurton from '@/static/AgentIcons/Arthur_Burton.png';
|
||||
import AyeshaKhan from '@/static/AgentIcons/Ayesha_Khan.png';
|
||||
@@ -8,9 +9,11 @@ import EddyLin from '@/static/AgentIcons/Eddy_Lin.png';
|
||||
import FranciscoLopez from '@/static/AgentIcons/Francisco_Lopez.png';
|
||||
import GiorgioRossi from '@/static/AgentIcons/Giorgio_Rossi.png';
|
||||
import HaileyJohnson from '@/static/AgentIcons/Hailey_Johnson.png';
|
||||
import HaileyJohnsonMilitary from '@/static/AgentIcons/Hailey_Johnson_military.png';
|
||||
import IsabellaRodriguez from '@/static/AgentIcons/Isabella_Rodriguez.png';
|
||||
import JaneMoreno from '@/static/AgentIcons/Jane_Moreno.png';
|
||||
import JenniferMoore from '@/static/AgentIcons/Jennifer_Moore.png';
|
||||
import JenniferMooreMilitary from '@/static/AgentIcons/Jennifer_Moore_military.png';
|
||||
import JohnLin from '@/static/AgentIcons/John_Lin.png';
|
||||
import KlausMueller from '@/static/AgentIcons/Klaus_Mueller.png';
|
||||
import LatoyaWilliams from '@/static/AgentIcons/Latoya_Williams.png';
|
||||
@@ -27,6 +30,7 @@ import Unknown from '@/static/AgentIcons/Unknow.png';
|
||||
|
||||
export enum IconName {
|
||||
AbigailChen = 'Abigail_Chen',
|
||||
AbigailChenMilitary = 'Abigail_Chen_military',
|
||||
AdamSmith = 'Adam_Smith',
|
||||
ArthurBurton = 'Arthur_Burton',
|
||||
AyeshaKhan = 'Ayesha_Khan',
|
||||
@@ -45,9 +49,11 @@ export enum IconName {
|
||||
GabeSmith = 'Gabe_Smith',
|
||||
GiorgioRossi = 'Giorgio_Rossi',
|
||||
HaileyJohnson = 'Hailey_Johnson',
|
||||
HaileyJohnsonMilitary = 'Hailey_Johnson_military',
|
||||
IsabellaRodriguez = 'Isabella_Rodriguez',
|
||||
JaneMoreno = 'Jane_Moreno',
|
||||
JenniferMoore = 'Jennifer_Moore',
|
||||
JenniferMooreMilitary = 'Jennifer_Moore_military',
|
||||
JohnLin = 'John_Lin',
|
||||
KlausMueller = 'Klaus_Mueller',
|
||||
LatoyaWilliams = 'Latoya_Williams',
|
||||
@@ -83,6 +89,7 @@ export const IconMap = new Proxy<{ [key: string]: IconName }>(
|
||||
export const IconUrl: { [key in IconName]: string } = {
|
||||
[IconName.Unknown]: Unknown,
|
||||
[IconName.AbigailChen]: AbigailChen,
|
||||
[IconName.AbigailChenMilitary]: AbigailChenMilitary,
|
||||
[IconName.AdamSmith]: AdamSmith,
|
||||
[IconName.ArthurBurton]: ArthurBurton,
|
||||
[IconName.AyeshaKhan]: AyeshaKhan,
|
||||
@@ -101,9 +108,11 @@ export const IconUrl: { [key in IconName]: string } = {
|
||||
[IconName.GabeSmith]: AbigailChen,
|
||||
[IconName.GiorgioRossi]: GiorgioRossi,
|
||||
[IconName.HaileyJohnson]: HaileyJohnson,
|
||||
[IconName.HaileyJohnsonMilitary]: HaileyJohnsonMilitary,
|
||||
[IconName.IsabellaRodriguez]: IsabellaRodriguez,
|
||||
[IconName.JaneMoreno]: JaneMoreno,
|
||||
[IconName.JenniferMoore]: JenniferMoore,
|
||||
[IconName.JenniferMooreMilitary]: JenniferMooreMilitary,
|
||||
[IconName.JohnLin]: JohnLin,
|
||||
[IconName.KlausMueller]: KlausMueller,
|
||||
[IconName.LatoyaWilliams]: LatoyaWilliams,
|
||||
|
||||
@@ -151,7 +151,7 @@ export default observer(({ style = {} }: { style?: SxProps }) => {
|
||||
>
|
||||
<LogoIcon />
|
||||
<Box sx={{ marginLeft: '6px', fontWeight: 800, fontSize: '20px' }}>
|
||||
AGENTCOORD
|
||||
多智能体协同平台
|
||||
</Box>
|
||||
</Box>
|
||||
) : (
|
||||
|
||||
@@ -8,6 +8,7 @@ import RemoveIcon from '@mui/icons-material/Remove';
|
||||
import AdjustIcon from '@mui/icons-material/Adjust';
|
||||
import { ObjectProps, ProcessProps } from './interface';
|
||||
import AgentIcon from '@/components/AgentIcon';
|
||||
import DescriptionCard from '@/components/ProcessDiscription/DescriptionCard';
|
||||
import { globalStorage } from '@/storage';
|
||||
|
||||
export interface IEditObjectProps {
|
||||
@@ -329,6 +330,13 @@ export const ProcessCard: React.FC<IProcessCardProps> = React.memo(
|
||||
// handleEditStep(step.name, { ...step, task: text });
|
||||
}}
|
||||
/>
|
||||
{/* 这里直接渲染对应的 DescriptionCard */}
|
||||
{/* {(() => {
|
||||
const step = globalStorage.planManager.currentPlan.find(
|
||||
s => s.id === process.id
|
||||
);
|
||||
return step ? <DescriptionCard step={step} /> : null;
|
||||
})()} */}
|
||||
</Box>
|
||||
)}
|
||||
</Box>
|
||||
|
||||
@@ -118,10 +118,10 @@ const D3Graph: React.FC<D3GraphProps> = ({
|
||||
stepName: stepCardName,
|
||||
objectName: objectCardName,
|
||||
type,
|
||||
x1: objectRect.left + objectRect.width,
|
||||
y1: objectRect.top + 0.5 * objectRect.height,
|
||||
x2: stepRect.left,
|
||||
y2: stepRect.top + 0.5 * stepRect.height,
|
||||
x1: stepRect.left + stepRect.width,
|
||||
y1: stepRect.top + 0.5 * stepRect.height,
|
||||
x2: objectRect.left,
|
||||
y2: objectRect.top + 0.5 * objectRect.height,
|
||||
};
|
||||
});
|
||||
const objectMetrics = calculateLineMetrics(cardRect, 'object');
|
||||
@@ -152,17 +152,17 @@ const D3Graph: React.FC<D3GraphProps> = ({
|
||||
userSelect: 'none',
|
||||
}}
|
||||
>
|
||||
<marker
|
||||
id="arrowhead"
|
||||
markerWidth="4"
|
||||
markerHeight="4"
|
||||
refX="2"
|
||||
refY="2"
|
||||
orient="auto"
|
||||
markerUnits="strokeWidth"
|
||||
>
|
||||
<path d="M0,0 L4,2 L0,4 z" fill="#E5E5E5" />
|
||||
</marker>
|
||||
<marker
|
||||
id="arrowhead"
|
||||
markerWidth="2"
|
||||
markerHeight="2"
|
||||
refX="1"
|
||||
refY="1"
|
||||
orient="auto"
|
||||
markerUnits="strokeWidth"
|
||||
>
|
||||
<path d="M0,0 L2,1 L0,2 z" fill="#E5E5E5" />
|
||||
</marker>
|
||||
<marker
|
||||
id="starter"
|
||||
markerWidth="4"
|
||||
@@ -184,7 +184,7 @@ const D3Graph: React.FC<D3GraphProps> = ({
|
||||
fill="#898989"
|
||||
fontWeight="800"
|
||||
>
|
||||
Key Object
|
||||
产出
|
||||
</text>
|
||||
<line
|
||||
x1={objectLine.x}
|
||||
@@ -204,7 +204,7 @@ const D3Graph: React.FC<D3GraphProps> = ({
|
||||
fill="#898989"
|
||||
fontWeight="800"
|
||||
>
|
||||
Process
|
||||
流程
|
||||
</text>
|
||||
<line
|
||||
x1={processLine.x}
|
||||
|
||||
@@ -53,7 +53,7 @@ export default observer(() => {
|
||||
const handleEditContent = (stepTaskId: string, newContent: string) => {
|
||||
globalStorage.setStepTaskContent(stepTaskId, newContent);
|
||||
};
|
||||
const WidthRatio = ['30%', '15%', '52.5%'];
|
||||
const WidthRatio = ['35%', '10%', '50%'];
|
||||
|
||||
const [cardRefMapReady, setCardRefMapReady] = React.useState(false);
|
||||
|
||||
@@ -175,6 +175,34 @@ export default observer(() => {
|
||||
sx={{ display: 'flex' }}
|
||||
ref={ref}
|
||||
>
|
||||
<Box
|
||||
sx={{
|
||||
// display: 'flex',
|
||||
alignItems: 'center',
|
||||
// width: WidthRatio[2],
|
||||
justifyContent: 'center',
|
||||
flex: `0 0 ${WidthRatio[2]}`,
|
||||
}}
|
||||
>
|
||||
{name && (
|
||||
<ProcessCard
|
||||
process={{
|
||||
id,
|
||||
name,
|
||||
icons: agentIcons,
|
||||
agents,
|
||||
content,
|
||||
cardRef: getCardRef(`process.${name}`),
|
||||
}}
|
||||
handleProcessClick={handleProcessClick}
|
||||
isFocusing={focusingStepTaskId === id}
|
||||
isAddActive={id === activeProcessIdAdd}
|
||||
handleAddActive={handleProcessAdd}
|
||||
handleEditContent={handleEditContent}
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
<Box sx={{ flex: `0 0 ${WidthRatio[1]}` }} />
|
||||
<Box
|
||||
sx={{
|
||||
display: 'flex',
|
||||
@@ -209,34 +237,6 @@ export default observer(() => {
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
<Box sx={{ flex: `0 0 ${WidthRatio[1]}` }} />
|
||||
<Box
|
||||
sx={{
|
||||
// display: 'flex',
|
||||
alignItems: 'center',
|
||||
// width: WidthRatio[2],
|
||||
justifyContent: 'center',
|
||||
flex: `0 0 ${WidthRatio[2]}`,
|
||||
}}
|
||||
>
|
||||
{name && (
|
||||
<ProcessCard
|
||||
process={{
|
||||
id,
|
||||
name,
|
||||
icons: agentIcons,
|
||||
agents,
|
||||
content,
|
||||
cardRef: getCardRef(`process.${name}`),
|
||||
}}
|
||||
handleProcessClick={handleProcessClick}
|
||||
isFocusing={focusingStepTaskId === id}
|
||||
isAddActive={id === activeProcessIdAdd}
|
||||
handleAddActive={handleProcessAdd}
|
||||
handleEditContent={handleEditContent}
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
</Box>
|
||||
),
|
||||
)}
|
||||
|
||||
@@ -25,7 +25,7 @@ export default observer(({ style = {} }: { style?: SxProps }) => {
|
||||
...style,
|
||||
}}
|
||||
>
|
||||
<Title title="Plan Outline" />
|
||||
<Title title="任务大纲" />
|
||||
<Box
|
||||
sx={{
|
||||
position: 'relative',
|
||||
|
||||
@@ -19,7 +19,7 @@ export default observer(({ style = {} }: { style?: SxProps }) => {
|
||||
...style,
|
||||
}}
|
||||
>
|
||||
<Title title="Task Process" />
|
||||
<Title title="任务流程" />
|
||||
<Stack
|
||||
spacing={1}
|
||||
sx={{
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import React from 'react';
|
||||
import React, { useState } from 'react';
|
||||
import { observer } from 'mobx-react-lite';
|
||||
import { Divider, SxProps } from '@mui/material';
|
||||
import { Divider, SxProps, Tooltip, Typography} from '@mui/material';
|
||||
import Box from '@mui/material/Box';
|
||||
import UnfoldLessIcon from '@mui/icons-material/UnfoldLess';
|
||||
import MoreHorizIcon from '@mui/icons-material/MoreHoriz';
|
||||
@@ -9,6 +9,7 @@ import { globalStorage } from '@/storage';
|
||||
import type { IExecuteStepHistoryItem } from '@/apis/execute-plan';
|
||||
import AgentIcon from '@/components/AgentIcon';
|
||||
import { getAgentActionStyle } from '@/storage/plan';
|
||||
import { getActionTypeDisplayText } from '@/storage/plan/action';
|
||||
|
||||
export interface IStepHistoryItemProps {
|
||||
item: IExecuteStepHistoryItem;
|
||||
@@ -46,6 +47,170 @@ export default observer(
|
||||
}, 10);
|
||||
}
|
||||
}, [expand]);
|
||||
|
||||
interface DataSpaceCard {
|
||||
name: string;
|
||||
data_space: string;
|
||||
doId: string;
|
||||
fromRepo: string;
|
||||
}
|
||||
|
||||
interface DataSpaceIndicatorProps {
|
||||
cards?: DataSpaceCard[];
|
||||
}
|
||||
|
||||
const DataSpaceIndicator = ({ cards = [] }: DataSpaceIndicatorProps) => {
|
||||
const [currentCard, setCurrentCard] = useState(0);
|
||||
|
||||
if (!cards || cards.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const tooltipContent = (
|
||||
<>
|
||||
{/* 添加标题 */}
|
||||
<Box
|
||||
sx={{
|
||||
display: 'flex',
|
||||
justifyContent: 'space-between',
|
||||
alignItems: 'center',
|
||||
padding: '8px 12px',
|
||||
backgroundColor: '#1565c0',
|
||||
color: 'white',
|
||||
borderRadius: '4px 4px 0 0',
|
||||
fontWeight: 'bold',
|
||||
fontSize: '14px',
|
||||
}}
|
||||
>
|
||||
<span>数联网搜索结果</span>
|
||||
<Box sx={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
|
||||
<span style={{ fontSize: '12px', color: 'rgba(255,255,255,0.8)' }}>
|
||||
{currentCard + 1}/{cards.length}
|
||||
</span>
|
||||
<button
|
||||
onClick={(e) => {
|
||||
e.stopPropagation();
|
||||
setCurrentCard((prev) => (prev + 1) % cards.length);
|
||||
}}
|
||||
style={{
|
||||
fontSize: '12px',
|
||||
padding: '2px 8px',
|
||||
backgroundColor: 'white',
|
||||
color: '#1565c0',
|
||||
border: 'none',
|
||||
borderRadius: '3px',
|
||||
cursor: 'pointer',
|
||||
fontWeight: 'bold'
|
||||
}}
|
||||
>
|
||||
下一个
|
||||
</button>
|
||||
</Box>
|
||||
</Box>
|
||||
<Box
|
||||
sx={{
|
||||
padding: '12px',
|
||||
borderRadius: '4px',
|
||||
backgroundColor: '#e3f2fd',
|
||||
border: '1px solid #1565c0',
|
||||
minWidth: '450px', // 设置卡片最小宽度
|
||||
}}
|
||||
>
|
||||
<Box sx={{ display: 'flex', alignItems: 'flex-start', marginBottom: '4px' }}>
|
||||
<Typography variant="caption" sx={{ fontWeight: 'bold', color: '#666', minWidth: '60px' }}>
|
||||
名称:
|
||||
</Typography>
|
||||
<Typography variant="body2" sx={{ color: 'black' }}>
|
||||
{cards[currentCard]?.name}
|
||||
</Typography>
|
||||
</Box>
|
||||
|
||||
<Box sx={{ display: 'flex', alignItems: 'flex-start', marginBottom: '4px' }}>
|
||||
<Typography variant="caption" sx={{ fontWeight: 'bold', color: '#666', minWidth: '60px' }}>
|
||||
数据空间:
|
||||
</Typography>
|
||||
<Typography variant="body2" sx={{ color: 'black' }}>
|
||||
{cards[currentCard]?.data_space}
|
||||
</Typography>
|
||||
</Box>
|
||||
|
||||
<Box sx={{ display: 'flex', alignItems: 'flex-start', marginBottom: '4px' }}>
|
||||
<Typography variant="caption" sx={{ fontWeight: 'bold', color: '#666', minWidth: '60px' }}>
|
||||
DOID:
|
||||
</Typography>
|
||||
<Typography variant="body2" sx={{ color: 'black' }}>
|
||||
{cards[currentCard]?.doId}
|
||||
</Typography>
|
||||
</Box>
|
||||
|
||||
<Box sx={{ display: 'flex', alignItems: 'flex-start', marginBottom: '4px' }}>
|
||||
<Typography variant="caption" sx={{ fontWeight: 'bold', color: '#666', minWidth: '60px' }}>
|
||||
来源仓库:
|
||||
</Typography>
|
||||
<Typography variant="body2" sx={{ color: 'black' }}>
|
||||
{cards[currentCard]?.fromRepo}
|
||||
</Typography>
|
||||
</Box>
|
||||
</Box>
|
||||
</>
|
||||
);
|
||||
|
||||
return (
|
||||
<Tooltip
|
||||
title={tooltipContent}
|
||||
arrow
|
||||
slotProps={{
|
||||
tooltip: {
|
||||
sx: {
|
||||
maxWidth: 500, // 只需要这一行来增加宽度
|
||||
},
|
||||
},
|
||||
}}
|
||||
>
|
||||
<Box
|
||||
sx={{
|
||||
display: 'inline-flex',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
width: '20px',
|
||||
height: '20px',
|
||||
borderRadius: '50%',
|
||||
backgroundColor: '#9e9e9e',
|
||||
color: 'white',
|
||||
fontSize: '12px',
|
||||
fontWeight: '600',
|
||||
cursor: 'pointer',
|
||||
'&:hover': {
|
||||
backgroundColor: '#1565c0',
|
||||
}
|
||||
}}
|
||||
>
|
||||
{cards.length}
|
||||
</Box>
|
||||
</Tooltip>
|
||||
);
|
||||
};
|
||||
|
||||
const cardData = [
|
||||
{
|
||||
name: "3D Semantic-Geometric Corrosion Mapping Implementations",
|
||||
data_space: "江苏省产研院",
|
||||
doId: "bdware.scenario/d8f3ff8c-3fb3-4573-88a6-5dd823627c37",
|
||||
fromRepo: "https://arxiv.org/abs/2404.13691"
|
||||
},
|
||||
{
|
||||
name: "RustSEG -- Automated segmentation of corrosion using deep learning",
|
||||
data_space: "江苏省产研院",
|
||||
doId: "bdware.scenario/67445299-110a-4a4e-9fda-42e4b5a493c2",
|
||||
fromRepo: "https://arxiv.org/abs/2205.05426"
|
||||
},
|
||||
{
|
||||
name: "Pixel-level Corrosion Detection on Metal Constructions by Fusion of Deep Learning Semantic and Contour Segmentation",
|
||||
data_space: "江苏省产研院",
|
||||
doId: "bdware.scenario/115d5135-85d3-4123-8b81-9eb9f07b6153",
|
||||
fromRepo: "https://arxiv.org/abs/2008.05204"
|
||||
},
|
||||
];
|
||||
const s = { ...getAgentActionStyle(item.type), ...style } as SxProps;
|
||||
React.useEffect(() => {
|
||||
console.log(item);
|
||||
@@ -77,7 +242,7 @@ export default observer(
|
||||
{item.agent}
|
||||
</Box>
|
||||
<Box component="span" sx={{ fontWeight: 400 }}>
|
||||
: {item.type}
|
||||
: {getActionTypeDisplayText(item.type)}
|
||||
</Box>
|
||||
</Box>
|
||||
{item.result ? (
|
||||
@@ -103,10 +268,14 @@ export default observer(
|
||||
marginBottom: '4px',
|
||||
color: '#0009',
|
||||
fontWeight: 400,
|
||||
display: 'inline-flex',
|
||||
alignItems: 'center',
|
||||
gap: '8px',
|
||||
}}
|
||||
>
|
||||
{item.description}
|
||||
</Box>
|
||||
<DataSpaceIndicator cards={cardData} />
|
||||
<MarkdownBlock
|
||||
text={item.result}
|
||||
style={{
|
||||
|
||||
@@ -79,7 +79,7 @@ export default observer(({ style = {} }: { style?: SxProps }) => {
|
||||
alignItems: 'center',
|
||||
}}
|
||||
>
|
||||
<Title title="Execution Result" />
|
||||
<Title title="执行结果" />
|
||||
<Box
|
||||
sx={{
|
||||
flexGrow: 1,
|
||||
|
||||
@@ -4,6 +4,8 @@ import { SxProps } from '@mui/material';
|
||||
import Box from '@mui/material/Box';
|
||||
import IconButton from '@mui/material/IconButton';
|
||||
import FormControl from '@mui/material/FormControl';
|
||||
import Autocomplete from '@mui/material/Autocomplete';
|
||||
import TextField from '@mui/material/TextField';
|
||||
import FilledInput from '@mui/material/FilledInput';
|
||||
import TelegramIcon from '@mui/icons-material/Telegram';
|
||||
import CircularProgress from '@mui/material/CircularProgress';
|
||||
@@ -16,6 +18,18 @@ export interface UserGoalInputProps {
|
||||
export default observer(({ style = {} }: UserGoalInputProps) => {
|
||||
const inputRef = React.useRef<string>('');
|
||||
const inputElementRef = React.useRef<HTMLInputElement>(null);
|
||||
const goalOptions = [
|
||||
'如何快速筛选慢性肾脏病药物潜在受试者?',
|
||||
'如何补充“丹芍活血胶囊”不良反应数据?',
|
||||
'如何快速研发用于战场失血性休克的药物?',
|
||||
'二维材料的光电性质受哪些关键因素影响?',
|
||||
'如何通过AI模拟的方法分析材料的微观结构?',
|
||||
'如何分析获取液态金属热力学参数?',
|
||||
'如何解决固态电池的成本和寿命难题?',
|
||||
'如何解决船舶制造中的材料腐蚀难题?',
|
||||
'如何解决船舶制造中流体模拟和建模优化难题?',
|
||||
];
|
||||
|
||||
React.useEffect(() => {
|
||||
if (inputElementRef.current) {
|
||||
if (globalStorage.planManager) {
|
||||
@@ -32,42 +46,70 @@ export default observer(({ style = {} }: UserGoalInputProps) => {
|
||||
...style,
|
||||
}}
|
||||
>
|
||||
<FilledInput
|
||||
<Autocomplete
|
||||
freeSolo
|
||||
disableClearable
|
||||
disabled={
|
||||
globalStorage.api.busy ||
|
||||
!globalStorage.api.agentsReady ||
|
||||
globalStorage.api.planReady
|
||||
}
|
||||
placeholder="Yout Goal"
|
||||
fullWidth
|
||||
inputRef={inputElementRef}
|
||||
onChange={event => (inputRef.current = event.target.value)}
|
||||
size="small"
|
||||
sx={{
|
||||
borderRadius: '10px',
|
||||
background: '#E1E1E1',
|
||||
borderBottom: 'none !important',
|
||||
'&::before': {
|
||||
borderBottom: 'none !important',
|
||||
},
|
||||
'& > input': {
|
||||
padding: '10px',
|
||||
},
|
||||
options={goalOptions}
|
||||
value={inputRef.current || (globalStorage.planManager ? globalStorage.planManager.goal : globalStorage.briefGoal)}
|
||||
onChange={(event, newValue) => {
|
||||
if (newValue) {
|
||||
inputRef.current = newValue;
|
||||
}
|
||||
}}
|
||||
startAdornment={
|
||||
<Box
|
||||
onInputChange={(event, newInputValue) => {
|
||||
inputRef.current = newInputValue;
|
||||
}}
|
||||
renderInput={(params) => (
|
||||
<TextField
|
||||
{...params}
|
||||
variant="filled"
|
||||
placeholder="请输入你的任务"
|
||||
fullWidth
|
||||
size="small"
|
||||
inputRef={inputElementRef}
|
||||
sx={{
|
||||
color: '#4A9C9E',
|
||||
fontWeight: 800,
|
||||
fontSize: '18px',
|
||||
textWrap: 'nowrap',
|
||||
userSelect: 'none',
|
||||
borderRadius: '10px',
|
||||
background: '#E1E1E1',
|
||||
borderBottom: 'none !important',
|
||||
'& .MuiFilledInput-root': {
|
||||
height: '45px',
|
||||
borderRadius: '10px',
|
||||
background: '#E1E1E1',
|
||||
paddingTop: '5px',
|
||||
borderBottom: 'none !important',
|
||||
'&::before': {
|
||||
borderBottom: 'none !important',
|
||||
},
|
||||
'&::after': {
|
||||
borderBottom: 'none !important',
|
||||
},
|
||||
},
|
||||
}}
|
||||
>
|
||||
\General Goal:
|
||||
</Box>
|
||||
}
|
||||
/>
|
||||
InputProps={{
|
||||
...params.InputProps,
|
||||
startAdornment: (
|
||||
<Box
|
||||
sx={{
|
||||
color: '#4A9C9E',
|
||||
fontWeight: 800,
|
||||
fontSize: '18px',
|
||||
textWrap: 'nowrap',
|
||||
userSelect: 'none',
|
||||
}}
|
||||
>
|
||||
任务
|
||||
</Box>
|
||||
),
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
/>
|
||||
|
||||
{globalStorage.api.planGenerating ? (
|
||||
<CircularProgress
|
||||
sx={{
|
||||
|
||||
@@ -88,24 +88,24 @@ export default observer(() => {
|
||||
setConnectors(<></>);
|
||||
}
|
||||
const outlinePosition =
|
||||
ElementToLink[0]?.current?.getBoundingClientRect?.();
|
||||
ElementToLink[1]?.current?.getBoundingClientRect?.();
|
||||
if (!outlinePosition) {
|
||||
return;
|
||||
}
|
||||
const agentRects = ElementToLink[1]
|
||||
const agentRects = ElementToLink[0]
|
||||
.map(ele => ele.current?.getBoundingClientRect?.() as DOMRect)
|
||||
.filter(rect => rect);
|
||||
if (agentRects.length === 0) {
|
||||
return;
|
||||
}
|
||||
const agentsPosition = mergeRects(...agentRects);
|
||||
const descriptionPosition =
|
||||
ElementToLink[2]?.current?.getBoundingClientRect?.();
|
||||
if (!descriptionPosition) {
|
||||
return;
|
||||
}
|
||||
// const descriptionPosition =
|
||||
// ElementToLink[2]?.current?.getBoundingClientRect?.();
|
||||
// if (!descriptionPosition) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
const LogRects = ElementToLink[3]
|
||||
const LogRects = ElementToLink[2]
|
||||
.map(ele => ele?.current?.getBoundingClientRect?.() as DOMRect)
|
||||
.filter(rect => rect);
|
||||
if (LogRects.length > 0) {
|
||||
@@ -115,15 +115,14 @@ export default observer(() => {
|
||||
|
||||
setConnectors(
|
||||
drawConnectors(
|
||||
outlinePosition,
|
||||
agentsPosition,
|
||||
descriptionPosition,
|
||||
outlinePosition,
|
||||
logPosition,
|
||||
),
|
||||
);
|
||||
} else {
|
||||
setConnectors(
|
||||
drawConnectors(outlinePosition, agentsPosition, descriptionPosition),
|
||||
drawConnectors(agentsPosition,outlinePosition),
|
||||
);
|
||||
}
|
||||
} catch (e) {
|
||||
|
||||
@@ -1,33 +1,33 @@
|
||||
import { Box } from '@mui/material';
|
||||
import React from 'react';
|
||||
|
||||
// 定义你的图标属性类型,这里可以扩展成任何你需要的属性
|
||||
interface CustomIconProps {
|
||||
size?: number | string;
|
||||
color?: string;
|
||||
// ...其他你需要的props
|
||||
}
|
||||
|
||||
// 创建你的自定义SVG图标组件
|
||||
const CheckIcon: React.FC<CustomIconProps> = ({
|
||||
size = '100%',
|
||||
color = 'currentColor',
|
||||
}) => {
|
||||
return (
|
||||
<Box
|
||||
component="svg"
|
||||
width={size}
|
||||
height="auto"
|
||||
viewBox="0 0 11 9"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
>
|
||||
<path
|
||||
d="M10.7204 0C7.37522 1.94391 4.95071 4.40066 3.85635 5.63171L1.18331 3.64484L0 4.54699L4.61463 9C5.4068 7.07085 7.92593 3.30116 11 0.620227L10.7204 0Z"
|
||||
fill={color}
|
||||
/>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default CheckIcon;
|
||||
import { Box } from '@mui/material';
|
||||
import React from 'react';
|
||||
|
||||
// 定义你的图标属性类型,这里可以扩展成任何你需要的属性
|
||||
interface CustomIconProps {
|
||||
size?: number | string;
|
||||
color?: string;
|
||||
// ...其他你需要的props
|
||||
}
|
||||
|
||||
// 创建你的自定义SVG图标组件
|
||||
const CheckIcon: React.FC<CustomIconProps> = ({
|
||||
size = '100%',
|
||||
color = 'currentColor',
|
||||
}) => {
|
||||
return (
|
||||
<Box
|
||||
component="svg"
|
||||
width={size}
|
||||
height="auto"
|
||||
viewBox="0 0 11 9"
|
||||
fill="none"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
>
|
||||
<path
|
||||
d="M10.7204 0C7.37522 1.94391 4.95071 4.40066 3.85635 5.63171L1.18331 3.64484L0 4.54699L4.61463 9C5.4068 7.07085 7.92593 3.30116 11 0.620227L10.7204 0Z"
|
||||
fill={color}
|
||||
/>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
export default CheckIcon;
|
||||
|
||||
@@ -44,7 +44,7 @@ export default observer(() => {
|
||||
)}
|
||||
{globalStorage.agentAssigmentWindow ? (
|
||||
<FloatWindow
|
||||
title="Assignment Exploration"
|
||||
title="智能体选择"
|
||||
onClose={() => (globalStorage.agentAssigmentWindow = false)}
|
||||
>
|
||||
<AgentAssignment
|
||||
|
||||
@@ -99,16 +99,7 @@ export default React.memo(() => {
|
||||
}}
|
||||
/>
|
||||
<Box sx={{ height: 0, flexGrow: 1, display: 'flex' }}>
|
||||
<ResizeableColumn columnWidth="25%">
|
||||
<Outline
|
||||
style={{
|
||||
height: '100%',
|
||||
width: '100%',
|
||||
marginRight: '6px',
|
||||
}}
|
||||
/>
|
||||
</ResizeableColumn>
|
||||
<ResizeableColumn columnWidth="25%">
|
||||
<ResizeableColumn columnWidth="19%">
|
||||
<AgentBoard
|
||||
style={{
|
||||
height: '100%',
|
||||
@@ -118,8 +109,8 @@ export default React.memo(() => {
|
||||
onAddAgent={() => setShowAgentHiring(true)}
|
||||
/>
|
||||
</ResizeableColumn>
|
||||
<ResizeableColumn columnWidth="25%">
|
||||
<ProcessDiscrption
|
||||
<ResizeableColumn columnWidth="26%">
|
||||
<Outline
|
||||
style={{
|
||||
height: '100%',
|
||||
width: '100%',
|
||||
@@ -127,6 +118,15 @@ export default React.memo(() => {
|
||||
}}
|
||||
/>
|
||||
</ResizeableColumn>
|
||||
{/* <ResizeableColumn columnWidth="25%">
|
||||
<ProcessDiscrption
|
||||
style={{
|
||||
height: '100%',
|
||||
width: '100%',
|
||||
marginRight: '6px',
|
||||
}}
|
||||
/>
|
||||
</ResizeableColumn> */}
|
||||
<ProcessRehearsal
|
||||
style={{
|
||||
height: '100%',
|
||||
|
||||
BIN
frontend-react/src/static/AgentIcons/Abigail_Chen_military.png
Normal file
|
After Width: | Height: | Size: 2.4 KiB |
BIN
frontend-react/src/static/AgentIcons/Hailey_Johnson_military.png
Normal file
|
After Width: | Height: | Size: 2.2 KiB |
BIN
frontend-react/src/static/AgentIcons/Jennifer_Moore_military.png
Normal file
|
After Width: | Height: | Size: 2.5 KiB |
@@ -285,17 +285,15 @@ export class GlobalStorage {
|
||||
}
|
||||
|
||||
public get ElementToLink(): [
|
||||
React.RefObject<HTMLElement> | undefined,
|
||||
React.RefObject<HTMLElement>[],
|
||||
React.RefObject<HTMLElement> | undefined,
|
||||
(React.RefObject<HTMLElement> | undefined)[],
|
||||
] {
|
||||
return [
|
||||
this.refMap.OutlineCard.get(this.focusingStepTaskId ?? ''),
|
||||
this.focusingStepTask?.agentSelection?.agents?.map(
|
||||
agent => this.refMap.AgentCard.get(agent) ?? [],
|
||||
) ?? ([] as any),
|
||||
this.refMap.DiscriptionCard.get(this.focusingStepTaskId ?? ''),
|
||||
this.refMap.OutlineCard.get(this.focusingStepTaskId ?? ''),
|
||||
this.logManager.outdate
|
||||
? []
|
||||
: [
|
||||
|
||||
@@ -11,6 +11,17 @@ export enum ActionType {
|
||||
Finalize = 'Finalize',
|
||||
}
|
||||
|
||||
export const getActionTypeDisplayText = (actionType: ActionType | string): string => {
|
||||
const displayMap: Record<string, string> = {
|
||||
[ActionType.Propose]: '提议',
|
||||
[ActionType.Critique]: '评审',
|
||||
[ActionType.Improve]: '改进',
|
||||
[ActionType.Finalize]: '总结',
|
||||
};
|
||||
|
||||
return displayMap[actionType] || actionType;
|
||||
};
|
||||
|
||||
const AgentActionStyles = new Map<ActionType | '', SxProps>([
|
||||
[ActionType.Propose, { backgroundColor: '#B9EBF9', borderColor: '#94c2dc' }],
|
||||
[ActionType.Critique, { backgroundColor: '#EFF9B9', borderColor: '#c0dc94' }],
|
||||
|
||||
@@ -20,7 +20,7 @@ const nameJoin = (names: string[]) => {
|
||||
const last = tmp.pop()!;
|
||||
let t = tmp.join(', ');
|
||||
if (t.length > 0) {
|
||||
t = `${t} and ${last}`;
|
||||
t = `${t} 和 ${last}`;
|
||||
} else {
|
||||
t = last;
|
||||
}
|
||||
@@ -102,14 +102,14 @@ export class StepTaskNode implements INodeBase {
|
||||
text: this.output,
|
||||
style: { background: '#FFCA8C' },
|
||||
};
|
||||
outputSentence = `to obtain !<${indexOffset}>!`;
|
||||
outputSentence = `得到 !<${indexOffset}>!`;
|
||||
}
|
||||
// Join them togeter
|
||||
let content = inputSentence;
|
||||
if (content) {
|
||||
content = `Based on ${content}, ${nameSentence} perform the task of !<${actionIndex}>!`;
|
||||
content = `基于${content}, ${nameSentence} 执行任务 !<${actionIndex}>!`;
|
||||
} else {
|
||||
content = `${nameSentence} perform the task of !<${actionIndex}>!`;
|
||||
content = `${nameSentence} 执行任务 !<${actionIndex}>!`;
|
||||
}
|
||||
if (outputSentence) {
|
||||
content = `${content}, ${outputSentence}.`;
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
node_modules
|
||||
dist
|
||||
.idea
|
||||
.vscode
|
||||
.git
|
||||
.gitignore
|
||||
@@ -1,8 +0,0 @@
|
||||
[*.{js,jsx,mjs,cjs,ts,tsx,mts,cts,vue,css,scss,sass,less,styl}]
|
||||
charset = utf-8
|
||||
indent_size = 2
|
||||
indent_style = space
|
||||
insert_final_newline = true
|
||||
trim_trailing_whitespace = true
|
||||
end_of_line = lf
|
||||
max_line_length = 100
|
||||
@@ -1 +0,0 @@
|
||||
API_BASE=http://127.0.0.1:8000
|
||||
@@ -1,79 +0,0 @@
|
||||
{
|
||||
"globals": {
|
||||
"Component": true,
|
||||
"ComponentPublicInstance": true,
|
||||
"ComputedRef": true,
|
||||
"DirectiveBinding": true,
|
||||
"EffectScope": true,
|
||||
"ExtractDefaultPropTypes": true,
|
||||
"ExtractPropTypes": true,
|
||||
"ExtractPublicPropTypes": true,
|
||||
"InjectionKey": true,
|
||||
"MaybeRef": true,
|
||||
"MaybeRefOrGetter": true,
|
||||
"PropType": true,
|
||||
"Ref": true,
|
||||
"ShallowRef": true,
|
||||
"Slot": true,
|
||||
"Slots": true,
|
||||
"VNode": true,
|
||||
"WritableComputedRef": true,
|
||||
"computed": true,
|
||||
"createApp": true,
|
||||
"customRef": true,
|
||||
"defineAsyncComponent": true,
|
||||
"defineComponent": true,
|
||||
"effectScope": true,
|
||||
"getCurrentInstance": true,
|
||||
"getCurrentScope": true,
|
||||
"getCurrentWatcher": true,
|
||||
"h": true,
|
||||
"inject": true,
|
||||
"isProxy": true,
|
||||
"isReactive": true,
|
||||
"isReadonly": true,
|
||||
"isRef": true,
|
||||
"isShallow": true,
|
||||
"markRaw": true,
|
||||
"nextTick": true,
|
||||
"onActivated": true,
|
||||
"onBeforeMount": true,
|
||||
"onBeforeUnmount": true,
|
||||
"onBeforeUpdate": true,
|
||||
"onDeactivated": true,
|
||||
"onErrorCaptured": true,
|
||||
"onMounted": true,
|
||||
"onRenderTracked": true,
|
||||
"onRenderTriggered": true,
|
||||
"onScopeDispose": true,
|
||||
"onServerPrefetch": true,
|
||||
"onUnmounted": true,
|
||||
"onUpdated": true,
|
||||
"onWatcherCleanup": true,
|
||||
"provide": true,
|
||||
"reactive": true,
|
||||
"readonly": true,
|
||||
"ref": true,
|
||||
"resolveComponent": true,
|
||||
"shallowReactive": true,
|
||||
"shallowReadonly": true,
|
||||
"shallowRef": true,
|
||||
"toRaw": true,
|
||||
"toRef": true,
|
||||
"toRefs": true,
|
||||
"toValue": true,
|
||||
"triggerRef": true,
|
||||
"unref": true,
|
||||
"useAttrs": true,
|
||||
"useCssModule": true,
|
||||
"useCssVars": true,
|
||||
"useId": true,
|
||||
"useModel": true,
|
||||
"useSlots": true,
|
||||
"useTemplateRef": true,
|
||||
"watch": true,
|
||||
"watchEffect": true,
|
||||
"watchPostEffect": true,
|
||||
"watchSyncEffect": true
|
||||
}
|
||||
}
|
||||
1
frontend/.gitattributes
vendored
@@ -1 +0,0 @@
|
||||
* text=auto eol=lf
|
||||
36
frontend/.gitignore
vendored
@@ -1,36 +0,0 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
.DS_Store
|
||||
dist
|
||||
dist-ssr
|
||||
coverage
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
|
||||
*.tsbuildinfo
|
||||
|
||||
.eslintcache
|
||||
|
||||
# Cypress
|
||||
/cypress/videos/
|
||||
/cypress/screenshots/
|
||||
|
||||
# Vitest
|
||||
__screenshots__/
|
||||
@@ -1,6 +0,0 @@
|
||||
{
|
||||
"$schema": "https://json.schemastore.org/prettierrc",
|
||||
"semi": false,
|
||||
"singleQuote": true,
|
||||
"printWidth": 100
|
||||
}
|
||||
9
frontend/.vscode/extensions.json
vendored
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"Vue.volar",
|
||||
"vitest.explorer",
|
||||
"dbaeumer.vscode-eslint",
|
||||
"EditorConfig.EditorConfig",
|
||||
"esbenp.prettier-vscode"
|
||||
]
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
# Install dependencies
|
||||
pnpm install
|
||||
|
||||
# Development server with hot reload
|
||||
pnpm dev
|
||||
|
||||
# Build for production
|
||||
pnpm build
|
||||
|
||||
# Type checking
|
||||
pnpm type-check
|
||||
|
||||
# Lint and fix code
|
||||
pnpm lint
|
||||
|
||||
# Format code
|
||||
pnpm format
|
||||
|
||||
# Run unit tests
|
||||
pnpm test:unit
|
||||
```
|
||||
|
||||
## Project Architecture
|
||||
|
||||
This is a **Multi-Agent Coordination Platform** (多智能体协同平台) built with Vue 3, TypeScript, and Vite. The application enables users to create and manage AI agents with specialized roles and coordinate them to complete complex tasks through visual workflows.
|
||||
|
||||
### Tech Stack
|
||||
- **Vue 3** with Composition API and TypeScript
|
||||
- **Vite** for build tooling and development
|
||||
- **Element Plus** for UI components
|
||||
- **Pinia** for state management
|
||||
- **Tailwind CSS** for styling
|
||||
- **Vue Router** for routing (minimal usage)
|
||||
- **JSPlumb** for visual workflow connections
|
||||
- **Axios** for API requests with custom interceptors
|
||||
|
||||
### Key Architecture Components
|
||||
|
||||
#### State Management (`src/stores/modules/agents.ts`)
|
||||
Central store managing:
|
||||
- Agent definitions with profiles and icons
|
||||
- Task workflow data structures (`IRawStepTask`, `TaskProcess`)
|
||||
- Search functionality and current task state
|
||||
- Raw plan responses with UUID generation for tasks
|
||||
|
||||
#### Request Layer (`src/utils/request.ts`)
|
||||
Custom Axios wrapper with:
|
||||
- Proxy configuration for `/api` -> `http://localhost:8000`
|
||||
- Response interceptors for error handling
|
||||
- `useRequest` hook for reactive data fetching
|
||||
- Integrated Element Plus notifications
|
||||
|
||||
#### Component Structure
|
||||
- **Layout System** (`src/layout/`): Main application layout with Header and Main sections
|
||||
- **Task Templates** (`src/layout/components/Main/TaskTemplate/`): Different task types including AgentRepo, TaskSyllabus, and TaskResult
|
||||
- **Visual Workflow**: JSPlumb integration for drag-and-drop agent coordination flows
|
||||
|
||||
#### Icon System
|
||||
- SVG icons stored in `src/assets/icons/`
|
||||
- Custom `SvgIcon` component with vite-plugin-svg-icons
|
||||
- Icon categories include specialist roles (doctor, engineer, researcher, etc.)
|
||||
|
||||
### Build Configuration
|
||||
|
||||
#### Vite (`vite.config.ts`)
|
||||
- Element Plus auto-import and component resolution
|
||||
- SVG icon caching with custom symbol IDs
|
||||
- Proxy setup for API requests to backend
|
||||
- Path aliases: `@/` maps to `src/`
|
||||
|
||||
#### Docker Deployment
|
||||
- Multi-stage build: Node.js build + Caddy web server
|
||||
- API proxy configured via Caddyfile
|
||||
- Environment variable support for different deployment modes
|
||||
|
||||
### Data Models
|
||||
Key interfaces for the agent coordination system:
|
||||
- `Agent`: Name, Profile, Icon
|
||||
- `IRawStepTask`: Individual task steps with agent selection and inputs
|
||||
- `TaskProcess`: Action descriptions with important inputs
|
||||
- `IRichText`: Template-based content formatting with style support
|
||||
|
||||
### Development Notes
|
||||
- Uses pnpm as package manager (required by package.json)
|
||||
- Node version constraint: ^20.19.0 or >=22.12.0
|
||||
- Dark theme enabled by default in App.vue
|
||||
- Auto-imports configured for Vue APIs
|
||||
- No traditional Vue routes - uses component-based navigation
|
||||
@@ -1,27 +0,0 @@
|
||||
ARG CADDY_VERSION=2.6
|
||||
ARG BUILD_ENV=prod
|
||||
|
||||
FROM node:20.19.0 as base
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
RUN npm install -g pnpm
|
||||
RUN pnpm install
|
||||
RUN pnpm build
|
||||
|
||||
|
||||
# The base for mode ENVIRONMENT=prod
|
||||
FROM caddy:${CADDY_VERSION}-alpine as prod
|
||||
|
||||
# Workaround for https://github.com/alpinelinux/docker-alpine/issues/98#issuecomment-679278499
|
||||
RUN sed -i 's/https/http/' /etc/apk/repositories \
|
||||
&& apk add --no-cache bash
|
||||
|
||||
COPY docker/Caddyfile /etc/caddy/
|
||||
COPY --from=base /app/dist /frontend
|
||||
|
||||
# Run stage
|
||||
FROM ${BUILD_ENV}
|
||||
|
||||
EXPOSE 80 443
|
||||
VOLUME ["/data", "/etc/caddy"]
|
||||
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]
|
||||
@@ -1,106 +0,0 @@
|
||||
# 多智能体协同平台 (Agent Coordination Platform)
|
||||
|
||||
一个强大的可视化平台,用于创建和管理具有专门角色的AI智能体,通过直观的工作流程协调它们来完成复杂任务。
|
||||
|
||||
## ✨ 功能特性
|
||||
|
||||
- **多智能体系统**:创建具有专门角色和专业知识的AI智能体
|
||||
- **可视化工作流编辑器**:使用JSPlumb设计智能体协调流程的拖放界面
|
||||
- **任务管理**:定义、执行和跟踪复杂的多步骤任务
|
||||
- **实时通信**:无缝的智能体交互和协调
|
||||
- **丰富的模板系统**:支持样式的灵活内容格式化
|
||||
- **TypeScript支持**:整个应用程序的完整类型安全
|
||||
|
||||
## 🚀 快速开始
|
||||
|
||||
### 开发命令
|
||||
|
||||
```bash
|
||||
# 安装依赖
|
||||
pnpm install
|
||||
|
||||
# 开发服务器(热重载)
|
||||
pnpm dev
|
||||
|
||||
# 生产构建
|
||||
pnpm build
|
||||
|
||||
# 类型检查
|
||||
pnpm type-check
|
||||
|
||||
# 代码检查和修复
|
||||
pnpm lint
|
||||
|
||||
# 代码格式化
|
||||
pnpm format
|
||||
|
||||
# 运行单元测试
|
||||
pnpm test:unit
|
||||
```
|
||||
|
||||
### 系统要求
|
||||
|
||||
- Node.js ^20.19.0 或 >=22.12.0
|
||||
- pnpm(必需的包管理器)
|
||||
|
||||
## 🏗️ 架构设计
|
||||
|
||||
### 技术栈
|
||||
|
||||
- **Vue 3**:Composition API 和 TypeScript
|
||||
- **Vite**:构建工具和开发环境
|
||||
- **Element Plus**:UI组件库
|
||||
- **Pinia**:状态管理
|
||||
- **Tailwind CSS**:样式框架
|
||||
- **JSPlumb**:可视化工作流连接
|
||||
- **Axios**:API请求与自定义拦截器
|
||||
|
||||
### 核心组件
|
||||
|
||||
#### 状态管理
|
||||
中央存储管理智能体定义、任务工作流和协调状态
|
||||
|
||||
#### 请求层
|
||||
自定义Axios包装器,具有代理配置和集成通知
|
||||
|
||||
#### 可视化工作流
|
||||
JSPlumb集成,用于拖放智能体协调流程
|
||||
|
||||
#### 图标系统
|
||||
基于SVG的图标,用于不同的智能体专业化和角色
|
||||
|
||||
## 📁 项目结构
|
||||
|
||||
```
|
||||
src/
|
||||
├── assets/ # 静态资源,包括智能体图标
|
||||
├── components/ # 可复用的Vue组件
|
||||
├── layout/ # 应用布局和主要组件
|
||||
├── stores/ # Pinia状态管理
|
||||
├── utils/ # 工具函数和请求层
|
||||
├── views/ # 页面组件
|
||||
└── App.vue # 根组件
|
||||
```
|
||||
|
||||
## 🎯 开发指南
|
||||
|
||||
### IDE设置
|
||||
|
||||
[VS Code](https://code.visualstudio.com/) + [Vue (Official)](https://marketplace.visualstudio.com/items?itemName=Vue.volar)(禁用Vetur)。
|
||||
|
||||
### 浏览器开发工具
|
||||
|
||||
- 基于Chromium的浏览器:
|
||||
- [Vue.js devtools](https://chromewebstore.google.com/detail/vuejs-devtools/nhdogjmejiglipccpnnnanhbledajbpd)
|
||||
- 在DevTools中启用自定义对象格式化程序
|
||||
- Firefox:
|
||||
- [Vue.js devtools](https://addons.mozilla.org/en-US/firefox/addon/vue-js-devtools/)
|
||||
- 在DevTools中启用自定义对象格式化程序
|
||||
|
||||
## 🚀 部署
|
||||
|
||||
应用程序支持Docker部署,使用多阶段构建过程:Node.js用于构建,Caddy作为Web服务器。
|
||||
|
||||
## 📄 许可证
|
||||
|
||||
MIT许可证 - 详见LICENSE文件
|
||||
75
frontend/auto-imports.d.ts
vendored
@@ -1,75 +0,0 @@
|
||||
/* eslint-disable */
|
||||
/* prettier-ignore */
|
||||
// @ts-nocheck
|
||||
// noinspection JSUnusedGlobalSymbols
|
||||
// Generated by unplugin-auto-import
|
||||
// biome-ignore lint: disable
|
||||
export {}
|
||||
declare global {
|
||||
const EffectScope: typeof import('vue').EffectScope
|
||||
const ElMessage: typeof import('element-plus/es').ElMessage
|
||||
const ElNotification: typeof import('element-plus/es').ElNotification
|
||||
const computed: typeof import('vue').computed
|
||||
const createApp: typeof import('vue').createApp
|
||||
const customRef: typeof import('vue').customRef
|
||||
const defineAsyncComponent: typeof import('vue').defineAsyncComponent
|
||||
const defineComponent: typeof import('vue').defineComponent
|
||||
const effectScope: typeof import('vue').effectScope
|
||||
const getCurrentInstance: typeof import('vue').getCurrentInstance
|
||||
const getCurrentScope: typeof import('vue').getCurrentScope
|
||||
const getCurrentWatcher: typeof import('vue').getCurrentWatcher
|
||||
const h: typeof import('vue').h
|
||||
const inject: typeof import('vue').inject
|
||||
const isProxy: typeof import('vue').isProxy
|
||||
const isReactive: typeof import('vue').isReactive
|
||||
const isReadonly: typeof import('vue').isReadonly
|
||||
const isRef: typeof import('vue').isRef
|
||||
const isShallow: typeof import('vue').isShallow
|
||||
const markRaw: typeof import('vue').markRaw
|
||||
const nextTick: typeof import('vue').nextTick
|
||||
const onActivated: typeof import('vue').onActivated
|
||||
const onBeforeMount: typeof import('vue').onBeforeMount
|
||||
const onBeforeUnmount: typeof import('vue').onBeforeUnmount
|
||||
const onBeforeUpdate: typeof import('vue').onBeforeUpdate
|
||||
const onDeactivated: typeof import('vue').onDeactivated
|
||||
const onErrorCaptured: typeof import('vue').onErrorCaptured
|
||||
const onMounted: typeof import('vue').onMounted
|
||||
const onRenderTracked: typeof import('vue').onRenderTracked
|
||||
const onRenderTriggered: typeof import('vue').onRenderTriggered
|
||||
const onScopeDispose: typeof import('vue').onScopeDispose
|
||||
const onServerPrefetch: typeof import('vue').onServerPrefetch
|
||||
const onUnmounted: typeof import('vue').onUnmounted
|
||||
const onUpdated: typeof import('vue').onUpdated
|
||||
const onWatcherCleanup: typeof import('vue').onWatcherCleanup
|
||||
const provide: typeof import('vue').provide
|
||||
const reactive: typeof import('vue').reactive
|
||||
const readonly: typeof import('vue').readonly
|
||||
const ref: typeof import('vue').ref
|
||||
const resolveComponent: typeof import('vue').resolveComponent
|
||||
const shallowReactive: typeof import('vue').shallowReactive
|
||||
const shallowReadonly: typeof import('vue').shallowReadonly
|
||||
const shallowRef: typeof import('vue').shallowRef
|
||||
const toRaw: typeof import('vue').toRaw
|
||||
const toRef: typeof import('vue').toRef
|
||||
const toRefs: typeof import('vue').toRefs
|
||||
const toValue: typeof import('vue').toValue
|
||||
const triggerRef: typeof import('vue').triggerRef
|
||||
const unref: typeof import('vue').unref
|
||||
const useAttrs: typeof import('vue').useAttrs
|
||||
const useCssModule: typeof import('vue').useCssModule
|
||||
const useCssVars: typeof import('vue').useCssVars
|
||||
const useId: typeof import('vue').useId
|
||||
const useModel: typeof import('vue').useModel
|
||||
const useSlots: typeof import('vue').useSlots
|
||||
const useTemplateRef: typeof import('vue').useTemplateRef
|
||||
const watch: typeof import('vue').watch
|
||||
const watchEffect: typeof import('vue').watchEffect
|
||||
const watchPostEffect: typeof import('vue').watchPostEffect
|
||||
const watchSyncEffect: typeof import('vue').watchSyncEffect
|
||||
}
|
||||
// for type re-export
|
||||
declare global {
|
||||
// @ts-ignore
|
||||
export type { Component, Slot, Slots, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, ShallowRef, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue'
|
||||
import('vue')
|
||||
}
|
||||
@@ -1,205 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# ========================
|
||||
# 常量定义
|
||||
# ========================
|
||||
SCRIPT_NAME=$(basename "$0")
|
||||
NODE_MIN_VERSION=18
|
||||
NODE_INSTALL_VERSION=22
|
||||
NVM_VERSION="v0.40.3"
|
||||
CLAUDE_PACKAGE="@anthropic-ai/claude-code"
|
||||
CONFIG_DIR="$HOME/.claude"
|
||||
CONFIG_FILE="$CONFIG_DIR/settings.json"
|
||||
API_BASE_URL="https://open.bigmodel.cn/api/anthropic"
|
||||
API_KEY_URL="https://open.bigmodel.cn/usercenter/proj-mgmt/apikeys"
|
||||
API_TIMEOUT_MS=3000000
|
||||
|
||||
# ========================
|
||||
# 工具函数
|
||||
# ========================
|
||||
|
||||
log_info() {
|
||||
echo "🔹 $*"
|
||||
}
|
||||
|
||||
log_success() {
|
||||
echo "✅ $*"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo "❌ $*" >&2
|
||||
}
|
||||
|
||||
ensure_dir_exists() {
|
||||
local dir="$1"
|
||||
if [ ! -d "$dir" ]; then
|
||||
mkdir -p "$dir" || {
|
||||
log_error "Failed to create directory: $dir"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
# ========================
|
||||
# Node.js 安装函数
|
||||
# ========================
|
||||
|
||||
install_nodejs() {
|
||||
local platform=$(uname -s)
|
||||
|
||||
case "$platform" in
|
||||
Linux|Darwin)
|
||||
log_info "Installing Node.js on $platform..."
|
||||
|
||||
# 安装 nvm
|
||||
log_info "Installing nvm ($NVM_VERSION)..."
|
||||
curl -s https://raw.githubusercontent.com/nvm-sh/nvm/"$NVM_VERSION"/install.sh | bash
|
||||
|
||||
# 加载 nvm
|
||||
log_info "Loading nvm environment..."
|
||||
\. "$HOME/.nvm/nvm.sh"
|
||||
|
||||
# 安装 Node.js
|
||||
log_info "Installing Node.js $NODE_INSTALL_VERSION..."
|
||||
nvm install "$NODE_INSTALL_VERSION"
|
||||
|
||||
# 验证安装
|
||||
node -v &>/dev/null || {
|
||||
log_error "Node.js installation failed"
|
||||
exit 1
|
||||
}
|
||||
log_success "Node.js installed: $(node -v)"
|
||||
log_success "npm version: $(npm -v)"
|
||||
;;
|
||||
*)
|
||||
log_error "Unsupported platform: $platform"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# ========================
|
||||
# Node.js 检查函数
|
||||
# ========================
|
||||
|
||||
check_nodejs() {
|
||||
if command -v node &>/dev/null; then
|
||||
current_version=$(node -v | sed 's/v//')
|
||||
major_version=$(echo "$current_version" | cut -d. -f1)
|
||||
|
||||
if [ "$major_version" -ge "$NODE_MIN_VERSION" ]; then
|
||||
log_success "Node.js is already installed: v$current_version"
|
||||
return 0
|
||||
else
|
||||
log_info "Node.js v$current_version is installed but version < $NODE_MIN_VERSION. Upgrading..."
|
||||
install_nodejs
|
||||
fi
|
||||
else
|
||||
log_info "Node.js not found. Installing..."
|
||||
install_nodejs
|
||||
fi
|
||||
}
|
||||
|
||||
# ========================
|
||||
# Claude Code 安装
|
||||
# ========================
|
||||
|
||||
install_claude_code() {
|
||||
if command -v claude &>/dev/null; then
|
||||
log_success "Claude Code is already installed: $(claude --version)"
|
||||
else
|
||||
log_info "Installing Claude Code..."
|
||||
npm install -g "$CLAUDE_PACKAGE" || {
|
||||
log_error "Failed to install claude-code"
|
||||
exit 1
|
||||
}
|
||||
log_success "Claude Code installed successfully"
|
||||
fi
|
||||
}
|
||||
|
||||
configure_claude_json(){
|
||||
node --eval '
|
||||
const os = require("os");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
|
||||
const homeDir = os.homedir();
|
||||
const filePath = path.join(homeDir, ".claude.json");
|
||||
if (fs.existsSync(filePath)) {
|
||||
const content = JSON.parse(fs.readFileSync(filePath, "utf-8"));
|
||||
fs.writeFileSync(filePath, JSON.stringify({ ...content, hasCompletedOnboarding: true }, null, 2), "utf-8");
|
||||
} else {
|
||||
fs.writeFileSync(filePath, JSON.stringify({ hasCompletedOnboarding: true }, null, 2), "utf-8");
|
||||
}'
|
||||
}
|
||||
|
||||
# ========================
|
||||
# API Key 配置
|
||||
# ========================
|
||||
|
||||
configure_claude() {
|
||||
log_info "Configuring Claude Code..."
|
||||
echo " You can get your API key from: $API_KEY_URL"
|
||||
read -s -p "🔑 Please enter your ZHIPU API key: " api_key
|
||||
echo
|
||||
|
||||
if [ -z "$api_key" ]; then
|
||||
log_error "API key cannot be empty. Please run the script again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ensure_dir_exists "$CONFIG_DIR"
|
||||
|
||||
# 写入配置文件
|
||||
node --eval '
|
||||
const os = require("os");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
|
||||
const homeDir = os.homedir();
|
||||
const filePath = path.join(homeDir, ".claude", "settings.json");
|
||||
const apiKey = "'"$api_key"'";
|
||||
|
||||
const content = fs.existsSync(filePath)
|
||||
? JSON.parse(fs.readFileSync(filePath, "utf-8"))
|
||||
: {};
|
||||
|
||||
fs.writeFileSync(filePath, JSON.stringify({
|
||||
...content,
|
||||
env: {
|
||||
ANTHROPIC_AUTH_TOKEN: apiKey,
|
||||
ANTHROPIC_BASE_URL: "'"$API_BASE_URL"'",
|
||||
API_TIMEOUT_MS: "'"$API_TIMEOUT_MS"'",
|
||||
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: 1
|
||||
}
|
||||
}, null, 2), "utf-8");
|
||||
' || {
|
||||
log_error "Failed to write settings.json"
|
||||
exit 1
|
||||
}
|
||||
|
||||
log_success "Claude Code configured successfully"
|
||||
}
|
||||
|
||||
# ========================
|
||||
# 主流程
|
||||
# ========================
|
||||
|
||||
main() {
|
||||
echo "🚀 Starting $SCRIPT_NAME"
|
||||
|
||||
check_nodejs
|
||||
install_claude_code
|
||||
configure_claude_json
|
||||
configure_claude
|
||||
|
||||
echo ""
|
||||
log_success "🎉 Installation completed successfully!"
|
||||
echo ""
|
||||
echo "🚀 You can now start using Claude Code with:"
|
||||
echo " claude"
|
||||
}
|
||||
|
||||
main "$@"
|
||||
36
frontend/components.d.ts
vendored
@@ -1,36 +0,0 @@
|
||||
/* eslint-disable */
|
||||
// @ts-nocheck
|
||||
// biome-ignore lint: disable
|
||||
// oxlint-disable
|
||||
// ------
|
||||
// Generated by unplugin-vue-components
|
||||
// Read more: https://github.com/vuejs/core/pull/3399
|
||||
|
||||
export {}
|
||||
|
||||
/* prettier-ignore */
|
||||
declare module 'vue' {
|
||||
export interface GlobalComponents {
|
||||
ElAutocomplete: typeof import('element-plus/es')['ElAutocomplete']
|
||||
ElButton: typeof import('element-plus/es')['ElButton']
|
||||
ElCard: typeof import('element-plus/es')['ElCard']
|
||||
ElCollapse: typeof import('element-plus/es')['ElCollapse']
|
||||
ElCollapseItem: typeof import('element-plus/es')['ElCollapseItem']
|
||||
ElDrawer: typeof import('element-plus/es')['ElDrawer']
|
||||
ElEmpty: typeof import('element-plus/es')['ElEmpty']
|
||||
ElIcon: typeof import('element-plus/es')['ElIcon']
|
||||
ElInput: typeof import('element-plus/es')['ElInput']
|
||||
ElPopover: typeof import('element-plus/es')['ElPopover']
|
||||
ElScrollbar: typeof import('element-plus/es')['ElScrollbar']
|
||||
ElTooltip: typeof import('element-plus/es')['ElTooltip']
|
||||
MultiLineTooltip: typeof import('./src/components/MultiLineTooltip/index.vue')['default']
|
||||
Notification: typeof import('./src/components/Notification/Notification.vue')['default']
|
||||
RouterLink: typeof import('vue-router')['RouterLink']
|
||||
RouterView: typeof import('vue-router')['RouterView']
|
||||
SvgIcon: typeof import('./src/components/SvgIcon/index.vue')['default']
|
||||
TaskContentEditor: typeof import('./src/components/TaskContentEditor/index.vue')['default']
|
||||
}
|
||||
export interface GlobalDirectives {
|
||||
vLoading: typeof import('element-plus/es')['ElLoadingDirective']
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
:80
|
||||
|
||||
# Proxy `/api` to backends
|
||||
handle_path /api/* {
|
||||
reverse_proxy {$API_HOST}
|
||||
}
|
||||
|
||||
# Frontend
|
||||
handle {
|
||||
root * /frontend
|
||||
encode gzip
|
||||
try_files {path} /index.html
|
||||
file_server
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
agent-coord-font:
|
||||
image: agent-coord:0.0.1
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: docker/Dockerfile
|
||||
ports:
|
||||
- "8080:80"
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile
|
||||
environment:
|
||||
- API_HOST="http://host.docker.internal:8000"
|
||||
- BUILD_ENV=prod
|
||||
4
frontend/env.d.ts
vendored
@@ -1,4 +0,0 @@
|
||||
/// <reference types="vite/client" />
|
||||
declare global {
|
||||
const testGlobal: any; // 声明 testGlobal 为全局变量
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
import { globalIgnores } from 'eslint/config'
|
||||
import { defineConfigWithVueTs, vueTsConfigs } from '@vue/eslint-config-typescript'
|
||||
import pluginVue from 'eslint-plugin-vue'
|
||||
import pluginVitest from '@vitest/eslint-plugin'
|
||||
import skipFormatting from '@vue/eslint-config-prettier/skip-formatting'
|
||||
import autoImportConfig from './.eslintrc-auto-import.json' with { type: 'json' }
|
||||
|
||||
|
||||
// To allow more languages other than `ts` in `.vue` files, uncomment the following lines:
|
||||
// import { configureVueProject } from '@vue/eslint-config-typescript'
|
||||
// configureVueProject({ scriptLangs: ['ts', 'tsx'] })
|
||||
// More info at https://github.com/vuejs/eslint-config-typescript/#advanced-setup
|
||||
|
||||
export default defineConfigWithVueTs(
|
||||
{
|
||||
name: 'app/files-to-lint',
|
||||
files: ['**/*.{ts,mts,tsx,vue}'],
|
||||
},
|
||||
globalIgnores(['**/dist/**', '**/dist-ssr/**', '**/coverage/**']),
|
||||
pluginVue.configs['flat/essential'],
|
||||
vueTsConfigs.recommended,
|
||||
{
|
||||
...pluginVitest.configs.recommended,
|
||||
files: ['src/**/__tests__/*'],
|
||||
},
|
||||
skipFormatting,
|
||||
{
|
||||
name: 'app/custom-rules',
|
||||
files: ['**/*.{ts,mts,tsx,vue}'],
|
||||
rules: {
|
||||
'vue/multi-word-component-names': 'off',
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'auto-import-globals',
|
||||
files: ['**/*.{ts,mts,tsx,vue}'],
|
||||
languageOptions: {
|
||||
globals: {
|
||||
...(autoImportConfig.globals || {}),
|
||||
testGlobal: 'readonly' // 手动添加一个测试变量
|
||||
}
|
||||
},
|
||||
rules: {
|
||||
'no-undef': 'off', // 确保关闭 no-undef 规则
|
||||
'@typescript-eslint/no-undef': 'off'
|
||||
}
|
||||
}
|
||||
)
|
||||
@@ -1,13 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<link rel="icon" href="/logo.png">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>多智能体协同平台</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
<script type="module" src="/src/main.ts"></script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,80 +0,0 @@
|
||||
{
|
||||
"name": "agent-coord",
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"engines": {
|
||||
"node": "^20.19.0 || >=22.12.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "vite build",
|
||||
"preview": "vite preview",
|
||||
"test:unit": "vitest",
|
||||
"build-only": "vite build",
|
||||
"type-check": "vue-tsc --build",
|
||||
"lint": "eslint . --fix --cache",
|
||||
"format": "prettier --write src/"
|
||||
},
|
||||
"dependencies": {
|
||||
"@element-plus/icons-vue": "^2.3.2",
|
||||
"@jsplumb/browser-ui": "^6.2.10",
|
||||
"@types/markdown-it": "^14.1.2",
|
||||
"@vue-flow/background": "^1.3.2",
|
||||
"@vue-flow/controls": "^1.1.3",
|
||||
"@vue-flow/core": "^1.48.1",
|
||||
"@vue-flow/minimap": "^1.5.4",
|
||||
"@vueuse/core": "^14.0.0",
|
||||
"axios": "^1.12.2",
|
||||
"d3": "^7.9.0",
|
||||
"docx-preview": "^0.3.7",
|
||||
"dompurify": "^3.3.0",
|
||||
"element-plus": "^2.11.5",
|
||||
"lodash": "^4.17.21",
|
||||
"markdown-it": "^14.1.0",
|
||||
"marked": "^17.0.4",
|
||||
"markmap": "^0.6.1",
|
||||
"markmap-lib": "^0.18.12",
|
||||
"markmap-view": "^0.18.12",
|
||||
"pinia": "^3.0.3",
|
||||
"pptxjs": "^0.0.0",
|
||||
"qs": "^6.14.0",
|
||||
"socket.io-client": "^4.8.3",
|
||||
"uuid": "^13.0.0",
|
||||
"vue": "^3.5.22",
|
||||
"vue-router": "^4.6.3",
|
||||
"xlsx": "^0.18.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@tailwindcss/vite": "^4.1.15",
|
||||
"@tsconfig/node22": "^22.0.2",
|
||||
"@types/jsdom": "^27.0.0",
|
||||
"@types/lodash": "^4.17.20",
|
||||
"@types/node": "^22.18.11",
|
||||
"@types/qs": "^6.14.0",
|
||||
"@vitejs/plugin-vue": "^6.0.1",
|
||||
"@vitest/eslint-plugin": "^1.3.23",
|
||||
"@vue/eslint-config-prettier": "^10.2.0",
|
||||
"@vue/eslint-config-typescript": "^14.6.0",
|
||||
"@vue/test-utils": "^2.4.6",
|
||||
"@vue/tsconfig": "^0.8.1",
|
||||
"eslint": "^9.37.0",
|
||||
"eslint-plugin-import": "^2.32.0",
|
||||
"eslint-plugin-vue": "~10.5.0",
|
||||
"jiti": "^2.6.1",
|
||||
"jsdom": "^27.0.1",
|
||||
"npm-run-all2": "^8.0.4",
|
||||
"prettier": "3.6.2",
|
||||
"sass": "^1.93.2",
|
||||
"sass-loader": "^16.0.5",
|
||||
"tailwindcss": "^4.1.15",
|
||||
"typescript": "~5.9.0",
|
||||
"unplugin-auto-import": "^20.2.0",
|
||||
"unplugin-vue-components": "^30.0.0",
|
||||
"vite": "^7.1.11",
|
||||
"vite-plugin-svg-icons": "^2.0.1",
|
||||
"vite-plugin-vue-devtools": "^8.0.3",
|
||||
"vitest": "^3.2.4",
|
||||
"vue-tsc": "^3.1.1"
|
||||
}
|
||||
}
|
||||
7811
frontend/pnpm-lock.yaml
generated
@@ -1,116 +0,0 @@
|
||||
[
|
||||
{
|
||||
"Icon": "Hailey_Johnson.png",
|
||||
"Name": "船舶设计师",
|
||||
"Profile": "提供船舶制造中的实际需求和约束。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Jennifer_Moore.png",
|
||||
"Name": "防护工程专家",
|
||||
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Jane_Moreno.png",
|
||||
"Name": "病理生理学家",
|
||||
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Giorgio_Rossi.png",
|
||||
"Name": "药物化学家",
|
||||
"Profile": "负责将靶点概念转化为实际可合成的分子。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Tamara_Taylor.png",
|
||||
"Name": "制剂工程师",
|
||||
"Profile": "负责将活性药物成分(API)变成稳定、可用、符合战场要求的剂型。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Maria_Lopez.png",
|
||||
"Name": "监管事务专家",
|
||||
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Sam_Moore.png",
|
||||
"Name": "物理学家",
|
||||
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Yuriko_Yamamoto.png",
|
||||
"Name": "实验材料学家",
|
||||
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Carlos_Gomez.png",
|
||||
"Name": "计算模拟专家",
|
||||
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "John_Lin.png",
|
||||
"Name": "腐蚀机理研究员",
|
||||
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。",
|
||||
"Classification": "船舶制造数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Arthur_Burton.png",
|
||||
"Name": "先进材料研发员",
|
||||
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Eddy_Lin.png",
|
||||
"Name": "肾脏病学家",
|
||||
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Isabella_Rodriguez.png",
|
||||
"Name": "临床研究协调员",
|
||||
"Profile": "负责受试者招募和临床试验流程优化。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Latoya_Williams.png",
|
||||
"Name": "中医药专家",
|
||||
"Profile": "理解药物的中药成分和作用机制。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Carmen_Ortiz.png",
|
||||
"Name": "药物安全专家",
|
||||
"Profile": "专注于药物不良反应数据收集、分析和报告。",
|
||||
"Classification": "医药数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Rajiv_Patel.png",
|
||||
"Name": "二维材料科学家",
|
||||
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Tom_Moreno.png",
|
||||
"Name": "光电物理学家",
|
||||
"Profile": "研究材料的光电转换机制和关键影响因素。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Ayesha_Khan.png",
|
||||
"Name": "机器学习专家",
|
||||
"Profile": "专注于开发和应用AI模型用于材料模拟。",
|
||||
"Classification": "科学数据空间"
|
||||
},
|
||||
{
|
||||
"Icon": "Mei_Lin.png",
|
||||
"Name": "流体动力学专家",
|
||||
"Profile": "专注于流体行为理论和模拟。",
|
||||
"Classification": "科学数据空间"
|
||||
}
|
||||
]
|
||||
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"title": "数联网",
|
||||
"subTitle": "众创智能体",
|
||||
"centerTitle": "多智能体协同平台",
|
||||
"taskPromptWords": [
|
||||
"如何快速筛选慢性肾脏病药物潜在受试者?",
|
||||
"如何补充\"丹芍活血胶囊\"不良反应数据?",
|
||||
"如何快速研发用于战场失血性休克的药物?",
|
||||
"二维材料的光电性质受哪些关键因素影响?",
|
||||
"如何通过AI模拟的方法分析材料的微观结构?",
|
||||
"如何分析获取液态金属热力学参数?",
|
||||
"如何解决固态电池的成本和寿命难题?",
|
||||
"如何解决船舶制造中的材料腐蚀难题?",
|
||||
"如何解决船舶制造中流体模拟和建模优化难题?"
|
||||
],
|
||||
"agentRepository": {
|
||||
"storageVersionIdentifier": "1"
|
||||
},
|
||||
"dev": true,
|
||||
"apiBaseUrl": "http://localhost:8000"
|
||||
}
|
||||
|
Before Width: | Height: | Size: 4.2 KiB |
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"name": "3D Semantic-Geometric Corrosion Mapping Implementations",
|
||||
"data_space": "江苏省产研院",
|
||||
"doId": "bdware.scenario/d8f3ff8c-3fb3-4573-88a6-5dd823627c37",
|
||||
"fromRepo": "https://arxiv.org/abs/2404.13691"
|
||||
},
|
||||
{
|
||||
"name": "RustSEG -- Automated segmentation of corrosion using deep learning",
|
||||
"data_space": "江苏省产研院",
|
||||
"doId": "bdware.scenario/67445299-110a-4a4e-9fda-42e4b5a493c2",
|
||||
"fromRepo": "https://arxiv.org/abs/2205.05426"
|
||||
},
|
||||
{
|
||||
"name": "Pixel-level Corrosion Detection on Metal Constructions by Fusion of Deep Learning Semantic and Contour Segmentation",
|
||||
"data_space": "江苏省产研院",
|
||||
"doId": "bdware.scenario/115d5135-85d3-4123-8b81-9eb9f07b6153",
|
||||
"fromRepo": "https://arxiv.org/abs/2008.05204"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
Before Width: | Height: | Size: 79 KiB |
|
Before Width: | Height: | Size: 6.9 KiB |
@@ -1,42 +0,0 @@
|
||||
import type { Directive, DirectiveBinding } from 'vue'
|
||||
import { useConfigStoreHook } from '@/stores'
|
||||
|
||||
/**
|
||||
* 开发模式专用指令
|
||||
* 只在开发模式下显示元素,生产模式下会移除该元素
|
||||
*
|
||||
* \@param binding.value - 是否开启该功能,默认为 true
|
||||
* @example
|
||||
* \!-- 默认开启,开发模式显示 --
|
||||
* \!div v-dev-only开发模式内容</div>
|
||||
*
|
||||
* \!-- 传入参数控制 --
|
||||
* <div v-dev-only="true">开启指令</div>
|
||||
* <div v-dev-only="false">取消指令</div>
|
||||
*/
|
||||
export const devOnly: Directive = {
|
||||
mounted(el: HTMLElement, binding: DirectiveBinding) {
|
||||
checkAndRemoveElement(el, binding)
|
||||
},
|
||||
|
||||
updated(el: HTMLElement, binding: DirectiveBinding) {
|
||||
checkAndRemoveElement(el, binding)
|
||||
},
|
||||
}
|
||||
|
||||
const configStore = useConfigStoreHook()
|
||||
|
||||
/**
|
||||
* 检查并移除元素的逻辑
|
||||
*/
|
||||
function checkAndRemoveElement(el: HTMLElement, binding: DirectiveBinding) {
|
||||
const isDev = typeof configStore.config.dev === 'boolean' ? configStore.config.dev : import.meta.env.DEV
|
||||
// 默认值为 true,如果没有传值或者传值为 true 都启用
|
||||
const shouldEnable = binding.value !== false
|
||||
// 如果不是开发模式或者明确禁用,移除该元素
|
||||
if (!isDev && shouldEnable) {
|
||||
if (el.parentNode) {
|
||||
el.parentNode.removeChild(el)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
import type { App } from 'vue'
|
||||
|
||||
import { devOnly } from './devOnly'
|
||||
|
||||
|
||||
// 全局注册 directive
|
||||
export function setupDirective(app: App<Element>) {
|
||||
app.directive('dev-only', devOnly)
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
<script setup lang="ts">
|
||||
import { useRoute } from 'vue-router'
|
||||
import Layout from './layout/index.vue'
|
||||
import Share from './views/Share.vue'
|
||||
|
||||
const route = useRoute()
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<!-- 分享页面使用独立布局 -->
|
||||
<Share v-if="route.path.startsWith('/share/')" />
|
||||
<!-- 其他页面使用主布局 -->
|
||||
<Layout v-else />
|
||||
</template>
|
||||
|
||||
<style lang="scss">
|
||||
#app {
|
||||
background-color: var(--color-bg);
|
||||
color: var(--color-text);
|
||||
}
|
||||
|
||||
.jtk-endpoint {
|
||||
z-index: 100;
|
||||
}
|
||||
|
||||
.card-item + .card-item {
|
||||
margin-top: 35px;
|
||||
}
|
||||
|
||||
.el-card {
|
||||
border-radius: 8px;
|
||||
background: var(--color-bg-tertiary);
|
||||
border: 2px solid var(--color-card-border);
|
||||
box-shadow: var(--color-card-border-hover);
|
||||
|
||||
&:hover {
|
||||
background: var(--color-bg-content-hover);
|
||||
box-shadow: none;
|
||||
transition: background-color 0.3s ease-in-out;
|
||||
}
|
||||
|
||||
.el-card__body {
|
||||
padding: 10px;
|
||||
}
|
||||
}
|
||||
|
||||
:root {
|
||||
--gradient: linear-gradient(to right, #0093eb, #00d2d1);
|
||||
}
|
||||
|
||||
#task-template {
|
||||
.active-card {
|
||||
border: 2px solid transparent;
|
||||
$bg: var(--el-input-bg-color, var(--el-fill-color-blank));
|
||||
background: linear-gradient(
|
||||
var(--color-agent-list-selected-bg),
|
||||
var(--color-agent-list-selected-bg)
|
||||
)
|
||||
padding-box,
|
||||
linear-gradient(to right, #00c8d2, #315ab4) border-box;
|
||||
color: var(--color-text);
|
||||
}
|
||||
}
|
||||
|
||||
/* 1. 定义流动动画:让虚线沿路径移动 */
|
||||
@keyframes flowAnimation {
|
||||
to {
|
||||
stroke-dashoffset: 8; /* 与stroke-dasharray总和一致 */
|
||||
}
|
||||
}
|
||||
|
||||
@keyframes flowAnimationReverse {
|
||||
to {
|
||||
stroke-dashoffset: -8; /* 与stroke-dasharray总和一致 */
|
||||
}
|
||||
}
|
||||
|
||||
/* 2. 为jsPlumb连线绑定动画:作用于SVG的path元素 */
|
||||
/* jtk-connector是jsPlumb连线的默认SVG类,path是实际的线条元素 */
|
||||
.jtk-connector-output path {
|
||||
/* 定义虚线规则:线段长度5px + 间隙3px(总长度8px,与动画偏移量匹配) */
|
||||
stroke-dasharray: 5 3;
|
||||
/* 应用动画:名称+时长+线性速度+无限循环 */
|
||||
animation: flowAnimationReverse 0.5s linear infinite;
|
||||
/* 可选:设置线条基础样式(颜色、宽度) */
|
||||
stroke-width: 2;
|
||||
}
|
||||
|
||||
/* 2. 为jsPlumb连线绑定动画:作用于SVG的path元素 */
|
||||
/* jtk-connector是jsPlumb连线的默认SVG类,path是实际的线条元素 */
|
||||
.jtk-connector-input path {
|
||||
/* 定义虚线规则:线段长度5px + 间隙3px(总长度8px,与动画偏移量匹配) */
|
||||
stroke-dasharray: 5 3;
|
||||
/* 应用动画:名称+时长+线性速度+无限循环 */
|
||||
animation: flowAnimationReverse 0.5s linear infinite;
|
||||
/* 可选:设置线条基础样式(颜色、宽度) */
|
||||
stroke-width: 2;
|
||||
}
|
||||
|
||||
/* 可选: hover时增强动画效果(如加速、变色) */
|
||||
.jtk-connector path:hover {
|
||||
animation-duration: 0.5s; /* hover时流动加速 */
|
||||
}
|
||||
</style>
|
||||
@@ -1,11 +0,0 @@
|
||||
import { describe, it, expect } from 'vitest'
|
||||
|
||||
import { mount } from '@vue/test-utils'
|
||||
import App from '../App.vue'
|
||||
|
||||
describe('App', () => {
|
||||
it('mounts renders properly', () => {
|
||||
const wrapper = mount(App)
|
||||
expect(wrapper.text()).toContain('You did it!')
|
||||
})
|
||||
})
|
||||
@@ -1,998 +0,0 @@
|
||||
import websocket from '@/utils/websocket'
|
||||
import type { Agent, IApiStepTask, IRawPlanResponse, IRawStepTask } from '@/stores'
|
||||
import { withRetry } from '@/utils/retry'
|
||||
import request from '@/utils/request'
|
||||
import { useConfigStoreHook } from '@/stores'
|
||||
|
||||
export interface ActionHistory {
|
||||
ID: string
|
||||
ActionType: string
|
||||
AgentName: string
|
||||
Description: string
|
||||
ImportantInput: string[]
|
||||
Action_Result: string
|
||||
}
|
||||
|
||||
export interface BranchAction {
|
||||
ID: string
|
||||
ActionType: string
|
||||
AgentName: string
|
||||
Description: string
|
||||
ImportantInput: string[]
|
||||
}
|
||||
|
||||
export type IExecuteRawResponse = {
|
||||
LogNodeType: string
|
||||
NodeId: string
|
||||
InputName_List?: string[] | null
|
||||
OutputName?: string
|
||||
content?: string
|
||||
ActionHistory: ActionHistory[]
|
||||
}
|
||||
|
||||
/**
|
||||
* WebSocket 流式事件类型
|
||||
*/
|
||||
export type StreamingEvent =
|
||||
| {
|
||||
type: 'step_start'
|
||||
step_index: number
|
||||
total_steps: number
|
||||
step_name: string
|
||||
task_description?: string
|
||||
}
|
||||
| {
|
||||
type: 'action_complete'
|
||||
step_index: number
|
||||
step_name: string
|
||||
action_index: number
|
||||
total_actions: number
|
||||
completed_actions: number
|
||||
action_result: ActionHistory
|
||||
batch_info?: {
|
||||
batch_index: number
|
||||
batch_size: number
|
||||
is_parallel: boolean
|
||||
}
|
||||
}
|
||||
| {
|
||||
type: 'step_complete'
|
||||
step_index: number
|
||||
step_name: string
|
||||
step_log_node: any
|
||||
object_log_node: any
|
||||
}
|
||||
| {
|
||||
type: 'execution_complete'
|
||||
total_steps: number
|
||||
}
|
||||
| {
|
||||
type: 'error'
|
||||
message: string
|
||||
}
|
||||
|
||||
export interface IFillAgentSelectionRequest {
|
||||
goal: string
|
||||
stepTask: IApiStepTask
|
||||
agents: string[]
|
||||
}
|
||||
|
||||
class Api {
|
||||
// 提取响应数据的公共方法
|
||||
private extractResponse<T>(raw: any): T {
|
||||
return (raw.data || raw) as T
|
||||
}
|
||||
|
||||
// 颜色向量转 HSL 字符串
|
||||
private vec2Hsl = (color: number[]): string => {
|
||||
const [h, s, l] = color
|
||||
return `hsl(${h}, ${s}%, ${l}%)`
|
||||
}
|
||||
|
||||
setAgents = (data: Pick<Agent, 'Name' | 'Profile' | 'apiUrl' | 'apiKey' | 'apiModel'>[], saveToDb = false) => {
|
||||
const storedUserId = localStorage.getItem('user_id')
|
||||
// 为每个智能体添加 user_id
|
||||
const payload = data.map(agent => ({
|
||||
...agent,
|
||||
...(storedUserId ? { user_id: storedUserId } : {})
|
||||
}))
|
||||
// saveToDb 为 true 时才写入数据库
|
||||
return websocket.send('set_agents', { agents: payload, save_to_db: saveToDb })
|
||||
}
|
||||
|
||||
getAgents = (user_id?: string) => {
|
||||
// 优先使用传入的 user_id,其次使用 localStorage 存储的,首次访问时不传
|
||||
const storedUserId = localStorage.getItem('user_id')
|
||||
const sendUserId = user_id || storedUserId
|
||||
return websocket.send('get_agents', sendUserId ? { user_id: sendUserId } : {})
|
||||
}
|
||||
|
||||
generateBasePlan = (data: {
|
||||
goal: string
|
||||
inputs: string[]
|
||||
apiUrl?: string
|
||||
apiKey?: string
|
||||
apiModel?: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}) => {
|
||||
const storedUserId = localStorage.getItem('user_id')
|
||||
return websocket.send(
|
||||
'generate_base_plan',
|
||||
{
|
||||
'General Goal': data.goal,
|
||||
'Initial Input Object': data.inputs,
|
||||
apiUrl: data.apiUrl,
|
||||
apiKey: data.apiKey,
|
||||
apiModel: data.apiModel,
|
||||
...(storedUserId ? { user_id: storedUserId } : {}),
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* 优化版流式执行计划(支持动态追加步骤)
|
||||
* 步骤级流式 + 动作级智能并行 + 动态追加步骤
|
||||
*/
|
||||
executePlanOptimized = (
|
||||
plan: IRawPlanResponse,
|
||||
onMessage: (event: StreamingEvent) => void,
|
||||
onError?: (error: Error) => void,
|
||||
onComplete?: () => void,
|
||||
_useWebSocket?: boolean,
|
||||
existingKeyObjects?: Record<string, any>,
|
||||
enableDynamic?: boolean,
|
||||
onExecutionStarted?: (executionId: string) => void,
|
||||
executionId?: string,
|
||||
restartFromStepIndex?: number,
|
||||
rehearsalLog?: any[],
|
||||
TaskID?: string,
|
||||
) => {
|
||||
|
||||
void _useWebSocket // 保留参数位置以保持兼容性
|
||||
const data = {
|
||||
RehearsalLog: rehearsalLog || [], // 使用传递的 RehearsalLog
|
||||
num_StepToRun: null,
|
||||
existingKeyObjects: existingKeyObjects || {},
|
||||
enable_dynamic: enableDynamic || false,
|
||||
execution_id: executionId || null,
|
||||
restart_from_step_index: restartFromStepIndex ?? null, // 新增:传递重新执行索引
|
||||
task_id: TaskID || null, // 任务唯一标识,用于写入数据库
|
||||
plan: {
|
||||
'Initial Input Object': plan['Initial Input Object'],
|
||||
'General Goal': plan['General Goal'],
|
||||
'Collaboration Process': plan['Collaboration Process']?.map((step) => ({
|
||||
StepName: step.StepName,
|
||||
TaskContent: step.TaskContent,
|
||||
InputObject_List: step.InputObject_List,
|
||||
OutputObject: step.OutputObject,
|
||||
AgentSelection: step.AgentSelection,
|
||||
Collaboration_Brief_frontEnd: step.Collaboration_Brief_frontEnd,
|
||||
TaskProcess: step.TaskProcess.map((action) => ({
|
||||
ActionType: action.ActionType,
|
||||
AgentName: action.AgentName,
|
||||
Description: action.Description,
|
||||
ID: action.ID,
|
||||
ImportantInput: action.ImportantInput,
|
||||
})),
|
||||
})),
|
||||
},
|
||||
}
|
||||
|
||||
websocket.subscribe(
|
||||
'execute_plan_optimized',
|
||||
data,
|
||||
// onProgress
|
||||
(progressData) => {
|
||||
try {
|
||||
let event: StreamingEvent
|
||||
|
||||
// 处理不同类型的progress数据
|
||||
if (typeof progressData === 'string') {
|
||||
event = JSON.parse(progressData)
|
||||
} else {
|
||||
event = progressData as StreamingEvent
|
||||
}
|
||||
|
||||
// 处理特殊事件类型
|
||||
if (event && typeof event === 'object') {
|
||||
// 检查是否是execution_started事件
|
||||
if ('status' in event && event.status === 'execution_started') {
|
||||
if ('execution_id' in event && onExecutionStarted) {
|
||||
onExecutionStarted(event.execution_id as string)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
onMessage(event)
|
||||
} catch (e) {
|
||||
// Failed to parse WebSocket data
|
||||
}
|
||||
},
|
||||
// onComplete
|
||||
() => {
|
||||
onComplete?.()
|
||||
},
|
||||
// onError
|
||||
(error) => {
|
||||
onError?.(error)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* 分支任务大纲
|
||||
*/
|
||||
branchPlanOutline = (data: {
|
||||
branch_Number: number
|
||||
Modification_Requirement: string
|
||||
Existing_Steps: IRawStepTask[]
|
||||
Baseline_Completion: number
|
||||
initialInputs: string[]
|
||||
goal: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}) =>
|
||||
websocket.send(
|
||||
'branch_plan_outline',
|
||||
{
|
||||
branch_Number: data.branch_Number,
|
||||
Modification_Requirement: data.Modification_Requirement,
|
||||
Existing_Steps: data.Existing_Steps,
|
||||
Baseline_Completion: data.Baseline_Completion,
|
||||
'Initial Input Object': data.initialInputs,
|
||||
'General Goal': data.goal,
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
)
|
||||
|
||||
/**
|
||||
* 分支任务流程
|
||||
*/
|
||||
branchTaskProcess = (data: {
|
||||
branch_Number: number
|
||||
Modification_Requirement: string
|
||||
Existing_Steps: BranchAction[]
|
||||
Baseline_Completion: number
|
||||
stepTaskExisting: any
|
||||
goal: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}) =>
|
||||
websocket.send(
|
||||
'branch_task_process',
|
||||
{
|
||||
branch_Number: data.branch_Number,
|
||||
Modification_Requirement: data.Modification_Requirement,
|
||||
Existing_Steps: data.Existing_Steps,
|
||||
Baseline_Completion: data.Baseline_Completion,
|
||||
stepTaskExisting: data.stepTaskExisting,
|
||||
'General Goal': data.goal,
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
)
|
||||
|
||||
fillStepTask = async (data: {
|
||||
goal: string
|
||||
stepTask: any
|
||||
generation_id?: string
|
||||
TaskID?: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}): Promise<IRawStepTask> => {
|
||||
const rawResponse = await withRetry(
|
||||
() =>
|
||||
websocket.send(
|
||||
'fill_step_task',
|
||||
{
|
||||
'General Goal': data.goal,
|
||||
stepTask: data.stepTask,
|
||||
generation_id: data.generation_id || '',
|
||||
task_id: data.TaskID || '',
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
),
|
||||
{
|
||||
maxRetries: 3,
|
||||
initialDelayMs: 2000,
|
||||
onRetry: (error, attempt, delay) => {
|
||||
console.warn(` [fillStepTask] 第${attempt}次重试,等待 ${delay}ms...`, error?.message)
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
let response = this.extractResponse<any>(rawResponse)
|
||||
if (response?.filled_stepTask) {
|
||||
response = response.filled_stepTask
|
||||
}
|
||||
|
||||
const briefData: Record<string, { text: string; style?: Record<string, string> }> = {}
|
||||
if (response.Collaboration_Brief_FrontEnd?.data) {
|
||||
for (const [key, value] of Object.entries(response.Collaboration_Brief_FrontEnd.data)) {
|
||||
briefData[key] = {
|
||||
text: (value as { text: string; color: number[] }).text,
|
||||
style: {
|
||||
background: this.vec2Hsl((value as { text: string; color: number[] }).color),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
StepName: response.StepName || '',
|
||||
TaskContent: response.TaskContent || '',
|
||||
InputObject_List: response.InputObject_List || [],
|
||||
OutputObject: response.OutputObject || '',
|
||||
AgentSelection: response.AgentSelection || [],
|
||||
Collaboration_Brief_frontEnd: {
|
||||
template: response.Collaboration_Brief_FrontEnd?.template || '',
|
||||
data: briefData,
|
||||
},
|
||||
TaskProcess: response.TaskProcess || [],
|
||||
}
|
||||
}
|
||||
|
||||
fillStepTaskTaskProcess = async (data: {
|
||||
goal: string
|
||||
stepTask: IApiStepTask
|
||||
agents: string[]
|
||||
TaskID?: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}): Promise<IApiStepTask> => {
|
||||
const rawResponse = await withRetry(
|
||||
() =>
|
||||
websocket.send(
|
||||
'fill_step_task_process',
|
||||
{
|
||||
'General Goal': data.goal,
|
||||
task_id: data.TaskID || undefined,
|
||||
stepTask_lackTaskProcess: {
|
||||
StepName: data.stepTask.name,
|
||||
TaskContent: data.stepTask.content,
|
||||
InputObject_List: data.stepTask.inputs,
|
||||
OutputObject: data.stepTask.output,
|
||||
AgentSelection: data.agents,
|
||||
},
|
||||
agents: data.agents,
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
),
|
||||
{
|
||||
maxRetries: 3,
|
||||
initialDelayMs: 2000,
|
||||
onRetry: (error, attempt, delay) => {
|
||||
console.warn(
|
||||
`[fillStepTaskTaskProcess] 第${attempt}次重试,等待 ${delay}ms...`,
|
||||
error?.message,
|
||||
)
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
let response = this.extractResponse<any>(rawResponse)
|
||||
if (response?.filled_stepTask) {
|
||||
response = response.filled_stepTask
|
||||
}
|
||||
|
||||
const briefData: Record<string, { text: string; style: { background: string } }> = {}
|
||||
if (response.Collaboration_Brief_FrontEnd?.data) {
|
||||
for (const [key, value] of Object.entries(response.Collaboration_Brief_FrontEnd.data)) {
|
||||
briefData[key] = {
|
||||
text: (value as { text: string; color: number[] }).text,
|
||||
style: {
|
||||
background: this.vec2Hsl((value as { text: string; color: number[] }).color),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const process = (response.TaskProcess || []).map((action: any) => ({
|
||||
id: action.ID,
|
||||
type: action.ActionType,
|
||||
agent: action.AgentName,
|
||||
description: action.Description,
|
||||
inputs: action.ImportantInput,
|
||||
}))
|
||||
|
||||
return {
|
||||
name: response.StepName || '',
|
||||
content: response.TaskContent || '',
|
||||
inputs: response.InputObject_List || [],
|
||||
output: response.OutputObject || '',
|
||||
agents: response.AgentSelection || [],
|
||||
brief: {
|
||||
template: response.Collaboration_Brief_FrontEnd?.template || '',
|
||||
data: briefData,
|
||||
},
|
||||
process,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 为每个智能体评分(带重试机制)
|
||||
*/
|
||||
agentSelectModifyInit = async (data: {
|
||||
goal: string
|
||||
stepTask: any
|
||||
TaskID?: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}): Promise<Record<string, Record<string, { reason: string; score: number }>>> => {
|
||||
const requestPayload = {
|
||||
'General Goal': data.goal,
|
||||
stepTask: {
|
||||
Id: data.stepTask.Id || data.stepTask.id,
|
||||
StepName: data.stepTask.StepName || data.stepTask.name,
|
||||
TaskContent: data.stepTask.TaskContent || data.stepTask.content,
|
||||
InputObject_List: data.stepTask.InputObject_List || data.stepTask.inputs,
|
||||
OutputObject: data.stepTask.OutputObject || data.stepTask.output,
|
||||
},
|
||||
task_id: data.TaskID || '',
|
||||
}
|
||||
|
||||
const rawResponse = await withRetry(
|
||||
() =>
|
||||
websocket.send(
|
||||
'agent_select_modify_init',
|
||||
requestPayload,
|
||||
undefined,
|
||||
data.onProgress,
|
||||
),
|
||||
{
|
||||
maxRetries: 3,
|
||||
initialDelayMs: 2000,
|
||||
onRetry: (error, attempt, delay) => {
|
||||
console.warn(
|
||||
`[agentSelectModifyInit] 第${attempt}次重试,等待 ${delay}ms...`,
|
||||
error?.message,
|
||||
)
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
let response = this.extractResponse<any>(rawResponse)
|
||||
if (response?.scoreTable) {
|
||||
response = response.scoreTable
|
||||
}
|
||||
|
||||
const transformedData: Record<string, Record<string, { reason: string; score: number }>> = {}
|
||||
|
||||
if (!response || typeof response !== 'object' || Array.isArray(response)) {
|
||||
console.warn('[agentSelectModifyInit] 后端返回数据格式异常:', response)
|
||||
return transformedData
|
||||
}
|
||||
|
||||
for (const [aspect, agents] of Object.entries(response)) {
|
||||
for (const [agentName, scoreInfo] of Object.entries(
|
||||
(agents as Record<string, { Reason: string; Score: number }>) || {},
|
||||
)) {
|
||||
if (!transformedData[agentName]) {
|
||||
transformedData[agentName] = {}
|
||||
}
|
||||
transformedData[agentName][aspect] = {
|
||||
reason: scoreInfo.Reason,
|
||||
score: scoreInfo.Score,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return transformedData
|
||||
}
|
||||
|
||||
/**
|
||||
* 添加新的评估维度
|
||||
*/
|
||||
agentSelectModifyAddAspect = async (data: {
|
||||
aspectList: string[]
|
||||
stepTask?: {
|
||||
Id?: string
|
||||
StepName?: string
|
||||
TaskContent?: string
|
||||
InputObject_List?: string[]
|
||||
OutputObject?: string
|
||||
}
|
||||
TaskID?: string
|
||||
onProgress?: (progress: {
|
||||
status: string
|
||||
stage?: string
|
||||
message?: string
|
||||
[key: string]: any
|
||||
}) => void
|
||||
}): Promise<{
|
||||
aspectName: string
|
||||
agentScores: Record<string, { score: number; reason: string }>
|
||||
}> => {
|
||||
const rawResponse = await websocket.send(
|
||||
'agent_select_modify_add_aspect',
|
||||
{
|
||||
aspectList: data.aspectList,
|
||||
stepTask: data.stepTask,
|
||||
task_id: data.TaskID || '',
|
||||
},
|
||||
undefined,
|
||||
data.onProgress,
|
||||
)
|
||||
|
||||
const response = this.extractResponse<any>(rawResponse)
|
||||
|
||||
const newAspect = data.aspectList[data.aspectList.length - 1]
|
||||
if (!newAspect) {
|
||||
throw new Error('aspectList is empty')
|
||||
}
|
||||
|
||||
const scoreTable = response.scoreTable || response
|
||||
const newAspectAgents = scoreTable[newAspect]
|
||||
const agentScores: Record<string, { score: number; reason: string }> = {}
|
||||
|
||||
if (newAspectAgents) {
|
||||
for (const [agentName, scoreInfo] of Object.entries(newAspectAgents as Record<string, { Score?: number; score?: number; Reason?: string; reason?: string }>)) {
|
||||
agentScores[agentName] = {
|
||||
score: scoreInfo.Score || scoreInfo.score || 0,
|
||||
reason: scoreInfo.Reason || scoreInfo.reason || '',
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
aspectName: newAspect,
|
||||
agentScores,
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除评估维度
|
||||
* @param taskId 任务ID
|
||||
* @param stepId 步骤ID(可选,不传则删除所有步骤中的该维度)
|
||||
* @param aspectName 要删除的维度名称
|
||||
* @returns 是否删除成功
|
||||
*/
|
||||
agentSelectModifyDeleteAspect = async (
|
||||
taskId: string,
|
||||
aspectName: string,
|
||||
stepId?: string
|
||||
): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send(
|
||||
'agent_select_modify_delete_aspect',
|
||||
{
|
||||
task_id: taskId,
|
||||
step_id: stepId || '',
|
||||
aspect_name: aspectName,
|
||||
},
|
||||
undefined,
|
||||
undefined
|
||||
)
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('删除维度失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 向正在执行的任务追加新步骤
|
||||
* @param executionId 执行ID
|
||||
* @param newSteps 新步骤列表
|
||||
* @returns 追加的步骤数量
|
||||
*/
|
||||
addStepsToExecution = async (executionId: string, newSteps: IRawStepTask[]): Promise<number> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
const rawResponse = await websocket.send('add_steps_to_execution', {
|
||||
execution_id: executionId,
|
||||
new_steps: newSteps.map((step) => ({
|
||||
StepName: step.StepName,
|
||||
TaskContent: step.TaskContent,
|
||||
InputObject_List: step.InputObject_List,
|
||||
OutputObject: step.OutputObject,
|
||||
AgentSelection: step.AgentSelection,
|
||||
Collaboration_Brief_frontEnd: step.Collaboration_Brief_frontEnd,
|
||||
TaskProcess: step.TaskProcess.map((action) => ({
|
||||
ActionType: action.ActionType,
|
||||
AgentName: action.AgentName,
|
||||
Description: action.Description,
|
||||
ID: action.ID,
|
||||
ImportantInput: action.ImportantInput,
|
||||
})),
|
||||
})),
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ added_count: number }>(rawResponse)
|
||||
|
||||
return response?.added_count || 0
|
||||
}
|
||||
|
||||
/**
|
||||
* 保存任务分支数据
|
||||
* @param taskId 任务ID
|
||||
* @param branches 分支数据数组
|
||||
* @returns 是否保存成功
|
||||
*/
|
||||
saveBranches = async (taskId: string, branches: any[]): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('save_branches', {
|
||||
task_id: taskId,
|
||||
branches,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('保存分支数据失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 保存任务过程分支数据
|
||||
* @param TaskID 大任务ID(数据库主键)
|
||||
* @param branches 任务过程分支数据(Map结构转对象)
|
||||
* @returns 是否保存成功
|
||||
*/
|
||||
saveTaskProcessBranches = async (
|
||||
TaskID: string,
|
||||
branches: Record<string, Record<string, any[]>>,
|
||||
): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('save_task_process_branches', {
|
||||
task_id: TaskID,
|
||||
branches,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('保存任务过程分支数据失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除任务过程分支数据
|
||||
* @param TaskID 大任务ID
|
||||
* @param stepId 小任务ID
|
||||
* @param branchId 要删除的分支ID
|
||||
* @returns 是否删除成功
|
||||
*/
|
||||
deleteTaskProcessBranch = async (
|
||||
TaskID: string,
|
||||
stepId: string,
|
||||
branchId: string,
|
||||
): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('delete_task_process_branch', {
|
||||
task_id: TaskID,
|
||||
stepId,
|
||||
branchId,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('删除任务过程分支数据失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除任务过程分支中的单个节点
|
||||
* @param TaskID 大任务ID
|
||||
* @param stepId 小任务ID
|
||||
* @param branchId 分支ID
|
||||
* @param nodeId 要删除的节点ID
|
||||
* @param edges 更新后的 edges 数据
|
||||
* @returns 是否删除成功
|
||||
*/
|
||||
deleteTaskProcessNode = async (
|
||||
TaskID: string,
|
||||
stepId: string,
|
||||
branchId: string,
|
||||
nodeId: string,
|
||||
edges: any[] = [],
|
||||
): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('delete_task_process_node', {
|
||||
task_id: TaskID,
|
||||
stepId,
|
||||
branchId,
|
||||
nodeId,
|
||||
edges,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('删除任务过程节点数据失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 更新任务大纲数据
|
||||
* @param taskId 任务ID
|
||||
* @param taskOutline 完整的大纲数据
|
||||
* @returns 是否更新成功
|
||||
*/
|
||||
updateTaskOutline = async (taskId: string, taskOutline: any): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('save_task_outline', {
|
||||
task_id: taskId,
|
||||
task_outline: taskOutline,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string }>(rawResponse)
|
||||
return response?.status === 'success' || false
|
||||
} catch (error) {
|
||||
console.error('更新任务大纲失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 更新指定步骤的 assigned_agents
|
||||
* @param params 参数对象
|
||||
* @returns 是否更新成功
|
||||
*/
|
||||
updateAssignedAgents = async (params: {
|
||||
task_id: string // 大任务ID(数据库主键)
|
||||
step_id: string // 步骤级ID(小任务UUID)
|
||||
agents: string[] // 选中的 agent 列表
|
||||
confirmed_groups?: string[][] // 可选:确认的 agent 组合列表
|
||||
agent_combinations?: Record<string, { process: any; brief: any }> // 可选:agent 组合的 TaskProcess 数据
|
||||
}): Promise<boolean> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
try {
|
||||
const rawResponse = await websocket.send('update_assigned_agents', {
|
||||
task_id: params.task_id,
|
||||
step_id: params.step_id,
|
||||
agents: params.agents,
|
||||
confirmed_groups: params.confirmed_groups,
|
||||
agent_combinations: params.agent_combinations,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{ status: string; error?: string }>(rawResponse)
|
||||
if (response?.status === 'success') {
|
||||
console.log('更新 assigned_agents 成功:', params)
|
||||
return true
|
||||
}
|
||||
console.warn('更新 assigned_agents 失败:', response?.error)
|
||||
return false
|
||||
} catch (error) {
|
||||
console.error('更新 assigned_agents 失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// ==================== 导出功能 ====================
|
||||
|
||||
/**
|
||||
* 导出任务为指定格式
|
||||
*/
|
||||
exportTask = async (params: {
|
||||
task_id: string
|
||||
export_type: string
|
||||
user_id?: string
|
||||
}): Promise<{
|
||||
record_id: number
|
||||
file_name: string
|
||||
file_url: string
|
||||
file_size: number
|
||||
export_type: string
|
||||
}> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
const storedUserId = localStorage.getItem('user_id')
|
||||
const sendUserId = params.user_id || storedUserId
|
||||
const rawResponse = await websocket.send('export', {
|
||||
task_id: params.task_id,
|
||||
export_type: params.export_type,
|
||||
...(sendUserId ? { user_id: sendUserId } : {}),
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{
|
||||
record_id: number
|
||||
file_name: string
|
||||
file_url: string
|
||||
file_size: number
|
||||
export_type: string
|
||||
}>(rawResponse)
|
||||
|
||||
if (response) {
|
||||
console.log('导出成功:', response)
|
||||
return response
|
||||
}
|
||||
throw new Error('导出失败')
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取导出记录列表
|
||||
*/
|
||||
getExportList = async (params: {
|
||||
task_id: string
|
||||
}): Promise<{
|
||||
list: Array<{
|
||||
id: number
|
||||
task_id: string
|
||||
user_id: string
|
||||
export_type: string
|
||||
file_name: string
|
||||
file_path: string
|
||||
file_url: string
|
||||
file_size: number
|
||||
created_at: string
|
||||
}>
|
||||
total: number
|
||||
}> => {
|
||||
if (!websocket.connected) {
|
||||
throw new Error('WebSocket未连接')
|
||||
}
|
||||
|
||||
const rawResponse = await websocket.send('get_export_list', {
|
||||
task_id: params.task_id,
|
||||
})
|
||||
|
||||
const response = this.extractResponse<{
|
||||
list: Array<{
|
||||
id: number
|
||||
task_id: string
|
||||
user_id: string
|
||||
export_type: string
|
||||
file_name: string
|
||||
file_path: string
|
||||
file_url: string
|
||||
file_size: number
|
||||
created_at: string
|
||||
}>
|
||||
total: number
|
||||
}>(rawResponse)
|
||||
|
||||
if (response) {
|
||||
return response
|
||||
}
|
||||
return { list: [], total: 0 }
|
||||
}
|
||||
|
||||
/**
|
||||
* 下载导出文件
|
||||
*/
|
||||
downloadExport = async (recordId: number): Promise<void> => {
|
||||
const configStore = useConfigStoreHook()
|
||||
const baseURL = configStore.config.apiBaseUrl || ''
|
||||
// 直接使用 window.location.href 跳转下载,浏览器会自动处理文件名
|
||||
window.location.href = `${baseURL}/export/${recordId}/download`
|
||||
}
|
||||
|
||||
/**
|
||||
* 预览导出文件
|
||||
*/
|
||||
previewExport = async (recordId: number): Promise<{
|
||||
content?: string
|
||||
file_url?: string
|
||||
file_name?: string
|
||||
type: string
|
||||
}> => {
|
||||
const configStore = useConfigStoreHook()
|
||||
const baseURL = configStore.config.apiBaseUrl || ''
|
||||
const url = `${baseURL}/export/${recordId}/preview`
|
||||
|
||||
const response = await request<{
|
||||
content?: string
|
||||
file_url?: string
|
||||
file_name?: string
|
||||
type: string
|
||||
}>({
|
||||
url,
|
||||
method: 'GET',
|
||||
})
|
||||
|
||||
return response.data
|
||||
}
|
||||
|
||||
/**
|
||||
* 生成分享链接
|
||||
*/
|
||||
shareExport = async (recordId: number): Promise<{
|
||||
share_url: string
|
||||
file_name: string
|
||||
expired_at: string | null
|
||||
}> => {
|
||||
const configStore = useConfigStoreHook()
|
||||
const baseURL = configStore.config.apiBaseUrl || ''
|
||||
const url = `${baseURL}/export/${recordId}/share`
|
||||
|
||||
const response = await request<{
|
||||
share_url: string
|
||||
file_name: string
|
||||
expired_at: string | null
|
||||
}>({
|
||||
url,
|
||||
method: 'GET',
|
||||
})
|
||||
|
||||
return response
|
||||
}
|
||||
deleteExport = async (recordId: number): Promise<boolean> => {
|
||||
const configStore = useConfigStoreHook()
|
||||
const baseURL = configStore.config.apiBaseUrl || ''
|
||||
const url = `${baseURL}/export/${recordId}`
|
||||
|
||||
try {
|
||||
await request({
|
||||
url,
|
||||
method: 'DELETE',
|
||||
})
|
||||
console.log('删除导出记录成功:', recordId)
|
||||
return true
|
||||
} catch (error) {
|
||||
console.error('删除导出记录失败:', error)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shared singleton instance of the Api client defined above.
export default new Api()
|
||||
@@ -1,20 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
|
||||
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg t="1766247549273" class="icon"
|
||||
viewBox="0 0 1024 1024" version="1.1"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
p-id="10050" xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
width="16" height="16">
|
||||
<path d="M512 1024C229.248 1024
|
||||
0 794.752 0 512S229.248 0 512 0s512
|
||||
229.248 512 512-229.248 512-512 512z
|
||||
m0-938.666667C276.352 85.333333 85.333333
|
||||
276.352 85.333333 512s191.018667 426.666667
|
||||
426.666667 426.666667 426.666667-191.018667
|
||||
426.666667-426.666667S747.648 85.333333 512
|
||||
85.333333z m198.698667 625.365334a42.666667
|
||||
42.666667 0 0 1-60.330667
|
||||
0L512 572.330667l-138.368 138.368a42.666667 42.666667 0 0 1-60.330667-60.330667L451.669333 512 313.301333 373.632a42.666667 42.666667 0 0 1 60.330667-60.330667L512 451.669333l138.368-138.368a42.624 42.624 0 1 1 60.330667 60.330667L572.330667 512l138.368 138.368a42.666667 42.666667 0 0 1 0 60.330667z"
|
||||
fill="#8e0707" p-id="10051">
|
||||
</path></svg>
|
||||
|
Before Width: | Height: | Size: 1.1 KiB |
@@ -1,7 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg t="1766247454540" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="9049"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" width="16" height="16">
|
||||
<path d="M512 1024C230.4 1024 0 793.6 0 512S230.4 0 512 0s512 230.4 512 512-230.4 512-512 512z m0-938.666667C277.333333 85.333333 85.333333 277.333333 85.333333 512s192 426.666667 426.666667 426.666667 426.666667-192 426.666667-426.666667S746.666667 85.333333 512 85.333333z" p-id="9050">
|
||||
</path>
|
||||
<path d="M708.266667 375.466667c-17.066667-17.066667-44.8-17.066667-59.733334 0l-181.333333 181.333333-91.733333-91.733333c-14.933333-14.933333-40.533333-14.933333-55.466667 0l-6.4 4.266666c-14.933333 14.933333-14.933333 40.533333 0 55.466667l125.866667 125.866667c14.933333 14.933333 40.533333 14.933333 55.466666 0l4.266667-4.266667 209.066667-209.066667c17.066667-17.066667 17.066667-44.8 0-61.866666z" p-id="9051">
|
||||
</path></svg>
|
||||
|
Before Width: | Height: | Size: 1.0 KiB |
@@ -1 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1772516996617" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="8642" xmlns:xlink="http://www.w3.org/1999/xlink" width="256" height="256"><path d="M354.40128 0c-87.04 0-157.44 70.55872-157.44 157.59872v275.68128H78.72c-21.6576 0-39.36256 17.69984-39.36256 39.36256v236.31872c0 21.6576 17.69984 39.35744 39.36256 39.35744h118.24128v118.08256c0 87.04 70.4 157.59872 157.44 157.59872h472.63744c87.04 0 157.59872-70.55872 157.59872-157.59872V315.0336c0-41.74848-38.9888-81.93024-107.52-149.27872l-29.11744-29.12256L818.87744 107.52C751.5392 38.9888 711.39328 0 669.59872 0H354.4064z m0 78.72h287.20128c28.35456 7.0912 27.99616 42.1376 27.99616 76.8v120.16128c0 21.6576 17.69984 39.35744 39.36256 39.35744h118.07744c39.38816 0 78.87872-0.0256 78.87872 39.36256v512c0 43.32032-35.55328 78.87872-78.87872 78.87872H354.4064c-43.32544 0-78.72-35.5584-78.72-78.87872v-118.08256h393.91744c21.66272 0 39.36256-17.69472 39.36256-39.35744V472.64256c0-21.66272-17.69984-39.36256-39.36256-39.36256H275.68128V157.59872c0-43.32032 35.39456-78.87872 78.72-78.87872zM308.6336 498.07872c23.04 0 41.28256 8.00256 54.72256 24.00256 14.08 15.36 21.12 37.43744 21.12 66.23744 0 29.44-7.04 51.84-21.12 67.2-13.44 15.36-31.68256 23.04-54.72256 23.04-23.68 0-42.24-7.35744-55.68-22.07744-13.44-15.36-20.15744-38.08256-20.15744-68.16256 0-29.44 6.71744-51.84 20.15744-67.2s32-23.04 55.68-23.04z m186.24 0.96256c17.92 0 33.28 3.2 46.08 9.6l-9.6 19.2c-12.16-6.4-24.32-9.6-36.48-9.6-16.64 0-30.39744 6.4-41.27744 19.2-10.24 12.16-15.36 29.44-15.36 51.84 0 23.04 4.79744 40.63744 14.39744 52.79744 9.6 11.52 23.68 17.28 42.24 17.28 10.24 0 23.36256-2.23744 39.36256-6.71744v19.2c-11.52 4.48-25.92256 6.71744-43.20256 6.71744-23.68 0-42.24-7.35744-55.68-22.07744-13.44-15.36-20.15744-38.08256-20.15744-68.16256 0-28.16 7.04-49.92 21.12-65.28 
14.72-16 34.23744-23.99744 58.55744-23.99744z m-421.43744 1.92h48.95744c24.96 0 44.48256 7.68 58.56256 23.04 14.72 14.72 22.07744 35.84 22.07744 63.36 0 28.8-7.68 50.87744-23.04 66.23744-14.72 15.36-35.51744 23.04-62.39744 23.04h-44.16V500.96128z m478.08 0h23.99744l39.36256 67.2 40.32-67.2h23.04l-50.88256 83.51744 54.72256 92.16h-24.96l-43.20256-75.83744-43.19744 75.83744h-23.04l54.71744-92.16-50.87744-83.51744z m-242.88256 17.28c-17.28 0-30.39744 6.07744-39.35744 18.23744-8.96 11.52-13.44 28.8-13.44 51.84 0 23.68 4.48 41.6 13.44 53.76 8.96 11.52 22.07744 17.28 39.35744 17.28s30.40256-5.76 39.36256-17.28c8.96-12.16 13.44-30.08 13.44-53.76 0-23.04-4.48-40.32-13.44-51.84-8.96-12.16-22.08256-18.23744-39.36256-18.23744z m-213.12 1.92v137.27744h19.2c21.76 0 37.76-5.76 48-17.28 10.88-11.52 16.32256-28.8 16.32256-51.84s-5.12-39.99744-15.36-50.87744c-9.6-11.52-24.32-17.28-44.16-17.28h-24.00256z" p-id="8643" ></path></svg>
|
||||
|
Before Width: | Height: | Size: 2.9 KiB |
@@ -1 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg t="1772506712715" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="5360" xmlns:xlink="http://www.w3.org/1999/xlink" width="256" height="256"><path d="M279.5 354.3l198-162.8V734c0 19.3 15.7 35 35 35s35-15.7 35-35V191.6l197.1 162.6c6.5 5.4 14.4 8 22.3 8 10.1 0 20.1-4.3 27-12.7 12.3-14.9 10.2-37-4.7-49.3L534.7 90.4c-0.2-0.2-0.4-0.3-0.6-0.5-0.2-0.1-0.4-0.3-0.5-0.4-0.6-0.5-1.2-0.9-1.8-1.3-0.6-0.4-1.3-0.8-2-1.2-0.1-0.1-0.2-0.1-0.3-0.2-1.4-0.8-2.9-1.5-4.4-2.1h-0.1c-1.5-0.6-3.1-1.1-4.7-1.4h-0.2c-0.7-0.2-1.5-0.3-2.2-0.4h-0.2c-0.8-0.1-1.5-0.2-2.3-0.3h-0.3c-0.6 0-1.3-0.1-1.9-0.1h-3.1c-0.6 0-1.2 0.1-1.8 0.2-0.2 0-0.4 0-0.6 0.1l-2.1 0.3h-0.1c-0.7 0.1-1.4 0.3-2.1 0.5-0.1 0-0.3 0.1-0.4 0.1-1.5 0.4-2.9 0.9-4.3 1.5-0.1 0-0.1 0.1-0.2 0.1-1.5 0.6-2.9 1.4-4.3 2.2-1.4 0.9-2.8 1.8-4.1 2.9L235 300.3c-14.9 12.3-17.1 34.3-4.8 49.3 12.3 14.9 34.3 17 49.3 4.7z" p-id="5361"></path><path d="M925.8 598.2c-19.3 0-35 15.7-35 35v238.4H133.2V633.2c0-19.3-15.7-35-35-35s-35 15.7-35 35v273.4c0 19.3 15.7 35 35 35h827.5c19.3 0 35-15.7 35-35V633.2c0.1-19.3-15.6-35-34.9-35z" p-id="5362"></path></svg>
|
||||
|
Before Width: | Height: | Size: 1.2 KiB |