This commit is contained in:
liailing1026
2025-12-07 17:18:10 +08:00
parent ab8c9e294d
commit 23db6fc4a1
55 changed files with 16237 additions and 376 deletions

View File

@@ -6,11 +6,12 @@ import os
# load config (apikey, apibase, model)
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
yaml_data = {}
try:
with open(yaml_file, "r", encoding="utf-8") as file:
yaml_data = yaml.safe_load(file)
except Exception:
yaml_file = {}
yaml_data = {}
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
"OPENAI_API_BASE", "https://api.openai.com"
)
@@ -37,7 +38,28 @@ MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get(
def LLM_Completion(
messages: list[dict], stream: bool = True, useGroq: bool = True
) -> str:
if not useGroq or not FAST_DESIGN_MODE:
# 增强消息验证:确保所有消息的 role 和 content 非空且不是空白字符串
if not messages or len(messages) == 0:
raise ValueError("Messages list is empty")
# print(f"[DEBUG] LLM_Completion received {len(messages)} messages", flush=True)
for i, msg in enumerate(messages):
if not isinstance(msg, dict):
raise ValueError(f"Message at index {i} is not a dictionary")
if not msg.get("role") or str(msg.get("role")).strip() == "":
raise ValueError(f"Message at index {i} has empty 'role'")
if not msg.get("content") or str(msg.get("content")).strip() == "":
raise ValueError(f"Message at index {i} has empty 'content'")
# 额外验证确保content不会因为格式化问题变成空
content = str(msg.get("content")).strip()
if len(content) < 10: # 设置最小长度阈值
print(f"[WARNING] Message at index {i} has very short content: '{content}'", flush=True)
# 修改1
if not GROQ_API_KEY:
useGroq = False
elif not useGroq or not FAST_DESIGN_MODE:
force_gpt4 = True
useGroq = False
else:
@@ -137,16 +159,23 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=3,
model="gpt-3.5-turbo-16k",
stream=True,
)
kwargs = {
"messages": messages,
"max_tokens": 4096,
"n": 1,
"stop": None,
"temperature": 0.3,
"timeout": 3,
"model": "gpt-3.5-turbo-16k",
"stream": True,
}
# print("[DEBUG] about to call acreate with kwargs:", type(kwargs), kwargs)
assert kwargs is not None, "kwargs is None right before acreate!"
assert isinstance(kwargs, dict), "kwargs must be dict!"
response = await openai.ChatCompletion.acreate(**kwargs)
# create variables to collect the stream of chunks
collected_chunks = []
@@ -205,6 +234,9 @@ async def _achat_completion_json(messages: list[dict]) -> str:
async def _achat_completion_stream(messages: list[dict]) -> str:
# print(">>>> _achat_completion_stream 被调用", flush=True)
# print(">>>> messages 实参 =", messages, flush=True)
# print(">>>> messages 类型 =", type(messages), flush=True)
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
@@ -246,11 +278,54 @@ def _cons_kwargs(messages: list[dict]) -> dict:
kwargs = {
"messages": messages,
"max_tokens": 4096,
"n": 1,
"stop": None,
"temperature": 0.5,
"timeout": 3,
}
kwargs_mode = {"model": MODEL}
kwargs.update(kwargs_mode)
return kwargs
print("[DEBUG] kwargs =", kwargs)
assert isinstance(kwargs, dict), f"_cons_kwargs returned {type(kwargs)}, must be dict"
# 添加调试信息
print(f'[DEBUG] _cons_kwargs messages: {messages}', flush=True)
# 检查并修复消息中的null值
for i, msg in enumerate(messages):
# 确保msg是字典
if not isinstance(msg, dict):
print(f"[ERROR] Message {i} is not a dictionary: {msg}", flush=True)
messages[i] = {"role": "user", "content": str(msg) if msg is not None else ""}
continue
# 确保role和content存在且不为None
if "role" not in msg or msg["role"] is None:
print(f"[ERROR] Message {i} missing role, setting to 'user'", flush=True)
msg["role"] = "user"
else:
msg["role"] = str(msg["role"]).strip()
if "content" not in msg or msg["content"] is None:
print(f"[ERROR] Message {i} missing content, setting to empty string", flush=True)
msg["content"] = ""
else:
msg["content"] = str(msg["content"]).strip()
# 根据不同的API提供商调整参数
if "deepseek" in MODEL.lower():
# DeepSeek API特殊处理
print("[DEBUG] DeepSeek API detected, adjusting parameters", flush=True)
kwargs.pop("n", None) # 移除n参数DeepSeek可能不支持
if "timeout" in kwargs:
kwargs.pop("timeout", None)
# DeepSeek可能不支持stop参数
kwargs.pop("stop", None)
else:
# OpenAI兼容的API
kwargs["n"] = 1
kwargs["stop"] = None
kwargs["timeout"] = 3
kwargs["model"] = MODEL
# 确保messages列表中的每个元素都有有效的role和content
kwargs["messages"] = [msg for msg in messages if msg["role"] and msg["content"]]
print(f"[DEBUG] Final kwargs for API call: {kwargs.keys()}", flush=True)
return kwargs

View File

@@ -42,7 +42,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
return read_LLM_Completion(messages)["AbilityRequirement"]
PROMPT_AGENT_ABILITY_SCORING = """
@@ -83,7 +83,6 @@ class JSON_Agent(BaseModel):
class JSON_AGENT_ABILITY_SCORING(RootModel):
root: Dict[str, JSON_Agent]
def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
scoreTable = {}
for Ability_Requirement in Ability_Requirement_List:
@@ -105,7 +104,7 @@ def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
return scoreTable
def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
async def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
Current_Task = {
"TaskName": stepTask["StepName"],
"InputObject_List": stepTask["InputObject_List"],
@@ -125,10 +124,10 @@ def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
),
},
]
Ability_Requirement_List = read_LLM_Completion(messages)[
Ability_Requirement_List = await read_LLM_Completion(messages)[
"AbilityRequirement"
]
scoreTable = agentAbilityScoring(Agent_Board, Ability_Requirement_List)
scoreTable = await agentAbilityScoring(Agent_Board, Ability_Requirement_List)
return scoreTable

View File

@@ -76,8 +76,7 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
return read_LLM_Completion(messages)["AbilityRequirement"]
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
messages = [
@@ -99,7 +98,7 @@ def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
agentboard_set = {agent["Name"] for agent in Agent_Board}
while True:
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
if len(candidate) > MAX_TEAM_SIZE:
teamSize = random.randint(2, MAX_TEAM_SIZE)
candidate = candidate[0:teamSize]

View File

@@ -9,6 +9,14 @@ import AgentCoord.util as util
def generate_basePlan(
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
):
Agent_Board = [
{"Name": (a.get("Name") or "").strip(),"Profile": (a.get("Profile") or "").strip()}
for a in Agent_Board
if a and a.get("Name") is not None
]
if not Agent_Board: # 洗完后还是空 → 直接返回空计划
return {"Plan_Outline": []}
basePlan = {
"Initial Input Object": InitialObject_List,
"Collaboration Process": [],

View File

@@ -95,7 +95,7 @@ def branch_PlanOutline(
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
Remaining_Steps = read_LLM_Completion(messages)[
"Remaining Steps"
]
branch_List.append(Remaining_Steps)

View File

@@ -145,7 +145,7 @@ def branch_TaskProcess(
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
Remaining_Steps = read_LLM_Completion(messages)[
"Remaining Steps"
]

View File

@@ -2,7 +2,7 @@ from AgentCoord.util.converter import read_LLM_Completion
from typing import List
from pydantic import BaseModel
import json
import asyncio
PROMPT_PLAN_OUTLINE_GENERATION = """
## Instruction
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
@@ -69,6 +69,13 @@ class PlanOutline(BaseModel):
def generate_PlanOutline(InitialObject_List, General_Goal):
# 新增:校验 General_Goal 必须有有效内容
if not isinstance(General_Goal, str) or len(General_Goal.strip()) == 0:
raise ValueError("General_Goal 不能为空!必须提供具体的目标描述")
# 处理 InitialObject_List 为空的情况(可选,但更友好)
if not InitialObject_List:
InitialObject_List = ["无初始对象"] # 避免空列表导致的歧义
messages = [
{
"role": "system",
@@ -83,4 +90,11 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
),
},
]
# 二次校验 messages 内容(防止意外空值)
for msg in messages:
content = msg.get("content", "").strip()
if not content:
raise ValueError("生成的 LLM 请求消息内容为空,请检查参数")
return read_LLM_Completion(messages)["Plan_Outline"]

View File

@@ -106,6 +106,9 @@ class TaskProcessPlan(BaseModel):
def generate_TaskProcess(General_Goal, Current_Task_Description):
# 新增参数验证
if not General_Goal or str(General_Goal).strip() == "":
raise ValueError("General_Goal cannot be empty")
messages = [
{
"role": "system",

View File

@@ -83,7 +83,7 @@ class BaseAction():
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
print_colored(text = prompt, text_color="red")
messages = [{"role":"system", "content": prompt}]
ActionResult = LLM_Completion(messages,True,False)
ActionResult = LLM_Completion(messages,stream=False)
ActionInfo_with_Result = copy.deepcopy(self.info)
ActionInfo_with_Result["Action_Result"] = ActionResult

View File

@@ -1,2 +1,23 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 导出常用模块和函数,根据实际需求调整
from .PlanEngine.planOutline_Generator import generate_PlanOutline
from .PlanEngine.basePlan_Generator import generate_basePlan
from .PlanEngine.AgentSelection_Generator import generate_AgentSelection
from .LLMAPI.LLMAPI import LLM_Completion
from .util.converter import read_LLM_Completion
# 定义包元数据
__version__ = "0.1.0"
__all__ = [
"generate_PlanOutline",
"generate_basePlan",
"generate_AgentSelection",
"LLM_Completion",
"read_LLM_Completion"
]

View File

@@ -1,6 +1,6 @@
import re
import json
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion,GROQ_API_KEY
def create_agent_dict(agent_list):
@@ -73,24 +73,80 @@ def remove_render_spec(duty_spec):
return duty_spec
def read_LLM_Completion(messages, useGroq=True):
for _ in range(3):
text = LLM_Completion(messages, useGroq=useGroq)
def read_LLM_Completion(messages, useGroq=None):
if useGroq is None:
useGroq = bool(GROQ_API_KEY)
# 添加调试信息和输入验证
print(f"[DEBUG] read_LLM_Completion called with {len(messages)} messages", flush=True)
if not messages or len(messages) == 0:
raise ValueError("No messages provided to read_LLM_Completion")
# 确保messages中的每个元素都是有效的
for i, msg in enumerate(messages):
if not isinstance(msg, dict):
print(f"[ERROR] Message {i} is not a dictionary: {type(msg)}", flush=True)
raise ValueError(f"Message {i} is not a dictionary")
if 'content' not in msg or msg['content'] is None:
print(f"[ERROR] Message {i} has no content or content is None", flush=True)
msg['content'] = "" # 提供默认空字符串
for attempt in range(3):
try:
print(f"[DEBUG] Attempt {attempt + 1}/3 to get LLM response", flush=True)
text = LLM_Completion(messages, useGroq=useGroq)
# 确保text是字符串类型
if text is None:
print(f"[ERROR] Null response from LLM on attempt {attempt + 1}", flush=True)
continue
text = str(text).strip()
if not text:
print(f"[ERROR] Empty response from LLM on attempt {attempt + 1}", flush=True)
continue
print(f"[DEBUG] LLM response length: {len(text)} characters", flush=True)
pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
match = re.search(pattern, text, re.DOTALL)
# 尝试从代码块中提取JSON
pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
match = re.search(pattern, text, re.DOTALL)
if match:
return json.loads(match.group(1).strip())
if match:
json_content = match.group(1).strip()
print(f"[DEBUG] Found JSON in code block, length: {len(json_content)}", flush=True)
try:
result = json.loads(json_content)
print(f"[DEBUG] Successfully parsed JSON from code block", flush=True)
return result
except json.JSONDecodeError as e:
print(f"[ERROR] JSON decode error in code block: {e}", flush=True)
print(f"[ERROR] JSON content was: {json_content}", flush=True)
pattern = r"\{.*\}"
match = re.search(pattern, text, re.DOTALL)
if match:
try:
return json.loads(match.group(0).strip())
except Exception:
pass
raise ("bad format!")
# 尝试直接提取JSON对象
pattern = r"\{.*\}"
match = re.search(pattern, text, re.DOTALL)
if match:
json_content = match.group(0).strip()
print(f"[DEBUG] Found JSON in plain text, length: {len(json_content)}", flush=True)
try:
result = json.loads(json_content)
print(f"[DEBUG] Successfully parsed JSON from plain text", flush=True)
return result
except json.JSONDecodeError as e:
print(f"[ERROR] JSON decode error in plain text: {e}", flush=True)
print(f"[ERROR] JSON content was: {json_content}", flush=True)
print(f"[ERROR] No valid JSON found in response on attempt {attempt + 1}", flush=True)
print(f"[ERROR] Full response was: {text[:200]}..." if len(text) > 200 else f"[ERROR] Full response was: {text}", flush=True)
except Exception as e:
print(f"[ERROR] Exception on attempt {attempt + 1}: {e}", flush=True)
import traceback
traceback.print_exc()
continue
raise ValueError(f"Failed to get valid JSON response after 3 attempts. Last error: bad format or empty response")
def read_json_content(text):
@@ -127,4 +183,4 @@ def read_outputObject_content(text, keyword):
if match:
return match.group(1).strip()
else:
raise ("bad format!")
raise ("bad format!")

View File

@@ -1,12 +1,12 @@
## config for default LLM
OPENAI_API_BASE: ""
OPENAI_API_KEY: ""
OPENAI_API_MODEL: "gpt-4-turbo-preview"
OPENAI_API_BASE: "https://ai.gitee.com/v1"
OPENAI_API_KEY: ""  # SECURITY(review): a live-looking API key was committed here — revoke/rotate it immediately and supply the key via the OPENAI_API_KEY environment variable instead of this file
OPENAI_API_MODEL: "DeepSeek-V3"
## config for fast mode
FAST_DESIGN_MODE: True
GROQ_API_KEY: ""
MISTRAL_API_KEY: ""
## options under experimentation, leave them as Fasle unless you know what it is for
USE_CACHE: False
## options under experimentation, leave them as False unless you know what it is for
USE_CACHE: False

View File

@@ -3,5 +3,5 @@ openai==0.28.1
PyYAML==6.0.1
termcolor==2.4.0
groq==0.4.2
mistralai==0.1.6
mistralai==1.5.2
socksio==1.0.0

13
backend/restart.ps1 Normal file
View File

@@ -0,0 +1,13 @@
# restart.ps1 — free up $port by killing its current owner, then start the Flask server.
# Fix: the original ran `python server.py --port 8000` *before* the kill loop; that
# foreground call blocked the script so the kill/restart logic below never executed
# (and it hard-coded the port instead of using $port).
$port = 8000
$env:PYTHONUNBUFFERED = "1"   # force unbuffered Python output so logs appear immediately

Write-Host "Killing PID on port $port..." -ForegroundColor Red
# Best-effort: if nothing is listening on $port, SilentlyContinue yields no objects.
Get-NetTCPConnection -LocalPort $port -ErrorAction SilentlyContinue | ForEach-Object {
    Stop-Process -Id $_.OwningProcess -Force
}

Write-Host "Starting Flask..." -ForegroundColor Green
python server.py --port $port

View File

@@ -1,3 +1,10 @@
import nest_asyncio
nest_asyncio.apply()
import os, sys, functools
print = functools.partial(print, flush=True) # 全局 flush
sys.stdout.reconfigure(line_buffering=True) # 3.7+ 有效
import asyncio
from flask import Flask, request, jsonify
import json
from DataProcess import Add_Collaboration_Brief_FrontEnd
@@ -19,11 +26,12 @@ import argparse
# initialize global variables
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
yaml_data = {}
try:
with open(yaml_file, "r", encoding="utf-8") as file:
yaml_data = yaml.safe_load(file)
except Exception:
yaml_file = {}
yaml_data = {}
USE_CACHE: bool = os.getenv("USE_CACHE")
if USE_CACHE is None:
USE_CACHE = yaml_data.get("USE_CACHE", False)
@@ -35,14 +43,98 @@ Request_Cache: dict[str, str] = {}
app = Flask(__name__)
from jsonschema import validate, ValidationError
# JSON Schemas used to validate model responses before they are returned to the
# front end (checked with jsonschema.validate in the route handlers).

# The response must be exactly {"AgentSelectionPlan": [...]} with a non-empty
# list of agent-name strings.
AGENT_SELECTION_SCHEMA = {
    "type": "object",
    "properties": {
        "AgentSelectionPlan": {
            "type": "array",
            "items": {
                "type": "string",
                "minLength": 1,  # no empty strings
                # Require at least one non-whitespace character (i.e. reject
                # whitespace-only names). NOTE: the previous pattern r"^\S+$"
                # also rejected names containing internal spaces such as
                # "Alice Smith", which clean_agent_board otherwise allows;
                # JSON Schema "pattern" is an unanchored search, so a bare \S
                # expresses the intended "not blank" rule.
                "pattern": r"\S",
            },
            "minItems": 1,  # at least one agent must be selected
        }
    },
    "required": ["AgentSelectionPlan"],
    "additionalProperties": False,
}

# The response must be exactly {"Plan_Outline": [...]}: a list of step objects,
# each carrying the four required fields and nothing else.
BASE_PLAN_SCHEMA = {
    "type": "object",
    "properties": {
        "Plan_Outline": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "StepName": {"type": "string"},
                    "TaskContent": {"type": "string"},
                    "InputObject_List": {"type": "array", "items": {"type": "string"}},
                    "OutputObject": {"type": "string"},
                },
                "required": ["StepName", "TaskContent", "InputObject_List", "OutputObject"],
                "additionalProperties": False,
            },
        }
    },
    "required": ["Plan_Outline"],
    "additionalProperties": False,
}
def safe_join(iterable, sep=""):
    """Join the items of *iterable* as strings; ``None`` items become ``""``.

    Guards ``sep.join`` against ``TypeError`` when the sequence may contain
    ``None`` or non-string values.
    """
    parts = ("" if item is None else str(item) for item in iterable)
    return sep.join(parts)
def clean_agent_board(board):
    """Normalize an AgentBoard into a list of str-only dicts.

    Drops falsy entries and entries without a truthy ``Name``; strips
    whitespace from ``Name`` and ``Profile`` and maps ``None`` to ``""``.
    Returns ``[]`` for a falsy *board*.
    """
    if not board:
        return []
    cleaned = []
    for agent in board:
        if not (agent and agent.get("Name")):
            continue  # skip None entries and unnamed agents
        name = (agent.get("Name") or "").strip()
        profile = (agent.get("Profile") or "").strip()
        cleaned.append({"Name": name, "Profile": profile})
    return cleaned
def clean_plan_outline(outline):
    """Scrub ``None`` values out of a Plan_Outline, in place, and return it.

    Non-list input yields ``[]``. Non-dict steps are left untouched. For each
    dict step, ``InputObject_List`` is rebuilt with ``None`` items dropped and
    the rest stringified, and the three scalar fields are coerced to ``str``
    (``None``/missing becomes ``""``).
    """
    if not isinstance(outline, list):
        return []
    for step in outline:
        if not isinstance(step, dict):
            continue
        raw_inputs = step.get("InputObject_List", [])
        step["InputObject_List"] = [str(obj) for obj in raw_inputs if obj is not None]
        for field in ("OutputObject", "StepName", "TaskContent"):
            step[field] = str(step.get(field) or "")
    return outline
@app.route("/fill_stepTask_TaskProcess", methods=["post"])
def Handle_fill_stepTask_TaskProcess():
incoming_data = request.get_json()
# print(f"[DEBUG] fill_stepTask_TaskProcess received data: {incoming_data}", flush=True)
# 验证必需参数
General_Goal = incoming_data.get("General Goal", "").strip()
stepTask_lackTaskProcess = incoming_data.get("stepTask_lackTaskProcess")
if not General_Goal:
return jsonify({"error": "General Goal is required and cannot be empty"}), 400
if not stepTask_lackTaskProcess:
return jsonify({"error": "stepTask_lackTaskProcess is required"}), 400
requestIdentifier = str(
(
"/fill_stepTask_TaskProcess",
incoming_data["General Goal"],
incoming_data["stepTask_lackTaskProcess"],
General_Goal,
stepTask_lackTaskProcess,
)
)
@@ -50,40 +142,54 @@ def Handle_fill_stepTask_TaskProcess():
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask_TaskProcess(
General_Goal=incoming_data["General Goal"],
stepTask=incoming_data["stepTask_lackTaskProcess"],
AgentProfile_Dict=AgentProfile_Dict,
)
try:
filled_stepTask = fill_stepTask_TaskProcess(
General_Goal=General_Goal,
stepTask=stepTask_lackTaskProcess,
AgentProfile_Dict=AgentProfile_Dict,
)
except Exception as e:
print(f"[ERROR] fill_stepTask_TaskProcess failed: {e}", flush=True)
return jsonify({"error": str(e)}), 500
filled_stepTask = Add_Collaboration_Brief_FrontEnd(filled_stepTask)
Request_Cache[requestIdentifier] = filled_stepTask
response = jsonify(filled_stepTask)
return response
@app.route("/agentSelectModify_init", methods=["post"])
@app.route("/agentSelectModify_init", methods=["POST"])
def Handle_agentSelectModify_init():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/agentSelectModify_init",
incoming_data["General Goal"],
incoming_data["stepTask"],
)
)
incoming = request.get_json(silent=True) or {}
general_goal = (incoming.get("General Goal") or "").strip()
step_task = incoming.get("stepTask")
if not general_goal or not step_task:
return jsonify({"error": "Missing field"}), 400
if not AgentBoard: # 空 Board 直接返回
return jsonify({"AgentSelectionPlan": []})
req_id = str(("/agentSelectModify_init", general_goal, step_task))
if USE_CACHE and req_id in Request_Cache:
return jsonify(Request_Cache[req_id])
try:
clean_board = clean_agent_board(AgentBoard)
raw = AgentSelectModify_init(stepTask=step_task,
General_Goal=general_goal,
Agent_Board=clean_board)
if not isinstance(raw, dict):
raise ValueError("model returned non-dict")
plan = raw.get("AgentSelectionPlan") or []
cleaned = [str(x).strip() for x in plan if x is not None and str(x).strip()]
raw["AgentSelectionPlan"] = cleaned
validate(instance=raw, schema=AGENT_SELECTION_SCHEMA)
except Exception as exc:
print(f"[ERROR] AgentSelectModify_init: {exc}")
return jsonify({"error": str(exc)}), 500
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
scoreTable = AgentSelectModify_init(
stepTask=incoming_data["stepTask"],
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
)
Request_Cache[requestIdentifier] = scoreTable
response = jsonify(scoreTable)
return response
Request_Cache[req_id] = raw
return jsonify(raw)
@app.route("/agentSelectModify_addAspect", methods=["post"])
@@ -98,7 +204,7 @@ def Handle_agentSelectModify_addAspect():
return jsonify(Request_Cache[requestIdentifier])
scoreTable = AgentSelectModify_addAspect(
aspectList=incoming_data["aspectList"], Agent_Board=AgentBoard
aspectList=incoming_data["aspectList"], Agent_Board=AgentBoard or []
)
Request_Cache[requestIdentifier] = scoreTable
response = jsonify(scoreTable)
@@ -108,11 +214,22 @@ def Handle_agentSelectModify_addAspect():
@app.route("/fill_stepTask", methods=["post"])
def Handle_fill_stepTask():
incoming_data = request.get_json()
# print(f"[DEBUG] fill_stepTask received data: {incoming_data}", flush=True)
# 验证必需参数
General_Goal = incoming_data.get("General Goal", "").strip()
stepTask = incoming_data.get("stepTask")
if not General_Goal:
return jsonify({"error": "General Goal is required and cannot be empty"}), 400
if not stepTask:
return jsonify({"error": "stepTask is required"}), 400
requestIdentifier = str(
(
"/fill_stepTask",
incoming_data["General Goal"],
incoming_data["stepTask"],
General_Goal,
stepTask,
)
)
@@ -120,12 +237,16 @@ def Handle_fill_stepTask():
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask(
General_Goal=incoming_data["General Goal"],
stepTask=incoming_data["stepTask"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
)
try:
filled_stepTask = fill_stepTask(
General_Goal=General_Goal,
stepTask=stepTask,
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
)
except Exception as e:
print(f"[ERROR] fill_stepTask failed: {e}", flush=True)
return jsonify({"error": str(e)}), 500
filled_stepTask = Add_Collaboration_Brief_FrontEnd(filled_stepTask)
Request_Cache[requestIdentifier] = filled_stepTask
response = jsonify(filled_stepTask)
@@ -198,31 +319,46 @@ def Handle_branch_TaskProcess():
return response
@app.route("/generate_basePlan", methods=["post"])
@app.route("/generate_basePlan", methods=["POST"])
def Handle_generate_basePlan():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/generate_basePlan",
incoming_data["General Goal"],
incoming_data["Initial Input Object"],
incoming = request.get_json(silent=True) or {}
general_goal = (incoming.get("General Goal") or "").strip()
initial_objs = incoming.get("Initial Input Object") or []
if not general_goal:
return jsonify({"error": "General Goal is required"}), 400
# 1. 空 Board 直接短路
if not AgentBoard:
print("[SKIP] AgentBoard empty")
out = Add_Collaboration_Brief_FrontEnd({"Plan_Outline": []})
return jsonify(out)
req_id = str(("/generate_basePlan", general_goal, initial_objs))
if USE_CACHE and req_id in Request_Cache:
return jsonify(Request_Cache[req_id])
try:
# 2. 洗 Board → 调模型 → 洗返回
clean_board = clean_agent_board(AgentBoard)
raw_plan = asyncio.run(
generate_basePlan(
General_Goal=general_goal,
Agent_Board=clean_board,
AgentProfile_Dict=AgentProfile_Dict,
InitialObject_List=initial_objs,
)
)
)
raw_plan["Plan_Outline"] = clean_plan_outline(raw_plan.get("Plan_Outline"))
validate(instance=raw_plan, schema=BASE_PLAN_SCHEMA) # 可选,二次校验
except Exception as exc:
print(f"[ERROR] generate_basePlan failed: {exc}")
return jsonify({"error": "model call failed", "detail": str(exc)}), 500
out = Add_Collaboration_Brief_FrontEnd(raw_plan)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
basePlan = generate_basePlan(
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
InitialObject_List=incoming_data["Initial Input Object"],
)
basePlan_withRenderSpec = Add_Collaboration_Brief_FrontEnd(basePlan)
Request_Cache[requestIdentifier] = basePlan_withRenderSpec
response = jsonify(basePlan_withRenderSpec)
return response
Request_Cache[req_id] = out
return jsonify(out)
@app.route("/executePlan", methods=["post"])
@@ -267,13 +403,11 @@ def Handle_saveRequestCashe():
@app.route("/setAgents", methods=["POST"])
def set_agents():
global AgentBoard, AgentProfile_Dict
AgentBoard = request.json
AgentProfile_Dict = {}
for item in AgentBoard:
name = item["Name"]
profile = item["Profile"]
AgentProfile_Dict[name] = profile
return jsonify({"code": 200, "content": "set agentboard successfully"})
board_in = request.json or []
# 先清洗再赋值
AgentBoard = clean_agent_board(board_in)
AgentProfile_Dict = {a["Name"]: a["Profile"] for a in AgentBoard}
return jsonify({"code": 200, "content": "AgentBoard set successfully"})
def init():