Compare commits

..

5 Commits

Author SHA1 Message Date
wuyifan18
f50dcd2fe7 update 2025-10-21 10:13:14 +08:00
wuyifan18
744945f68d update 2025-10-21 10:09:18 +08:00
wuyifan18
25853d8174 update 2025-10-19 18:54:11 +08:00
wuyifan18
ceb57acd23 update 2025-10-19 18:14:21 +08:00
wuyifan18
da2318a40c update 2025-10-18 14:31:34 +08:00
295 changed files with 26005 additions and 24364 deletions

View File

@@ -1,30 +1,24 @@
import asyncio
from openai import OpenAI, AsyncOpenAI
import openai
import yaml
from termcolor import colored
import os
# Helper function to avoid circular import
def print_colored(text, text_color="green", background="on_white"):
print(colored(text, text_color, background))
# load config (apikey, apibase, model)
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
with open(yaml_file, "r", encoding="utf-8") as file:
yaml_data = yaml.safe_load(file)
except Exception:
yaml_data = {}
yaml_file = {}
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
"OPENAI_API_BASE", "https://api.openai.com"
)
openai.api_base = OPENAI_API_BASE
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
"OPENAI_API_KEY", ""
)
# Initialize OpenAI clients
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
async_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
openai.api_key = OPENAI_API_KEY
MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
"OPENAI_API_MODEL", "gpt-4-turbo-preview"
)
@@ -77,14 +71,14 @@ def LLM_Completion(
async def _achat_completion_stream_groq(messages: list[dict]) -> str:
from groq import AsyncGroq
groq_client = AsyncGroq(api_key=GROQ_API_KEY)
client = AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
print("Attempt to use Groq (Fase Design Mode):")
try:
response = await groq_client.chat.completions.create(
stream = await client.chat.completions.create(
messages=messages,
# model='gemma-7b-it',
model="mixtral-8x7b-32768",
@@ -98,9 +92,9 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
raise "failed"
full_reply_content = response.choices[0].message.content
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
@@ -109,14 +103,14 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
client = MistralClient(api_key=MISTRAL_API_KEY)
# client=AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
try:
messages[len(messages) - 1]["role"] = "user"
stream = mistral_client.chat(
stream = client.chat(
messages=[
ChatMessage(
role=message["role"], content=message["content"]
@@ -125,13 +119,14 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
],
# model = "mistral-small-latest",
model="open-mixtral-8x7b",
# response_format={"type": "json_object"},
)
break # If the operation is successful, break the loop
except Exception:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
raise "failed"
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
@@ -140,11 +135,15 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
response = await async_client.chat.completions.create(
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=30,
timeout=3,
model="gpt-3.5-turbo-16k",
stream=True,
)
@@ -155,33 +154,40 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk.choices
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk.choices[0].delta
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if chunk_message.content:
if "content" in chunk_message:
print(
colored(chunk_message.content, "blue", "on_white"),
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.content or "" for m in collected_messages if m is not None]
[m.get("content", "") for m in collected_messages]
)
return full_reply_content
async def _achat_completion_json(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
max_attempts = 5
for attempt in range(max_attempts):
try:
response = await async_client.chat.completions.create(
stream = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=30,
timeout=3,
model=MODEL,
response_format={"type": "json_object"},
)
@@ -190,62 +196,62 @@ async def _achat_completion_json(messages: list[dict]) -> str:
if attempt < max_attempts - 1: # i is zero indexed
continue
else:
raise Exception("failed")
raise "failed"
full_reply_content = response.choices[0].message.content
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream(messages: list[dict]) -> str:
try:
response = await async_client.chat.completions.create(
**_cons_kwargs(messages), stream=True
)
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
**_cons_kwargs(messages), stream=True
)
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk.choices
if len(choices) > 0:
chunk_message = chunk.choices[0].delta
collected_messages.append(chunk_message) # save the message
if chunk_message.content:
print(
colored(chunk_message.content, "blue", "on_white"),
end="",
)
print()
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if "content" in chunk_message:
print(
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.content or "" for m in collected_messages if m is not None]
)
return full_reply_content
except Exception as e:
print_colored(f"OpenAI API error in _achat_completion_stream: {str(e)}", "red")
raise
full_reply_content = "".join(
[m.get("content", "") for m in collected_messages]
)
return full_reply_content
def _chat_completion(messages: list[dict]) -> str:
try:
rsp = client.chat.completions.create(**_cons_kwargs(messages))
content = rsp.choices[0].message.content
return content
except Exception as e:
print_colored(f"OpenAI API error in _chat_completion: {str(e)}", "red")
raise
print(messages, flush=True)
rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
content = rsp["choices"][0]["message"]["content"]
print(content, flush=True)
return content
def _cons_kwargs(messages: list[dict]) -> dict:
kwargs = {
"messages": messages,
"max_tokens": 2000,
"temperature": 0.3,
"timeout": 15,
"max_tokens": 4096,
"n": 1,
"stop": None,
"temperature": 0.5,
"timeout": 3,
}
kwargs_mode = {"model": MODEL}
kwargs.update(kwargs_mode)

View File

@@ -33,7 +33,7 @@ class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
PROMPT_AGENT_SELECTION_GENERATION = """
## Instruction
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board". Agent Selection Plan ranks from high to low according to ability.
## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}
@@ -80,10 +80,6 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
# Check if Agent_Board is None or empty
if Agent_Board is None or len(Agent_Board) == 0:
raise ValueError("Agent_Board cannot be None or empty. Please ensure agents are set via /setAgents endpoint before generating a plan.")
messages = [
{
"role": "system",

View File

@@ -7,14 +7,14 @@ import AgentCoord.util as util
def generate_basePlan(
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List, context
):
basePlan = {
"Initial Input Object": InitialObject_List,
"Collaboration Process": [],
}
PlanOutline = generate_PlanOutline(
InitialObject_List=[], General_Goal=General_Goal
InitialObject_List=[], General_Goal=General_Goal + context
)
for stepItem in PlanOutline:
Current_Task = {
@@ -44,7 +44,7 @@ def generate_basePlan(
),
}
TaskProcess = generate_TaskProcess(
General_Goal=General_Goal,
General_Goal=General_Goal + context,
Current_Task_Description=Current_Task_Description,
)
# add the generated AgentSelection and TaskProcess to the stepItem

View File

@@ -5,7 +5,7 @@ import json
PROMPT_PLAN_OUTLINE_GENERATION = """
## Instruction
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline". The number of steps in the Plan Outline cannot exceed 5.
## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
{InitialObject_List}
@@ -51,6 +51,7 @@ TaskContent: Describe the task of the current step.
InputObject_List: The list of the input obejects that will be used in current step.
OutputObject: The name of the final output object of current step.
请用中文回答
"""
@@ -83,16 +84,4 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
),
},
]
result = read_LLM_Completion(messages)
if isinstance(result, dict) and "Plan_Outline" in result:
return result["Plan_Outline"]
else:
# 如果格式不正确,返回默认的计划大纲
return [
{
"StepName": "Default Step",
"TaskContent": "Generated default plan step due to format error",
"InputObject_List": [],
"OutputObject": "Default Output"
}
]
return read_LLM_Completion(messages)["Plan_Outline"]

View File

@@ -90,6 +90,7 @@ InputObject_List: List existing objects that should be utilized in current step.
AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
请用中文回答
"""

View File

@@ -80,14 +80,7 @@ class BaseAction():
Important_Mark = ""
action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)
# Handle missing agent profiles gracefully
if agentName not in AgentProfile_Dict:
print_colored(text=f"Warning: Agent '{agentName}' not found in AgentProfile_Dict. Using default profile.", text_color="yellow")
agentProfile = f"AI Agent named {agentName}"
else:
agentProfile = AgentProfile_Dict[agentName]
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = agentProfile, General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
print_colored(text = prompt, text_color="red")
messages = [{"role":"system", "content": prompt}]
ActionResult = LLM_Completion(messages,True,False)

View File

@@ -5,7 +5,7 @@ Note: You can say something before you provide the improved version of the conte
The improved version you provide must be a completed version (e.g. if you provide a improved story, you should give completed story content, rather than just reporting where you have improved).
When you decide to give the improved version of the content, it should be start like this:
## Improved version of xxx
## xxx的改进版本
(the improved version of the content)
```

View File

@@ -4,7 +4,7 @@ from termcolor import colored
# Accept inputs: num_StepToRun (the number of step to run, if None, run to the end), plan, RehearsalLog, AgentProfile_Dict
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
# Prepare for execution
KeyObjects = {}
finishedStep_index = -1
@@ -85,17 +85,9 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
# start the group chat
util.print_colored(TaskDescription, text_color="green")
ActionHistory = []
action_count = 0
total_actions = len(TaskProcess)
for ActionInfo in TaskProcess:
action_count += 1
actionType = ActionInfo["ActionType"]
agentName = ActionInfo["AgentName"]
# 添加进度日志
util.print_colored(f"🔄 Executing action {action_count}/{total_actions}: {actionType} by {agentName}", text_color="yellow")
if actionType in Action.customAction_Dict:
currentAction = Action.customAction_Dict[actionType](
info=ActionInfo,
@@ -109,7 +101,7 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
KeyObjects=KeyObjects,
)
ActionInfo_with_Result = currentAction.run(
General_Goal=plan["General Goal"],
General_Goal=plan["General Goal"] + "\n\n### Useful Information (some information can help accomplish the task)\n如果使用该信息,请在回答中显示指出是来自数联网的数据。例如,基于数联网数据搜索结果。" + context,
TaskDescription=TaskDescription,
agentName=agentName,
AgentProfile_Dict=AgentProfile_Dict,
@@ -121,6 +113,8 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
ActionHistory.append(ActionInfo_with_Result)
# post processing for the group chat (finish)
objectLogNode["content"] = KeyObjects[OutputName]
if StepRun_count == len(plan["Collaboration Process"][(finishedStep_index + 1): run_to]):
objectLogNode["content"] += context
RehearsalLog.append(stepLogNode)
RehearsalLog.append(objectLogNode)
stepLogNode["ActionHistory"] = ActionHistory

View File

@@ -20,8 +20,6 @@ def camel_case_to_normal(s):
def generate_template_sentence_for_CollaborationBrief(
input_object_list, output_object, agent_list, step_task
):
# Ensure step_task is not None
step_task = step_task if step_task is not None else "perform the task"
# Check if the names are in camel case (no spaces) and convert them to normal naming convention
input_object_list = (
[
@@ -33,48 +31,29 @@ def generate_template_sentence_for_CollaborationBrief(
)
output_object = (
camel_case_to_normal(output_object)
if output_object is not None and is_camel_case(output_object)
else (output_object if output_object is not None else "unknown output")
if is_camel_case(output_object)
else output_object
)
# Format the agents into a string with proper grammar
if agent_list is None or len(agent_list) == 0:
agent_str = "Unknown agents"
elif all(agent is not None for agent in agent_list):
agent_str = (
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
if len(agent_list) > 1
else agent_list[0]
)
else:
# Filter out None values
filtered_agents = [agent for agent in agent_list if agent is not None]
if filtered_agents:
agent_str = (
" and ".join([", ".join(filtered_agents[:-1]), filtered_agents[-1]])
if len(filtered_agents) > 1
else filtered_agents[0]
)
else:
agent_str = "Unknown agents"
agent_str = (
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
if len(agent_list) > 1
else agent_list[0]
)
if input_object_list is None or len(input_object_list) == 0:
# Combine all the parts into the template sentence
template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
else:
# Format the input objects into a string with proper grammar
# Filter out None values from input_object_list
filtered_input_list = [obj for obj in input_object_list if obj is not None]
if filtered_input_list:
input_str = (
" and ".join(
[", ".join(filtered_input_list[:-1]), filtered_input_list[-1]]
)
if len(filtered_input_list) > 1
else filtered_input_list[0]
input_str = (
" and ".join(
[", ".join(input_object_list[:-1]), input_object_list[-1]]
)
else:
input_str = "unknown inputs"
if len(input_object_list) > 1
else input_object_list[0]
)
# Combine all the parts into the template sentence
template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
@@ -111,7 +90,7 @@ def read_LLM_Completion(messages, useGroq=True):
return json.loads(match.group(0).strip())
except Exception:
pass
return {} # 返回空对象而不是抛出异常
raise ("bad format!")
def read_json_content(text):
@@ -132,7 +111,7 @@ def read_json_content(text):
if match:
return json.loads(match.group(0).strip())
return {} # 返回空对象而不是抛出异常
raise ("bad format!")
def read_outputObject_content(text, keyword):
@@ -148,4 +127,4 @@ def read_outputObject_content(text, keyword):
if match:
return match.group(1).strip()
else:
return "" # 返回空字符串而不是抛出异常
raise ("bad format!")

View File

@@ -0,0 +1,97 @@
[
{
"Icon": "Hailey_Johnson.png",
"Name": "船舶设计师",
"Profile": "提供船舶制造中的实际需求和约束。"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "防护工程专家",
"Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
},
{
"Icon": "Jane_Moreno.png",
"Name": "病理生理学家",
"Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "药物化学家",
"Profile": "负责将靶点概念转化为实际可合成的分子。"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "制剂工程师",
"Profile": "负责将活性药物成分API变成稳定、可用、符合战场要求的剂型。"
},
{
"Icon": "Maria_Lopez.png",
"Name": "监管事务专家",
"Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。"
},
{
"Icon": "Sam_Moore.png",
"Name": "物理学家",
"Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "实验材料学家",
"Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "计算模拟专家",
"Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。"
},
{
"Icon": "John_Lin.png",
"Name": "腐蚀机理研究员",
"Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
},
{
"Icon": "Arthur_Burton.png",
"Name": "先进材料研发员",
"Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。"
},
{
"Icon": "Eddy_Lin.png",
"Name": "肾脏病学家",
"Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "临床研究协调员",
"Profile": "负责受试者招募和临床试验流程优化。"
},
{
"Icon": "Latoya_Williams.png",
"Name": "中医药专家",
"Profile": "理解药物的中药成分和作用机制。"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "药物安全专家",
"Profile": "专注于药物不良反应数据收集、分析和报告。"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "二维材料科学家",
"Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。"
},
{
"Icon": "Tom_Moreno.png",
"Name": "光电物理学家",
"Profile": "研究材料的光电转换机制和关键影响因素。"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "机器学习专家",
"Profile": "专注于开发和应用AI模型用于材料模拟。"
},
{
"Icon": "Mei_Lin.png",
"Name": "流体动力学专家",
"Profile": "专注于流体行为理论和模拟。"
}
]

File diff suppressed because one or more lines are too long

View File

@@ -9,4 +9,4 @@ GROQ_API_KEY: ""
MISTRAL_API_KEY: ""
## options under experimentation, leave them as Fasle unless you know what it is for
USE_CACHE: False
USE_CACHE: True

View File

@@ -1,5 +1,5 @@
Flask==3.0.2
openai==2.8.1
openai==0.28.1
PyYAML==6.0.1
termcolor==2.4.0
groq==0.4.2

View File

@@ -1,5 +1,6 @@
from flask import Flask, request, jsonify
import json
import time
from DataProcess import Add_Collaboration_Brief_FrontEnd
from AgentCoord.RehearsalEngine_V2.ExecutePlan import executePlan
from AgentCoord.PlanEngine.basePlan_Generator import generate_basePlan
@@ -31,6 +32,56 @@ else:
USE_CACHE = USE_CACHE.lower() in ["true", "1", "yes"]
AgentBoard = None
AgentProfile_Dict = {}
c1 = '''
=====数联网搜索结果=====
1. 论文
- 《Surface Defect Detection and Evaluation for Marine Vessels using Multi-Stage Deep Learning》提出一个面向船舶制造与检修的多阶段深度学习流水线用于船体表面缺陷检测与评估重点识别和量化与船舶制造材料腐蚀相关的现象包括腐蚀rust、附着物fouling、涂层剥离delamination等。研究特别关注船舶材料腐蚀在不同涂层颜色、光照条件和相机角度下的鲁棒性有助于提高船舶制造过程中对材料腐蚀的检测与管理能力。论文强调其方法在船舶材料表面腐蚀监测和维修决策中的应用价值。链接https://arxiv.org/abs/2203.09580
论文复现链接http://8.130.138.52:21001/#/view/reproduction?doid=newID/5a4fef68-a138-4adb-ba88-43aa4474c08c
- 《Efficient Metal Corrosion Area Detection Model Combining Convolution and Transformer (MCD-Net)》提出了一种高效的金属腐蚀区域检测模型。该模型创新性地结合了卷积编码器与视觉Transformer序列编码器利用注意力融合模块增强边界识别并采用基于得分的多尺度增强机制来突出腐蚀区域。该方法在阴影、遮挡和复杂纹理条件下依然能保持高精度在公开数据集上取得了优异的F1性能特别适用于船舶制造与检修中对船体钢板和涂层腐蚀的自动化识别与量化分析。链接https://www.mdpi.com/2076-3417/14/21/9900
论文复现链接http://8.130.138.52:21001/#/view/reproduction?doid=newID/b200a489-e2c8-4dbd-8767-682d15dd4c04
- 《Mechanical Failure and Metal Degradation of Ships and Marine Structures》系统探讨了船舶与海洋结构中金属材料包括高强钢、不锈钢、铜合金、钛合金等在海洋环境下的机械损伤与电化学腐蚀协同作用问题。重点分析了"机械载荷+腐蚀"耦合导致的疲劳、裂纹及腐蚀裂纹等失效机制并对相应的检测、预警与保护措施进行了深入讨论。链接https://doi.org/10.3390/met13020272
- 《Research Progress of Marine Anti-Fouling Coatings》2024全面综述了海洋防污涂层的研究进展涵盖自我抛光涂料、生物可降解涂料、低表面能涂层、仿生涂层和纳米涂层等新型涂层技术。虽然重点在于防污但文中指出许多防污机制与防腐蚀机制相互关联通过阻碍生物附着可有效减少微生物引起的腐蚀和表面破坏。链接https://doi.org/10.3390/coatings14091227
- 《Corrosion-Wear Behavior of Shipbuilding Steels (EH40, FH40, 921A) in Seawater》研究了三种常用船用钢EH40、FH40、921A在海水环境中磨损与腐蚀共同作用下的复合失效行为。通过机械-电化学方法系统测定了腐蚀磨损损失、表面形貌变化和耐腐蚀性能差异发现在此类工况中921A钢表现最佳。《Journal of Chinese Society for Corrosion and Protection》, Vol 45(4): 894-904, 2025
- 《Sulfur-Selenium Alloy Coatings for Universal Corrosion Resistance》设计了一种硫-硒合金涂层能够在包括模拟海水和硫酸盐还原菌生物环境在内的各种条件下为钢材提供高效的防腐蚀保护约99.9%效率腐蚀速率比裸钢低6-7个数量级。该涂层具有良好的机械性能、非多孔结构能有效阻碍多种扩散性腐蚀因子的渗透。arXiv:2009.02451
2. 代码
- CoaST - Anti-corrosive coatings research (DTU, Denmark)丹麦技术大学的CoaST项目专注于抗腐蚀涂层的性能表征与失效机制分析。该项目采用电化学测试与表面/界面无损检测技术相结合的方法系统评估涂层的保护效果与失效途径为船舶腐蚀防护提供了重要的研究平台。链接https://www.kt.dtu.dk/research/coast/research-areas/anti-corrosive-coatings
- Surface Defect Detection 仓库该仓库集成了多种表面缺陷检测算法为实现船舶腐蚀检测提供了可靠的代码基础和参考架构。链接https://github.com/Charmve/Surface-Defect-Detection
- Hugging Face 预训练模型平台提供了轻量级的腐蚀检测模型可直接下载并微调用于快速验证和部署。链接https://huggingface.co/mahdavian/corrosion-model
3. 数据集
用于训练和验证MCD-Net模型的相关数据集包括
- iamcloud/Corrosion_Dataset一个包含约2000多张标注图像的数据集ImageFolder格式Apache-2.0协议专门收录材料腐蚀图像适用于船舶腐蚀监测模型的训练。链接https://huggingface.co/datasets/iamcloud/Corrosion_Dataset
- Francesco/corrosion-bi3q3此数据集提供了多种腐蚀实例可用于支持船舶涂层损坏与基体腐蚀的自动化检测任务。链接https://huggingface.co/datasets/Francesco/corrosion-bi3q3。
4. 研究团队
- 海洋腐蚀与防护全国重点实验室是我国在该领域最具权威性的国家级研究机构依托于中国船舶集团旗下725所。实验室提供从材料、设计到运维的全生命周期腐蚀控制解决方案其核心优势在于能够进行多技术如“阴极保护+涂层”)的综合集成研发与验证。其研究成果广泛应用于船舶、海洋平台等国家重大工程,为保障我国海洋装备的安全与耐久性提供了关键支撑。
- 水性船舶防腐涂层新材料项目团队是一支源自东北石油大学的创新创业团队专注于推动船舶涂料行业的环保化转型。团队致力于攻克水性涂料在船舶高湿环境下干燥慢、早期耐水性差的技术瓶颈旨在开发出兼具优异防腐性能和施工便利性的水性体系以替代传统溶剂型涂料其目标是实现进口产品替代并大幅减少船舶制造与维修过程中的VOCs排放。
- 微纳一体化防腐耐磨技术研发团队是以广州航海学院青年技术骨干为核心的新锐力量。团队针对铝合金在极端海洋环境中的腐蚀-磨损难题,创新性地提出了“微米级氧化陶瓷层+纳米自润滑固化层+改性硅烷钝化层”的三层复合防护体系。该技术使防护层的结合强度、耐磨性提升2000倍和耐腐蚀性能自腐蚀电流密度降低6个数量级实现了质的飞跃。
- 北京大学南京创新研究院BXCFD团队专注于自主可控的高性能计算流体力学软件研发。团队在船舶与海洋工程水动力学领域取得关键突破其自主研发的软件在船舶兴波阻力预报精度上达到国际先进水平并成功实现了对复杂的船-桨-舵6自由度运动的耦合高精度模拟为船舶水动力性能研究与优化提供了强大的国产化工具。
- 清华大学多相流与生物流动力学实验室由罗先武教授带领,长期专注于流体机械及工程领域。团队重点研究高速船舶喷水推进装置的基础理论与空化流动机理,其研究成果为攻克喷水推进系统中的空化、振动与噪声问题提供了坚实的理论依据和技术支持,对我国高性能船舶推进技术的发展具有重要意义。
'''
context = {"材料腐蚀":c1}
Request_Cache: dict[str, str] = {}
app = Flask(__name__)
@@ -48,6 +99,7 @@ def Handle_fill_stepTask_TaskProcess():
if USE_CACHE:
if requestIdentifier in Request_Cache:
time.sleep(1)
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask_TaskProcess(
@@ -118,6 +170,7 @@ def Handle_fill_stepTask():
if USE_CACHE:
if requestIdentifier in Request_Cache:
time.sleep(1)
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask(
@@ -211,19 +264,16 @@ def Handle_generate_basePlan():
if USE_CACHE:
if requestIdentifier in Request_Cache:
time.sleep(2)
return jsonify(Request_Cache[requestIdentifier])
try:
basePlan = generate_basePlan(
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
InitialObject_List=incoming_data["Initial Input Object"],
)
except ValueError as e:
return jsonify({"error": str(e)}), 400
except Exception as e:
return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
basePlan = generate_basePlan(
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
InitialObject_List=incoming_data["Initial Input Object"],
context="\n请注意,目标是给出解决方案,而不是工程实现,不需要实验验证,也不需要推广计划。"
)
basePlan_withRenderSpec = Add_Collaboration_Brief_FrontEnd(basePlan)
Request_Cache[requestIdentifier] = basePlan_withRenderSpec
response = jsonify(basePlan_withRenderSpec)
@@ -233,6 +283,10 @@ def Handle_generate_basePlan():
@app.route("/executePlan", methods=["post"])
def Handle_executePlan():
incoming_data = request.get_json()
cur_context = ""
if "材料腐蚀" in incoming_data["plan"]["General Goal"]:
cur_context = context["材料腐蚀"]
requestIdentifier = str(
(
"/executePlan",
@@ -244,6 +298,7 @@ def Handle_executePlan():
if USE_CACHE:
if requestIdentifier in Request_Cache:
time.sleep(3)
return jsonify(Request_Cache[requestIdentifier])
RehearsalLog = executePlan(
@@ -251,20 +306,21 @@ def Handle_executePlan():
incoming_data["num_StepToRun"],
incoming_data["RehearsalLog"],
AgentProfile_Dict,
context=cur_context
)
Request_Cache[requestIdentifier] = RehearsalLog
response = jsonify(RehearsalLog)
return response
@app.route("/_saveRequestCashe", methods=["post"])
@app.route("/_saveRequestCashe", methods=["GET"])
def Handle_saveRequestCashe():
with open(
os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "w"
) as json_file:
json.dump(Request_Cache, json_file, indent=4)
response = jsonify(
{"code": 200, "content": "request cashe sucessfully saved"}
{"code": 200, "content": "request cashe sucessfully saved: " + os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json")}
)
return response
@@ -283,38 +339,10 @@ def set_agents():
def init():
global AgentBoard, AgentProfile_Dict, Request_Cache
# Load Request Cache
try:
with open(
os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "r"
) as json_file:
Request_Cache = json.load(json_file)
print(f"✅ Loaded Request_Cache with {len(Request_Cache)} entries")
except Exception as e:
print(f"⚠️ Failed to load Request_Cache: {e}")
Request_Cache = {}
# Load Agent Board
try:
with open(
os.path.join(os.getcwd(), "AgentRepo", "agentBoard_v1.json"), "r", encoding="utf-8"
) as json_file:
AgentBoard = json.load(json_file)
print(f"✅ Loaded AgentBoard with {len(AgentBoard)} agents")
# Build AgentProfile_Dict
AgentProfile_Dict = {}
for item in AgentBoard:
name = item["Name"]
profile = item["Profile"]
AgentProfile_Dict[name] = profile
print(f"✅ Built AgentProfile_Dict with {len(AgentProfile_Dict)} profiles")
except Exception as e:
print(f"⚠️ Failed to load AgentBoard: {e}")
AgentBoard = []
AgentProfile_Dict = {}
with open(
os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "r"
) as json_file:
Request_Cache = json.load(json_file)
if __name__ == "__main__":
@@ -324,8 +352,8 @@ if __name__ == "__main__":
parser.add_argument(
"--port",
type=int,
default=8000,
help="set the port number, 8000 by defaul.",
default=8017,
help="set the port number, 8017 by defaul.",
)
args = parser.parse_args()
init()

View File

@@ -21,11 +21,12 @@ services:
ports:
- "8000:8000"
environment:
- OPENAI_API_BASE=htts://api.openai.com
- OPENAI_API_KEY=
- OPENAI_API_MODEL=gpt-4-turbo-preview
- FAST_DESIGN_MODE=True
- OPENAI_API_BASE=https://api.moleapi.com/v1
- OPENAI_API_KEY=sk-sps7FBCbEvu85DfPoS8SdnPwYLEoW7u5Dd8vCDTXqPLpHuyb
- OPENAI_API_MODEL=gpt-4.1-mini
- FAST_DESIGN_MODE=False
- GROQ_API_KEY=
- USE_CACHE=True
networks:
- agentcoord-network

View File

@@ -1,31 +0,0 @@
.DS_Store
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,32 +0,0 @@
.DS_Store
.env
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,22 +0,0 @@
FROM node:20-alpine AS base
FROM base AS installer
WORKDIR /app
COPY package.json .npmrc ./
RUN npm install
FROM base AS builder
WORKDIR /app
COPY --from=installer /app/node_modules ./node_modules
COPY . .
RUN npm run build
FROM base AS runner
WORKDIR /app
EXPOSE 8080/tcp
COPY .npmrc ./
RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
COPY modern.config.ts package.json ./
COPY --from=builder /app/dist ./dist
ENV API_BASE=
CMD ["npm", "run", "serve"]

View File

@@ -1,39 +0,0 @@
# AgentCoord Frontend
This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
## Installation
You can launch the frontend by simply using `docker-compose` in the root directory of the project.
Or, you can launch the frontend manually by following the steps below.
1. Install the dependencies by running `npm install`.
```bash
npm install
```
2. Build the frontend by running `npm run build`.
```bash
npm run build
```
3. Start the frontend by running `npm run serve`.
```bash
npm run serve
```
Then you can access the frontend by visiting `http://localhost:8080`.
## Development
You can run the frontend in development mode by running `npm run dev`.
```bash
npm run dev
```
The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.

View File

@@ -1,66 +0,0 @@
{
"name": "agent-coord-frontend",
"version": "0.0.1",
"scripts": {
"reset": "npx rimraf ./**/node_modules",
"dev": "modern dev",
"build": "modern build",
"start": "modern start",
"serve": "modern serve",
"new": "modern new",
"lint": "modern lint",
"prepare": "husky install",
"upgrade": "modern upgrade"
},
"engines": {
"node": ">=16.18.1"
},
"lint-staged": {
"*.{js,jsx,ts,tsx,mjs,cjs}": [
"node --max_old_space_size=8192 ./node_modules/eslint/bin/eslint.js --fix --color --cache --quiet"
]
},
"eslintIgnore": [
"node_modules/",
"dist/"
],
"dependencies": {
"@emotion/react": "^11.11.3",
"@emotion/styled": "^11.11.0",
"@modern-js/runtime": "2.46.1",
"@mui/icons-material": "^5.15.6",
"@mui/material": "^5.15.6",
"@sttot/api-hooks": "^1.2.5",
"d3": "^7.8.5",
"localforage": "^1.10.0",
"lodash": "^4.17.21",
"mobx": "^6.12.0",
"mobx-react-lite": "^4.0.6",
"re-resizable": "^6.9.11",
"react": "~18.2.0",
"react-dom": "~18.2.0",
"react-markdown": "^9.0.1",
"react-rnd": "^10.4.1",
"rehype-highlight": "^7.0.0",
"rehype-katex": "^7.0.0",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0"
},
"devDependencies": {
"@modern-js-app/eslint-config": "2.46.1",
"@modern-js/app-tools": "2.46.1",
"@modern-js/eslint-config": "2.46.1",
"@modern-js/tsconfig": "2.46.1",
"@types/d3": "^7.4.3",
"@types/jest": "~29.2.4",
"@types/lodash": "^4.14.202",
"@types/node": "~16.11.7",
"@types/react": "~18.0.26",
"@types/react-dom": "~18.0.10",
"husky": "~8.0.1",
"lint-staged": "~13.1.0",
"prettier": "~2.8.1",
"rimraf": "~3.0.2",
"typescript": "~5.0.4"
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,170 +0,0 @@
import React from 'react';
import { observer } from 'mobx-react-lite';
import { Divider, SxProps } from '@mui/material';
import Box from '@mui/material/Box';
import UnfoldLessIcon from '@mui/icons-material/UnfoldLess';
import MoreHorizIcon from '@mui/icons-material/MoreHoriz';
import MarkdownBlock from '@/components/MarkdownBlock';
import { globalStorage } from '@/storage';
import type { IExecuteStepHistoryItem } from '@/apis/execute-plan';
import AgentIcon from '@/components/AgentIcon';
import { getAgentActionStyle } from '@/storage/plan';
export interface IStepHistoryItemProps {
  /** The history entry (agent, action type, optional result) to render. */
  item: IExecuteStepHistoryItem;
  /** Ref forwarded to the root element so the parent can locate/measure it. */
  actionRef: React.RefObject<HTMLDivElement | HTMLElement>;
  /** Extra sx styles merged over the action-type default style. */
  style?: SxProps;
  /** Invoked with true on mouse-over and false on mouse-out of the item. */
  hoverCallback: (isHovered: boolean) => void;
  /** Called repeatedly while the expand/collapse animation runs so the parent can re-layout. */
  handleExpand?: () => void;
}

/**
 * One collapsible entry in an execution step's history: shows the acting
 * agent (icon + name) and action type and, when the item has a result, an
 * expandable detail panel with the step description and a markdown-rendered
 * result.
 */
export default observer(
  ({
    item,
    actionRef,
    hoverCallback,
    handleExpand,
    style = {},
  }: IStepHistoryItemProps) => {
    const [expand, setExpand] = React.useState(false);
    const refDetail = React.useRef<HTMLDivElement>(null);

    // Drive the height transition of the detail container whenever the
    // expand state flips, and keep notifying the parent while the CSS
    // animation plays.
    React.useEffect(() => {
      if (refDetail.current) {
        refDetail.current.style.height = expand
          ? `${refDetail.current.scrollHeight}px`
          : '0px';
      }
      if (handleExpand) {
        // Tick the parent every 10ms for the duration of the transition
        // (20 * 10ms = 200ms, matching the CSS transition below).
        let tick = 0;
        const intervalId = setInterval(() => {
          handleExpand();
          tick++;
          if (tick >= 20) {
            clearInterval(intervalId);
          }
        }, 10);
        // Fix: stop ticking if the component unmounts mid-animation
        // (the original effect had no cleanup).
        return () => clearInterval(intervalId);
      }
      return undefined;
    }, [expand]);

    const s = { ...getAgentActionStyle(item.type), ...style } as SxProps;

    return (
      <Box
        ref={actionRef}
        className="step-history-item"
        sx={{
          userSelect: 'none',
          borderRadius: '10px',
          padding: '4px',
          fontSize: '14px',
          position: 'relative',
          marginTop: '4px',
          backgroundColor: (s as any).backgroundColor,
          border: `2px solid ${(s as any).borderColor}`,
        }}
        onMouseOver={() => hoverCallback(true)}
        onMouseOut={() => hoverCallback(false)}
      >
        <Box sx={{ display: 'flex', alignItems: 'center' }}>
          <AgentIcon
            name={globalStorage.agentIconMap.get(item.agent)}
            style={{ height: '36px', width: 'auto', margin: '0px' }}
            tooltipInfo={globalStorage.agentMap.get(item.agent)}
          />
          <Box component="span" sx={{ fontWeight: 500, marginLeft: '4px' }}>
            {item.agent}
          </Box>
          <Box component="span" sx={{ fontWeight: 400 }}>
            : {item.type}
          </Box>
        </Box>
        {item.result ? (
          <Box
            ref={refDetail}
            sx={{
              overflow: 'hidden',
              transition: 'height 200ms ease-out', // animate expand/collapse
            }}
          >
            {expand ? (
              <>
                <Divider
                  sx={{
                    margin: '4px 0px',
                    borderBottom: '2px dashed', // dashed separator line
                    borderColor: '#0003',
                  }}
                />
                <Box
                  sx={{
                    marginLeft: '6px',
                    marginBottom: '4px',
                    color: '#0009',
                    fontWeight: 400,
                  }}
                >
                  {item.description}
                </Box>
                <MarkdownBlock
                  text={item.result}
                  style={{
                    marginTop: '5px',
                    borderRadius: '10px',
                    padding: '6px',
                    background: '#FFF9',
                    fontSize: '12px',
                    maxHeight: '240px',
                    overflowY: 'auto',
                    border: '1px solid #0003',
                    whiteSpace: 'pre-wrap',
                    wordWrap: 'break-word',
                    marginBottom: '5px',
                  }}
                />
              </>
            ) : (
              <></>
            )}
            <Box
              onClick={e => {
                setExpand(v => !v);
                e.stopPropagation();
              }}
              sx={{
                position: 'absolute',
                right: '8px',
                top: '24px',
                cursor: 'pointer',
                userSelect: 'none',
                height: '14px',
                display: 'flex',
                alignItems: 'center',
                justifyContent: 'center',
                background: '#0002',
                borderRadius: '8px',
                marginLeft: '4px',
                padding: '0 4px',
                '&:hover': {
                  background: '#0003',
                },
              }}
            >
              {expand ? (
                <UnfoldLessIcon
                  sx={{ fontSize: '16px', transform: 'rotate(90deg)' }}
                />
              ) : (
                <MoreHorizIcon sx={{ fontSize: '16px' }} />
              )}
            </Box>
          </Box>
        ) : (
          <></>
        )}
      </Box>
    );
  },
);

View File

@@ -1,101 +0,0 @@
import React from 'react';
import { observer } from 'mobx-react-lite';
import { SxProps } from '@mui/material';
import Box from '@mui/material/Box';
import IconButton from '@mui/material/IconButton';
import FormControl from '@mui/material/FormControl';
import FilledInput from '@mui/material/FilledInput';
import TelegramIcon from '@mui/icons-material/Telegram';
import CircularProgress from '@mui/material/CircularProgress';
import { globalStorage } from '@/storage';
export interface UserGoalInputProps {
  /** Extra sx styles applied to the outer FormControl. */
  style?: SxProps;
}

/**
 * Input row for the user's general goal. Submitting stores the typed goal
 * on the global form state and starts base-plan generation; while a plan is
 * generating the submit button is replaced by a spinner. The input is
 * disabled while the API is busy, before agents are ready, or once a plan
 * exists.
 */
export default observer(({ style = {} }: UserGoalInputProps) => {
  // Latest text typed by the user; kept in a ref so typing never re-renders.
  const inputRef = React.useRef<string>('');
  const inputElementRef = React.useRef<HTMLInputElement>(null);

  // Sync the visible input with the current plan's goal (if a plan manager
  // exists), falling back to the stored brief goal otherwise.
  React.useEffect(() => {
    if (inputElementRef.current) {
      if (globalStorage.planManager) {
        inputElementRef.current.value = globalStorage.planManager.goal;
      } else {
        inputElementRef.current.value = globalStorage.briefGoal;
      }
    }
  }, [globalStorage.planManager]);

  return (
    <FormControl
      sx={{
        position: 'relative',
        ...style,
      }}
    >
      <FilledInput
        disabled={
          globalStorage.api.busy ||
          !globalStorage.api.agentsReady ||
          globalStorage.api.planReady
        }
        placeholder="Your Goal"
        fullWidth
        inputRef={inputElementRef}
        onChange={event => (inputRef.current = event.target.value)}
        size="small"
        sx={{
          borderRadius: '10px',
          background: '#E1E1E1',
          borderBottom: 'none !important',
          '&::before': {
            borderBottom: 'none !important',
          },
          '& > input': {
            padding: '10px',
          },
        }}
        startAdornment={
          // NOTE(review): the leading backslash in the label below appears
          // intentional (decorative) — confirm with design before removing.
          <Box
            sx={{
              color: '#4A9C9E',
              fontWeight: 800,
              fontSize: '18px',
              textWrap: 'nowrap',
              userSelect: 'none',
            }}
          >
            \General Goal:
          </Box>
        }
      />
      {globalStorage.api.planGenerating ? (
        <CircularProgress
          sx={{
            position: 'absolute',
            right: '12px',
            top: '20px',
            width: '24px !important',
            height: '24px !important',
          }}
        />
      ) : (
        <IconButton
          disabled={
            globalStorage.api.busy ||
            !globalStorage.api.agentsReady ||
            globalStorage.api.planReady
          }
          color="primary"
          aria-label="提交" // Chinese for "Submit" — kept byte-identical
          sx={{ position: 'absolute', right: '6px', top: '12px' }}
          onClick={() => {
            globalStorage.form.goal = inputRef.current;
            globalStorage.generatePlanBase();
          }}
        >
          <TelegramIcon />
        </IconButton>
      )}
    </FormControl>
  );
});

View File

@@ -1,14 +0,0 @@
{
"extends": "@modern-js/tsconfig/base",
"compilerOptions": {
"declaration": false,
"jsx": "preserve",
"baseUrl": "./",
"paths": {
"@/*": ["./src/*"],
"@shared/*": ["./shared/*"]
}
},
"include": ["src", "shared", "config", "modern.config.ts"],
"exclude": ["**/node_modules"]
}

View File

@@ -1,6 +1,31 @@
node_modules
dist
.idea
.vscode
.git
.gitignore
.DS_Store
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,8 +0,0 @@
[*.{js,jsx,mjs,cjs,ts,tsx,mts,cts,vue,css,scss,sass,less,styl}]
charset = utf-8
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
end_of_line = lf
max_line_length = 100

View File

@@ -1,79 +0,0 @@
{
"globals": {
"Component": true,
"ComponentPublicInstance": true,
"ComputedRef": true,
"DirectiveBinding": true,
"EffectScope": true,
"ExtractDefaultPropTypes": true,
"ExtractPropTypes": true,
"ExtractPublicPropTypes": true,
"InjectionKey": true,
"MaybeRef": true,
"MaybeRefOrGetter": true,
"PropType": true,
"Ref": true,
"ShallowRef": true,
"Slot": true,
"Slots": true,
"VNode": true,
"WritableComputedRef": true,
"computed": true,
"createApp": true,
"customRef": true,
"defineAsyncComponent": true,
"defineComponent": true,
"effectScope": true,
"getCurrentInstance": true,
"getCurrentScope": true,
"getCurrentWatcher": true,
"h": true,
"inject": true,
"isProxy": true,
"isReactive": true,
"isReadonly": true,
"isRef": true,
"isShallow": true,
"markRaw": true,
"nextTick": true,
"onActivated": true,
"onBeforeMount": true,
"onBeforeUnmount": true,
"onBeforeUpdate": true,
"onDeactivated": true,
"onErrorCaptured": true,
"onMounted": true,
"onRenderTracked": true,
"onRenderTriggered": true,
"onScopeDispose": true,
"onServerPrefetch": true,
"onUnmounted": true,
"onUpdated": true,
"onWatcherCleanup": true,
"provide": true,
"reactive": true,
"readonly": true,
"ref": true,
"resolveComponent": true,
"shallowReactive": true,
"shallowReadonly": true,
"shallowRef": true,
"toRaw": true,
"toRef": true,
"toRefs": true,
"toValue": true,
"triggerRef": true,
"unref": true,
"useAttrs": true,
"useCssModule": true,
"useCssVars": true,
"useId": true,
"useModel": true,
"useSlots": true,
"useTemplateRef": true,
"watch": true,
"watchEffect": true,
"watchPostEffect": true,
"watchSyncEffect": true
}
}

View File

@@ -1 +0,0 @@
* text=auto eol=lf

54
frontend/.gitignore vendored
View File

@@ -1,36 +1,32 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
.DS_Store
dist
dist-ssr
coverage
*.local
.env
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
# Cypress
/cypress/videos/
/cypress/screenshots/
dist/
coverage/
release/
output/
output_resource/
log/
# Vitest
__screenshots__/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

View File

@@ -1,6 +0,0 @@
{
"$schema": "https://json.schemastore.org/prettierrc",
"semi": false,
"singleQuote": true,
"printWidth": 100
}

View File

@@ -1,9 +0,0 @@
{
"recommendations": [
"Vue.volar",
"vitest.explorer",
"dbaeumer.vscode-eslint",
"EditorConfig.EditorConfig",
"esbenp.prettier-vscode"
]
}

View File

@@ -1,95 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Development Commands
```bash
# Install dependencies
pnpm install
# Development server with hot reload
pnpm dev
# Build for production
pnpm build
# Type checking
pnpm type-check
# Lint and fix code
pnpm lint
# Format code
pnpm format
# Run unit tests
pnpm test:unit
```
## Project Architecture
This is a **Multi-Agent Coordination Platform** (多智能体协同平台) built with Vue 3, TypeScript, and Vite. The application enables users to create and manage AI agents with specialized roles and coordinate them to complete complex tasks through visual workflows.
### Tech Stack
- **Vue 3** with Composition API and TypeScript
- **Vite** for build tooling and development
- **Element Plus** for UI components
- **Pinia** for state management
- **Tailwind CSS** for styling
- **Vue Router** for routing (minimal usage)
- **JSPlumb** for visual workflow connections
- **Axios** for API requests with custom interceptors
### Key Architecture Components
#### State Management (`src/stores/modules/agents.ts`)
Central store managing:
- Agent definitions with profiles and icons
- Task workflow data structures (`IRawStepTask`, `TaskProcess`)
- Search functionality and current task state
- Raw plan responses with UUID generation for tasks
#### Request Layer (`src/utils/request.ts`)
Custom Axios wrapper with:
- Proxy configuration for `/api` -> `http://localhost:8000`
- Response interceptors for error handling
- `useRequest` hook for reactive data fetching
- Integrated Element Plus notifications
#### Component Structure
- **Layout System** (`src/layout/`): Main application layout with Header and Main sections
- **Task Templates** (`src/layout/components/Main/TaskTemplate/`): Different task types including AgentRepo, TaskSyllabus, and TaskResult
- **Visual Workflow**: JSPlumb integration for drag-and-drop agent coordination flows
#### Icon System
- SVG icons stored in `src/assets/icons/`
- Custom `SvgIcon` component with vite-plugin-svg-icons
- Icon categories include specialist roles (doctor, engineer, researcher, etc.)
### Build Configuration
#### Vite (`vite.config.ts`)
- Element Plus auto-import and component resolution
- SVG icon caching with custom symbol IDs
- Proxy setup for API requests to backend
- Path aliases: `@/` maps to `src/`
#### Docker Deployment
- Multi-stage build: Node.js build + Caddy web server
- API proxy configured via Caddyfile
- Environment variable support for different deployment modes
### Data Models
Key interfaces for the agent coordination system:
- `Agent`: Name, Profile, Icon
- `IRawStepTask`: Individual task steps with agent selection and inputs
- `TaskProcess`: Action descriptions with important inputs
- `IRichText`: Template-based content formatting with style support
### Development Notes
- Uses pnpm as package manager (required by package.json)
- Node version constraint: ^20.19.0 or >=22.12.0
- Dark theme enabled by default in App.vue
- Auto-imports configured for Vue APIs
- No traditional Vue routes - uses component-based navigation

View File

@@ -1,27 +1,23 @@
ARG CADDY_VERSION=2.6
ARG BUILD_ENV=prod
FROM node:20-alpine AS base
FROM node:20.19.0 as base
FROM base AS installer
WORKDIR /app
COPY package.json .npmrc ./
RUN npm install
FROM base AS builder
WORKDIR /app
COPY --from=installer /app/node_modules ./node_modules
COPY . .
RUN npm install -g pnpm
RUN pnpm install
RUN pnpm build
RUN npm run build
# The base for mode ENVIRONMENT=prod
FROM caddy:${CADDY_VERSION}-alpine as prod
# Workaround for https://github.com/alpinelinux/docker-alpine/issues/98#issuecomment-679278499
RUN sed -i 's/https/http/' /etc/apk/repositories \
&& apk add --no-cache bash
COPY docker/Caddyfile /etc/caddy/
COPY --from=base /app/dist /frontend
# Run stage
FROM ${BUILD_ENV}
EXPOSE 80 443
VOLUME ["/data", "/etc/caddy"]
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]
FROM base AS runner
WORKDIR /app
EXPOSE 8080/tcp
COPY .npmrc ./
COPY src ./src
COPY modern.config.ts package.json ./
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
ENV API_BASE=
CMD ["npm", "run", "serve"]

View File

@@ -1,106 +1,39 @@
# 多智能体协同平台 (Agent Coordination Platform)
# AgentCoord Frontend
一个强大的可视化平台用于创建和管理具有专门角色的AI智能体通过直观的工作流程协调它们来完成复杂任务。
This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
## ✨ 功能特性
## Installation
- **多智能体系统**创建具有专门角色和专业知识的AI智能体
- **可视化工作流编辑器**使用JSPlumb设计智能体协调流程的拖放界面
- **任务管理**:定义、执行和跟踪复杂的多步骤任务
- **实时通信**:无缝的智能体交互和协调
- **丰富的模板系统**:支持样式的灵活内容格式化
- **TypeScript支持**:整个应用程序的完整类型安全
You can launch the frontend by simply using `docker-compose` in the root directory of the project.
## 🚀 快速开始
Or, you can launch the frontend manually by following the steps below.
### 开发命令
1. Install the dependencies by running `npm install`.
```bash
# 安装依赖
pnpm install
# 开发服务器(热重载)
pnpm dev
# 生产构建
pnpm build
# 类型检查
pnpm type-check
# 代码检查和修复
pnpm lint
# 代码格式化
pnpm format
# 运行单元测试
pnpm test:unit
npm install
```
### 系统要求
2. Build the frontend by running `npm run build`.
- Node.js ^20.19.0 或 >=22.12.0
- pnpm必需的包管理器
## 🏗️ 架构设计
### 技术栈
- **Vue 3**Composition API 和 TypeScript
- **Vite**:构建工具和开发环境
- **Element Plus**UI组件库
- **Pinia**:状态管理
- **Tailwind CSS**:样式框架
- **JSPlumb**:可视化工作流连接
- **Axios**API请求与自定义拦截器
### 核心组件
#### 状态管理
中央存储管理智能体定义、任务工作流和协调状态
#### 请求层
自定义Axios包装器具有代理配置和集成通知
#### 可视化工作流
JSPlumb集成用于拖放智能体协调流程
#### 图标系统
基于SVG的图标用于不同的智能体专业化和角色
## 📁 项目结构
```
src/
├── assets/ # 静态资源,包括智能体图标
├── components/ # 可复用的Vue组件
├── layout/ # 应用布局和主要组件
├── stores/ # Pinia状态管理
├── utils/ # 工具函数和请求层
├── views/ # 页面组件
└── App.vue # 根组件
```bash
npm run build
```
## 🎯 开发指南
3. Start the frontend by running `npm run serve`.
### IDE设置
```bash
npm run serve
```
[VS Code](https://code.visualstudio.com/) + [Vue (Official)](https://marketplace.visualstudio.com/items?itemName=Vue.volar)禁用Vetur
Then you can access the frontend by visiting `http://localhost:8080`.
### 浏览器开发工具
## Development
- 基于Chromium的浏览器
- [Vue.js devtools](https://chromewebstore.google.com/detail/vuejs-devtools/nhdogjmejiglipccpnnnanhbledajbpd)
- 在DevTools中启用自定义对象格式化程序
- Firefox
- [Vue.js devtools](https://addons.mozilla.org/en-US/firefox/addon/vue-js-devtools/)
- 在DevTools中启用自定义对象格式化程序
You can run the frontend in development mode by running `npm run dev`.
## 🚀 部署
```bash
npm run dev
```
应用程序支持Docker部署使用多阶段构建过程Node.js用于构建Caddy作为Web服务器。
## 📄 许可证
MIT许可证 - 详见LICENSE文件
The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.

View File

@@ -1,75 +0,0 @@
/* eslint-disable */
/* prettier-ignore */
// @ts-nocheck
// noinspection JSUnusedGlobalSymbols
// Generated by unplugin-auto-import
// biome-ignore lint: disable
export {}
declare global {
const EffectScope: typeof import('vue').EffectScope
const ElMessage: typeof import('element-plus/es').ElMessage
const ElNotification: typeof import('element-plus/es').ElNotification
const computed: typeof import('vue').computed
const createApp: typeof import('vue').createApp
const customRef: typeof import('vue').customRef
const defineAsyncComponent: typeof import('vue').defineAsyncComponent
const defineComponent: typeof import('vue').defineComponent
const effectScope: typeof import('vue').effectScope
const getCurrentInstance: typeof import('vue').getCurrentInstance
const getCurrentScope: typeof import('vue').getCurrentScope
const getCurrentWatcher: typeof import('vue').getCurrentWatcher
const h: typeof import('vue').h
const inject: typeof import('vue').inject
const isProxy: typeof import('vue').isProxy
const isReactive: typeof import('vue').isReactive
const isReadonly: typeof import('vue').isReadonly
const isRef: typeof import('vue').isRef
const isShallow: typeof import('vue').isShallow
const markRaw: typeof import('vue').markRaw
const nextTick: typeof import('vue').nextTick
const onActivated: typeof import('vue').onActivated
const onBeforeMount: typeof import('vue').onBeforeMount
const onBeforeUnmount: typeof import('vue').onBeforeUnmount
const onBeforeUpdate: typeof import('vue').onBeforeUpdate
const onDeactivated: typeof import('vue').onDeactivated
const onErrorCaptured: typeof import('vue').onErrorCaptured
const onMounted: typeof import('vue').onMounted
const onRenderTracked: typeof import('vue').onRenderTracked
const onRenderTriggered: typeof import('vue').onRenderTriggered
const onScopeDispose: typeof import('vue').onScopeDispose
const onServerPrefetch: typeof import('vue').onServerPrefetch
const onUnmounted: typeof import('vue').onUnmounted
const onUpdated: typeof import('vue').onUpdated
const onWatcherCleanup: typeof import('vue').onWatcherCleanup
const provide: typeof import('vue').provide
const reactive: typeof import('vue').reactive
const readonly: typeof import('vue').readonly
const ref: typeof import('vue').ref
const resolveComponent: typeof import('vue').resolveComponent
const shallowReactive: typeof import('vue').shallowReactive
const shallowReadonly: typeof import('vue').shallowReadonly
const shallowRef: typeof import('vue').shallowRef
const toRaw: typeof import('vue').toRaw
const toRef: typeof import('vue').toRef
const toRefs: typeof import('vue').toRefs
const toValue: typeof import('vue').toValue
const triggerRef: typeof import('vue').triggerRef
const unref: typeof import('vue').unref
const useAttrs: typeof import('vue').useAttrs
const useCssModule: typeof import('vue').useCssModule
const useCssVars: typeof import('vue').useCssVars
const useId: typeof import('vue').useId
const useModel: typeof import('vue').useModel
const useSlots: typeof import('vue').useSlots
const useTemplateRef: typeof import('vue').useTemplateRef
const watch: typeof import('vue').watch
const watchEffect: typeof import('vue').watchEffect
const watchPostEffect: typeof import('vue').watchPostEffect
const watchSyncEffect: typeof import('vue').watchSyncEffect
}
// for type re-export
declare global {
// @ts-ignore
export type { Component, Slot, Slots, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, ShallowRef, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue'
import('vue')
}

View File

@@ -1,205 +0,0 @@
#!/bin/bash
# Install Node.js (via nvm when missing or too old), install the Claude Code
# CLI, and write the ZHIPU API configuration into ~/.claude/settings.json.
set -euo pipefail

# ========================
# Constants
# ========================
SCRIPT_NAME=$(basename "$0")
NODE_MIN_VERSION=18            # minimum acceptable Node.js major version
NODE_INSTALL_VERSION=22        # version installed when missing or too old
NVM_VERSION="v0.40.3"
CLAUDE_PACKAGE="@anthropic-ai/claude-code"
CONFIG_DIR="$HOME/.claude"
CONFIG_FILE="$CONFIG_DIR/settings.json"   # written by configure_claude (via node)
API_BASE_URL="https://open.bigmodel.cn/api/anthropic"
API_KEY_URL="https://open.bigmodel.cn/usercenter/proj-mgmt/apikeys"
API_TIMEOUT_MS=3000000

# ========================
# Logging helpers
# ========================
log_info() {
    echo "🔹 $*"
}

log_success() {
    echo "$*"
}

log_error() {
    echo "$*" >&2
}

# Create a directory (and parents) if it does not exist; exit on failure.
ensure_dir_exists() {
    local dir="$1"
    if [ ! -d "$dir" ]; then
        mkdir -p "$dir" || {
            log_error "Failed to create directory: $dir"
            exit 1
        }
    fi
}

# ========================
# Node.js installation
# ========================
install_nodejs() {
    # Declare and assign separately so a failing command substitution is not
    # masked by `local` (ShellCheck SC2155).
    local platform
    platform=$(uname -s)
    case "$platform" in
        Linux|Darwin)
            log_info "Installing Node.js on $platform..."

            # Install nvm
            log_info "Installing nvm ($NVM_VERSION)..."
            curl -s https://raw.githubusercontent.com/nvm-sh/nvm/"$NVM_VERSION"/install.sh | bash

            # Load nvm into this shell
            log_info "Loading nvm environment..."
            \. "$HOME/.nvm/nvm.sh"

            # Install Node.js
            log_info "Installing Node.js $NODE_INSTALL_VERSION..."
            nvm install "$NODE_INSTALL_VERSION"

            # Verify the installation actually produced a runnable node
            node -v &>/dev/null || {
                log_error "Node.js installation failed"
                exit 1
            }

            log_success "Node.js installed: $(node -v)"
            log_success "npm version: $(npm -v)"
            ;;
        *)
            log_error "Unsupported platform: $platform"
            exit 1
            ;;
    esac
}

# ========================
# Node.js version check
# ========================
check_nodejs() {
    local current_version major_version
    if command -v node &>/dev/null; then
        current_version=$(node -v | sed 's/v//')
        major_version=$(echo "$current_version" | cut -d. -f1)

        if [ "$major_version" -ge "$NODE_MIN_VERSION" ]; then
            log_success "Node.js is already installed: v$current_version"
            return 0
        else
            log_info "Node.js v$current_version is installed but version < $NODE_MIN_VERSION. Upgrading..."
            install_nodejs
        fi
    else
        log_info "Node.js not found. Installing..."
        install_nodejs
    fi
}

# ========================
# Claude Code installation
# ========================
install_claude_code() {
    if command -v claude &>/dev/null; then
        log_success "Claude Code is already installed: $(claude --version)"
    else
        log_info "Installing Claude Code..."
        npm install -g "$CLAUDE_PACKAGE" || {
            log_error "Failed to install claude-code"
            exit 1
        }
        log_success "Claude Code installed successfully"
    fi
}

# Mark onboarding as completed in ~/.claude.json so the CLI skips its
# first-run wizard; preserves any existing settings in that file.
configure_claude_json(){
    node --eval '
    const os = require("os");
    const fs = require("fs");
    const path = require("path");

    const homeDir = os.homedir();
    const filePath = path.join(homeDir, ".claude.json");

    if (fs.existsSync(filePath)) {
        const content = JSON.parse(fs.readFileSync(filePath, "utf-8"));
        fs.writeFileSync(filePath, JSON.stringify({ ...content, hasCompletedOnboarding: true }, null, 2), "utf-8");
    } else {
        fs.writeFileSync(filePath, JSON.stringify({ hasCompletedOnboarding: true }, null, 2), "utf-8");
    }'
}

# ========================
# API key configuration
# ========================
configure_claude() {
    log_info "Configuring Claude Code..."
    echo " You can get your API key from: $API_KEY_URL"

    read -s -p "🔑 Please enter your ZHIPU API key: " api_key
    echo

    if [ -z "$api_key" ]; then
        log_error "API key cannot be empty. Please run the script again."
        exit 1
    fi

    ensure_dir_exists "$CONFIG_DIR"

    # Fix: pass the secret and config values through the environment instead
    # of splicing them into the JavaScript source with shell interpolation —
    # a key containing quotes or backslashes would otherwise break (or inject
    # code into) the generated program.
    CC_API_KEY="$api_key" \
    CC_BASE_URL="$API_BASE_URL" \
    CC_TIMEOUT_MS="$API_TIMEOUT_MS" \
    node --eval '
    const os = require("os");
    const fs = require("fs");
    const path = require("path");

    const homeDir = os.homedir();
    const filePath = path.join(homeDir, ".claude", "settings.json");

    // Merge into any existing settings file rather than overwriting it.
    const content = fs.existsSync(filePath)
        ? JSON.parse(fs.readFileSync(filePath, "utf-8"))
        : {};

    fs.writeFileSync(filePath, JSON.stringify({
        ...content,
        env: {
            ANTHROPIC_AUTH_TOKEN: process.env.CC_API_KEY,
            ANTHROPIC_BASE_URL: process.env.CC_BASE_URL,
            API_TIMEOUT_MS: process.env.CC_TIMEOUT_MS,
            CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: 1
        }
    }, null, 2), "utf-8");
    ' || {
        log_error "Failed to write settings.json"
        exit 1
    }

    log_success "Claude Code configured successfully"
}

# ========================
# Main flow
# ========================
main() {
    echo "🚀 Starting $SCRIPT_NAME"

    check_nodejs
    install_claude_code
    configure_claude_json
    configure_claude

    echo ""
    log_success "🎉 Installation completed successfully!"
    echo ""
    echo "🚀 You can now start using Claude Code with:"
    echo "   claude"
}

main "$@"

View File

@@ -1,29 +0,0 @@
/* eslint-disable */
// @ts-nocheck
// biome-ignore lint: disable
// oxlint-disable
// ------
// Generated by unplugin-vue-components
// Read more: https://github.com/vuejs/core/pull/3399
export {}
/* prettier-ignore */
declare module 'vue' {
export interface GlobalComponents {
ElAutocomplete: typeof import('element-plus/es')['ElAutocomplete']
ElButton: typeof import('element-plus/es')['ElButton']
ElCard: typeof import('element-plus/es')['ElCard']
ElCollapse: typeof import('element-plus/es')['ElCollapse']
ElCollapseItem: typeof import('element-plus/es')['ElCollapseItem']
ElPopover: typeof import('element-plus/es')['ElPopover']
ElTooltip: typeof import('element-plus/es')['ElTooltip']
MultiLineTooltip: typeof import('./src/components/MultiLineTooltip/index.vue')['default']
RouterLink: typeof import('vue-router')['RouterLink']
RouterView: typeof import('vue-router')['RouterView']
SvgIcon: typeof import('./src/components/SvgIcon/index.vue')['default']
}
export interface GlobalDirectives {
vLoading: typeof import('element-plus/es')['ElLoadingDirective']
}
}

Some files were not shown because too many files have changed in this diff Show More