Compare commits: f50dcd2fe7...main (9 commits)
Commits:
- 6392301833
- ab8c9e294d
- 1aa9e280b0
- 4fa5504697
- 041986f5cd
- 00ef22505e
- b73419b7a0
- 974af053ca
- 0c571dec21
@@ -1,24 +1,30 @@
 import asyncio
-import openai
+from openai import OpenAI, AsyncOpenAI
 import yaml
 from termcolor import colored
 import os
 
+
+# Helper function to avoid circular import
+def print_colored(text, text_color="green", background="on_white"):
+    print(colored(text, text_color, background))
+
 
 # load config (apikey, apibase, model)
 yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
 try:
     with open(yaml_file, "r", encoding="utf-8") as file:
         yaml_data = yaml.safe_load(file)
 except Exception:
-    yaml_file = {}
+    yaml_data = {}
 OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
     "OPENAI_API_BASE", "https://api.openai.com"
 )
-openai.api_base = OPENAI_API_BASE
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
     "OPENAI_API_KEY", ""
 )
-openai.api_key = OPENAI_API_KEY
+
+# Initialize OpenAI clients
+client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
+async_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
 MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
     "OPENAI_API_MODEL", "gpt-4-turbo-preview"
 )
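The hunk above is the core of the openai-python v1+ migration: module-level `openai.api_key`/`openai.api_base` globals give way to explicit `OpenAI`/`AsyncOpenAI` client objects. A minimal sketch of the before/after call pattern (placeholder key and model; with a real key the last lines perform an actual request):

```python
# Legacy style (openai<=0.28): configuration lives in module-level globals.
# import openai
# openai.api_key = "sk-placeholder"
# openai.api_base = "https://api.openai.com"
# rsp = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[...])

# Client style (openai>=1.0): configuration is bound to a client instance.
from openai import OpenAI

client = OpenAI(api_key="sk-placeholder", base_url="https://api.openai.com/v1")
rsp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "ping"}],
)
print(rsp.choices[0].message.content)  # responses are typed objects, not dicts
```

Note that the new SDK expects the versioned endpoint (`.../v1`) in `base_url`, while the code above passes `OPENAI_API_BASE` through unchanged; worth double-checking if requests fail with 404s.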
@@ -71,14 +77,14 @@ def LLM_Completion(
 
 async def _achat_completion_stream_groq(messages: list[dict]) -> str:
     from groq import AsyncGroq
-    client = AsyncGroq(api_key=GROQ_API_KEY)
+    groq_client = AsyncGroq(api_key=GROQ_API_KEY)
 
     max_attempts = 5
 
     for attempt in range(max_attempts):
         print("Attempt to use Groq (Fast Design Mode):")
         try:
-            stream = await client.chat.completions.create(
+            response = await groq_client.chat.completions.create(
                 messages=messages,
                 # model='gemma-7b-it',
                 model="mixtral-8x7b-32768",
@@ -92,9 +98,9 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
-    full_reply_content = stream.choices[0].message.content
+    full_reply_content = response.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
     print()
     return full_reply_content
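The `raise "failed"` to `raise Exception("failed")` change in this hunk (and the identical ones below) is a genuine bug fix, not style: Python 3 only allows raising `BaseException` subclasses, so raising a string replaces the intended error with a `TypeError`. A self-contained demonstration:

```python
try:
    raise "failed"  # a str is not an exception instance
except TypeError as err:
    print(err)  # exceptions must derive from BaseException

try:
    raise Exception("failed")  # the corrected form
except Exception as err:
    print(err)  # failed
```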
@@ -103,14 +109,14 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
 async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
     from mistralai.client import MistralClient
     from mistralai.models.chat_completion import ChatMessage
-    client = MistralClient(api_key=MISTRAL_API_KEY)
+    mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
     # client=AsyncGroq(api_key=GROQ_API_KEY)
     max_attempts = 5
 
     for attempt in range(max_attempts):
         try:
             messages[len(messages) - 1]["role"] = "user"
-            stream = client.chat(
+            stream = mistral_client.chat(
                 messages=[
                     ChatMessage(
                         role=message["role"], content=message["content"]
@@ -119,14 +125,13 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
                 ],
                 # model = "mistral-small-latest",
                 model="open-mixtral-8x7b",
-                # response_format={"type": "json_object"},
             )
             break  # If the operation is successful, break the loop
         except Exception:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
     full_reply_content = stream.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
@@ -135,15 +140,11 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
 
 
 async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-    response = await openai.ChatCompletion.acreate(
+    response = await async_client.chat.completions.create(
         messages=messages,
         max_tokens=4096,
-        n=1,
-        stop=None,
         temperature=0.3,
-        timeout=3,
+        timeout=30,
         model="gpt-3.5-turbo-16k",
         stream=True,
     )
@@ -154,40 +155,33 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
     # iterate through the stream of events
     async for chunk in response:
         collected_chunks.append(chunk)  # save the event response
-        choices = chunk["choices"]
+        choices = chunk.choices
         if len(choices) > 0:
-            chunk_message = chunk["choices"][0].get(
-                "delta", {}
-            )  # extract the message
+            chunk_message = chunk.choices[0].delta
             collected_messages.append(chunk_message)  # save the message
-            if "content" in chunk_message:
+            if chunk_message.content:
                 print(
-                    colored(chunk_message["content"], "blue", "on_white"),
+                    colored(chunk_message.content, "blue", "on_white"),
                     end="",
                 )
     print()
 
     full_reply_content = "".join(
-        [m.get("content", "") for m in collected_messages]
+        [m.content or "" for m in collected_messages if m is not None]
     )
     return full_reply_content
 
 
 async def _achat_completion_json(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-
     max_attempts = 5
 
     for attempt in range(max_attempts):
         try:
-            stream = await openai.ChatCompletion.acreate(
+            response = await async_client.chat.completions.create(
                 messages=messages,
                 max_tokens=4096,
-                n=1,
-                stop=None,
                 temperature=0.3,
-                timeout=3,
+                timeout=30,
                 model=MODEL,
                 response_format={"type": "json_object"},
             )
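Streaming chunks change shape in the new SDK as well: each chunk is a typed object, so `chunk["choices"][0].get("delta", {})` becomes `chunk.choices[0].delta`, and content accumulates from `.content` attributes that are `None` on role and finish chunks. A condensed sketch of the new loop, assuming an `AsyncOpenAI` instance like the `async_client` created above:

```python
from openai import AsyncOpenAI

async def stream_reply(async_client: AsyncOpenAI, messages: list[dict]) -> str:
    response = await async_client.chat.completions.create(
        model="gpt-3.5-turbo-16k",
        messages=messages,
        stream=True,
    )
    parts: list[str] = []
    async for chunk in response:
        if chunk.choices:                   # some chunks carry no choices
            delta = chunk.choices[0].delta  # typed object, not a dict
            if delta.content:               # None on role and finish chunks
                parts.append(delta.content)
    return "".join(parts)
```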
@@ -196,62 +190,62 @@ async def _achat_completion_json(messages: list[dict]) -> str:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
-    full_reply_content = stream.choices[0].message.content
+    full_reply_content = response.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
     print()
     return full_reply_content
 
 
 async def _achat_completion_stream(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-    response = await openai.ChatCompletion.acreate(
-        **_cons_kwargs(messages), stream=True
-    )
+    try:
+        response = await async_client.chat.completions.create(
+            **_cons_kwargs(messages), stream=True
+        )
 
-    # create variables to collect the stream of chunks
-    collected_chunks = []
-    collected_messages = []
-    # iterate through the stream of events
-    async for chunk in response:
-        collected_chunks.append(chunk)  # save the event response
-        choices = chunk["choices"]
-        if len(choices) > 0:
-            chunk_message = chunk["choices"][0].get(
-                "delta", {}
-            )  # extract the message
-            collected_messages.append(chunk_message)  # save the message
-            if "content" in chunk_message:
-                print(
-                    colored(chunk_message["content"], "blue", "on_white"),
-                    end="",
-                )
-    print()
+        # create variables to collect the stream of chunks
+        collected_chunks = []
+        collected_messages = []
+        # iterate through the stream of events
+        async for chunk in response:
+            collected_chunks.append(chunk)  # save the event response
+            choices = chunk.choices
+            if len(choices) > 0:
+                chunk_message = chunk.choices[0].delta
+                collected_messages.append(chunk_message)  # save the message
+                if chunk_message.content:
+                    print(
+                        colored(chunk_message.content, "blue", "on_white"),
+                        end="",
+                    )
+        print()
 
-    full_reply_content = "".join(
-        [m.get("content", "") for m in collected_messages]
-    )
-    return full_reply_content
+        full_reply_content = "".join(
+            [m.content or "" for m in collected_messages if m is not None]
+        )
+        return full_reply_content
+    except Exception as e:
+        print_colored(f"OpenAI API error in _achat_completion_stream: {str(e)}", "red")
+        raise
 
 
 def _chat_completion(messages: list[dict]) -> str:
-    print(messages, flush=True)
-    rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
-    content = rsp["choices"][0]["message"]["content"]
-    print(content, flush=True)
-    return content
+    try:
+        rsp = client.chat.completions.create(**_cons_kwargs(messages))
+        content = rsp.choices[0].message.content
+        return content
+    except Exception as e:
+        print_colored(f"OpenAI API error in _chat_completion: {str(e)}", "red")
+        raise
 
 
 def _cons_kwargs(messages: list[dict]) -> dict:
     kwargs = {
         "messages": messages,
-        "max_tokens": 4096,
-        "n": 1,
-        "stop": None,
-        "temperature": 0.5,
-        "timeout": 3,
+        "max_tokens": 2000,
+        "temperature": 0.3,
+        "timeout": 15,
     }
     kwargs_mode = {"model": MODEL}
     kwargs.update(kwargs_mode)
@@ -33,7 +33,7 @@ class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
 
 PROMPT_AGENT_SELECTION_GENERATION = """
 ## Instruction
-Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board". Agent Selection Plan ranks from high to low according to ability.
+Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".
 
 ## General Goal (Specify the general goal for the collaboration plan)
 {General_Goal}
@@ -80,6 +80,10 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
 
 
 def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
+    # Check if Agent_Board is None or empty
+    if Agent_Board is None or len(Agent_Board) == 0:
+        raise ValueError("Agent_Board cannot be None or empty. Please ensure agents are set via /setAgents endpoint before generating a plan.")
+
     messages = [
         {
             "role": "system",
@@ -7,14 +7,14 @@ import AgentCoord.util as util
 
 
 def generate_basePlan(
-    General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List, context
+    General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
 ):
     basePlan = {
         "Initial Input Object": InitialObject_List,
         "Collaboration Process": [],
     }
     PlanOutline = generate_PlanOutline(
-        InitialObject_List=[], General_Goal=General_Goal + context
+        InitialObject_List=[], General_Goal=General_Goal
     )
     for stepItem in PlanOutline:
         Current_Task = {
@@ -44,7 +44,7 @@ def generate_basePlan(
             ),
         }
         TaskProcess = generate_TaskProcess(
-            General_Goal=General_Goal + context,
+            General_Goal=General_Goal,
             Current_Task_Description=Current_Task_Description,
         )
         # add the generated AgentSelection and TaskProcess to the stepItem
@@ -5,7 +5,7 @@ import json
 
 PROMPT_PLAN_OUTLINE_GENERATION = """
 ## Instruction
-Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline". The number of steps in the Plan Outline cannot exceed 5.
+Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
 
 ## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
 {InitialObject_List}
@@ -51,7 +51,6 @@ TaskContent: Describe the task of the current step.
 InputObject_List: The list of the input objects that will be used in current step.
 OutputObject: The name of the final output object of current step.
 
-请用中文回答
 """
 
 
@@ -84,4 +83,16 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
             ),
         },
     ]
-    return read_LLM_Completion(messages)["Plan_Outline"]
+    result = read_LLM_Completion(messages)
+    if isinstance(result, dict) and "Plan_Outline" in result:
+        return result["Plan_Outline"]
+    else:
+        # 如果格式不正确,返回默认的计划大纲
+        return [
+            {
+                "StepName": "Default Step",
+                "TaskContent": "Generated default plan step due to format error",
+                "InputObject_List": [],
+                "OutputObject": "Default Output"
+            }
+        ]
@@ -90,7 +90,6 @@ InputObject_List: List existing objects that should be utilized in current step.
 AgentName: Specify the agent who will perform the action, You CAN ONLY USE THE NAME APPEARS IN "AgentInvolved".
 ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".
 
-请用中文回答
 """
 
 
@@ -80,7 +80,14 @@ class BaseAction():
             Important_Mark = ""
             action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)
 
-        prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
+        # Handle missing agent profiles gracefully
+        if agentName not in AgentProfile_Dict:
+            print_colored(text=f"Warning: Agent '{agentName}' not found in AgentProfile_Dict. Using default profile.", text_color="yellow")
+            agentProfile = f"AI Agent named {agentName}"
+        else:
+            agentProfile = AgentProfile_Dict[agentName]
+
+        prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = agentProfile, General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
         print_colored(text = prompt, text_color="red")
         messages = [{"role":"system", "content": prompt}]
         ActionResult = LLM_Completion(messages,True,False)
@@ -5,7 +5,7 @@ Note: You can say something before you provide the improved version of the content.
 The improved version you provide must be a completed version (e.g. if you provide a improved story, you should give completed story content, rather than just reporting where you have improved).
 When you decide to give the improved version of the content, it should be start like this:
 
-## xxx的改进版本
+## Improved version of xxx
 (the improved version of the content)
 ```
 
@@ -4,7 +4,7 @@ from termcolor import colored
 
 
 # Accept inputs: num_StepToRun (the number of step to run, if None, run to the end), plan, RehearsalLog, AgentProfile_Dict
-def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
+def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
     # Prepare for execution
     KeyObjects = {}
     finishedStep_index = -1
@@ -85,9 +85,17 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
         # start the group chat
         util.print_colored(TaskDescription, text_color="green")
         ActionHistory = []
+        action_count = 0
+        total_actions = len(TaskProcess)
+
         for ActionInfo in TaskProcess:
+            action_count += 1
             actionType = ActionInfo["ActionType"]
             agentName = ActionInfo["AgentName"]
+
+            # 添加进度日志
+            util.print_colored(f"🔄 Executing action {action_count}/{total_actions}: {actionType} by {agentName}", text_color="yellow")
+
             if actionType in Action.customAction_Dict:
                 currentAction = Action.customAction_Dict[actionType](
                     info=ActionInfo,
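An aside on the progress counter added in this hunk: manual `action_count` bookkeeping works, but `enumerate` expresses the same thing and cannot drift out of sync with the loop. A runnable sketch with stand-in data:

```python
TaskProcess = [  # stand-in actions; the real list comes from the plan step
    {"ActionType": "Draft", "AgentName": "Alice"},
    {"ActionType": "Finalize", "AgentName": "Bob"},
]

total_actions = len(TaskProcess)
for action_count, ActionInfo in enumerate(TaskProcess, start=1):
    print(f"🔄 Executing action {action_count}/{total_actions}: "
          f"{ActionInfo['ActionType']} by {ActionInfo['AgentName']}")
```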
@@ -101,7 +109,7 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
                     KeyObjects=KeyObjects,
                 )
                 ActionInfo_with_Result = currentAction.run(
-                    General_Goal=plan["General Goal"] + "\n\n### Useful Information (some information can help accomplish the task)\n如果使用该信息,请在回答中显示指出是来自数联网的数据。例如,基于数联网数据搜索结果。" + context,
+                    General_Goal=plan["General Goal"],
                     TaskDescription=TaskDescription,
                     agentName=agentName,
                     AgentProfile_Dict=AgentProfile_Dict,
@@ -113,8 +121,6 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict, context):
             ActionHistory.append(ActionInfo_with_Result)
         # post processing for the group chat (finish)
         objectLogNode["content"] = KeyObjects[OutputName]
-        if StepRun_count == len(plan["Collaboration Process"][(finishedStep_index + 1): run_to]):
-            objectLogNode["content"] += context
         RehearsalLog.append(stepLogNode)
         RehearsalLog.append(objectLogNode)
         stepLogNode["ActionHistory"] = ActionHistory
@@ -20,6 +20,8 @@ def camel_case_to_normal(s):
 def generate_template_sentence_for_CollaborationBrief(
     input_object_list, output_object, agent_list, step_task
 ):
+    # Ensure step_task is not None
+    step_task = step_task if step_task is not None else "perform the task"
     # Check if the names are in camel case (no spaces) and convert them to normal naming convention
     input_object_list = (
         [
@@ -31,29 +33,48 @@ def generate_template_sentence_for_CollaborationBrief(
     )
     output_object = (
         camel_case_to_normal(output_object)
-        if is_camel_case(output_object)
-        else output_object
+        if output_object is not None and is_camel_case(output_object)
+        else (output_object if output_object is not None else "unknown output")
     )
 
     # Format the agents into a string with proper grammar
-    agent_str = (
-        " and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
-        if len(agent_list) > 1
-        else agent_list[0]
-    )
+    if agent_list is None or len(agent_list) == 0:
+        agent_str = "Unknown agents"
+    elif all(agent is not None for agent in agent_list):
+        agent_str = (
+            " and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
+            if len(agent_list) > 1
+            else agent_list[0]
+        )
+    else:
+        # Filter out None values
+        filtered_agents = [agent for agent in agent_list if agent is not None]
+        if filtered_agents:
+            agent_str = (
+                " and ".join([", ".join(filtered_agents[:-1]), filtered_agents[-1]])
+                if len(filtered_agents) > 1
+                else filtered_agents[0]
+            )
+        else:
+            agent_str = "Unknown agents"
 
     if input_object_list is None or len(input_object_list) == 0:
         # Combine all the parts into the template sentence
         template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
     else:
         # Format the input objects into a string with proper grammar
-        input_str = (
-            " and ".join(
-                [", ".join(input_object_list[:-1]), input_object_list[-1]]
-            )
-            if len(input_object_list) > 1
-            else input_object_list[0]
-        )
+        # Filter out None values from input_object_list
+        filtered_input_list = [obj for obj in input_object_list if obj is not None]
+        if filtered_input_list:
+            input_str = (
+                " and ".join(
+                    [", ".join(filtered_input_list[:-1]), filtered_input_list[-1]]
+                )
+                if len(filtered_input_list) > 1
+                else filtered_input_list[0]
+            )
+        else:
+            input_str = "unknown inputs"
         # Combine all the parts into the template sentence
         template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
 
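The comma-plus-"and" expression that this hunk now applies to filtered lists is easier to see in isolation. Extracted as a standalone helper (hypothetical name, same expression as in the diff):

```python
def join_with_and(items: list[str]) -> str:
    # "A", "A and B", "A, B and C": the grammar used for agents and inputs
    return (
        " and ".join([", ".join(items[:-1]), items[-1]])
        if len(items) > 1
        else items[0]
    )

print(join_with_and(["Alice"]))                  # Alice
print(join_with_and(["Alice", "Bob"]))           # Alice and Bob
print(join_with_and(["Alice", "Bob", "Carol"]))  # Alice, Bob and Carol
```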
@@ -90,7 +111,7 @@ def read_LLM_Completion(messages, useGroq=True):
             return json.loads(match.group(0).strip())
         except Exception:
             pass
-    raise ("bad format!")
+    return {}  # 返回空对象而不是抛出异常
 
 
 def read_json_content(text):
@@ -111,7 +132,7 @@ def read_json_content(text):
     if match:
         return json.loads(match.group(0).strip())
 
-    raise ("bad format!")
+    return {}  # 返回空对象而不是抛出异常
 
 
 def read_outputObject_content(text, keyword):
@@ -127,4 +148,4 @@ def read_outputObject_content(text, keyword):
     if match:
         return match.group(1).strip()
     else:
-        raise ("bad format!")
+        return ""  # 返回空字符串而不是抛出异常
@@ -1,97 +0,0 @@
-[
-    {
-        "Icon": "Hailey_Johnson.png",
-        "Name": "船舶设计师",
-        "Profile": "提供船舶制造中的实际需求和约束。"
-    },
-    {
-        "Icon": "Jennifer_Moore.png",
-        "Name": "防护工程专家",
-        "Profile": "专注于船舶腐蚀防护技术的设计与应用。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
-    },
-    {
-        "Icon": "Jane_Moreno.png",
-        "Name": "病理生理学家",
-        "Profile": "专注于失血性休克的疾病机制,为药物研发提供理论靶点。"
-    },
-    {
-        "Icon": "Giorgio_Rossi.png",
-        "Name": "药物化学家",
-        "Profile": "负责将靶点概念转化为实际可合成的分子。"
-    },
-    {
-        "Icon": "Tamara_Taylor.png",
-        "Name": "制剂工程师",
-        "Profile": "负责将活性药物成分(API)变成稳定、可用、符合战场要求的剂型。"
-    },
-    {
-        "Icon": "Maria_Lopez.png",
-        "Name": "监管事务专家",
-        "Profile": "深谙药品审评法规,目标是找到最快的合法上市路径。"
-    },
-    {
-        "Icon": "Sam_Moore.png",
-        "Name": "物理学家",
-        "Profile": "从热力学与统计力学的基本原理出发,研究液态金属的自由能、焓、熵、比热等参数的理论建模。"
-    },
-    {
-        "Icon": "Yuriko_Yamamoto.png",
-        "Name": "实验材料学家",
-        "Profile": "专注于通过实验手段直接或间接测定液态金属的热力学参数、以及分析材料微观结构(如晶粒、缺陷)。"
-    },
-    {
-        "Icon": "Carlos_Gomez.png",
-        "Name": "计算模拟专家",
-        "Profile": "侧重于利用数值计算和模拟技术获取液态金属的热力学参数。"
-    },
-    {
-        "Icon": "John_Lin.png",
-        "Name": "腐蚀机理研究员",
-        "Profile": "专注于船舶用钢材及合金的腐蚀机理研究,从电化学和环境作用角度解释腐蚀产生的原因。在你的总结回答中,必须引用来自数联网的搜索数据,是搜索数据,不是数联网的研究成果。"
-    },
-    {
-        "Icon": "Arthur_Burton.png",
-        "Name": "先进材料研发员",
-        "Profile": "专注于开发和评估新型耐腐蚀材料、复合材料及固态电池材料。"
-    },
-    {
-        "Icon": "Eddy_Lin.png",
-        "Name": "肾脏病学家",
-        "Profile": "专注于慢性肾脏病的诊断、治疗和患者管理,能提供临床洞察。"
-    },
-    {
-        "Icon": "Isabella_Rodriguez.png",
-        "Name": "临床研究协调员",
-        "Profile": "负责受试者招募和临床试验流程优化。"
-    },
-    {
-        "Icon": "Latoya_Williams.png",
-        "Name": "中医药专家",
-        "Profile": "理解药物的中药成分和作用机制。"
-    },
-    {
-        "Icon": "Carmen_Ortiz.png",
-        "Name": "药物安全专家",
-        "Profile": "专注于药物不良反应数据收集、分析和报告。"
-    },
-    {
-        "Icon": "Rajiv_Patel.png",
-        "Name": "二维材料科学家",
-        "Profile": "专注于二维材料(如石墨烯)的合成、性质和应用。"
-    },
-    {
-        "Icon": "Tom_Moreno.png",
-        "Name": "光电物理学家",
-        "Profile": "研究材料的光电转换机制和关键影响因素。"
-    },
-    {
-        "Icon": "Ayesha_Khan.png",
-        "Name": "机器学习专家",
-        "Profile": "专注于开发和应用AI模型用于材料模拟。"
-    },
-    {
-        "Icon": "Mei_Lin.png",
-        "Name": "流体动力学专家",
-        "Profile": "专注于流体行为理论和模拟。"
-    }
-]
File diff suppressed because one or more lines are too long
@@ -9,4 +9,4 @@ GROQ_API_KEY: ""
 MISTRAL_API_KEY: ""
 
 ## options under experimentation, leave them as False unless you know what it is for
-USE_CACHE: True
+USE_CACHE: False
@@ -1,5 +1,5 @@
 Flask==3.0.2
-openai==0.28.1
+openai==2.8.1
 PyYAML==6.0.1
 termcolor==2.4.0
 groq==0.4.2
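This pin crosses the openai 1.0 API break, which is what forces every client rewrite above. A tiny smoke test (hypothetical, not part of the repo) to confirm the installed package matches the pin and exposes the client classes:

```python
from importlib.metadata import version

from openai import OpenAI, AsyncOpenAI  # these imports only exist in openai>=1.0

print(version("openai"))  # expect "2.8.1" per requirements.txt
client = OpenAI(api_key="sk-placeholder")  # constructing a client makes no network call
```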
@@ -1,6 +1,5 @@
 from flask import Flask, request, jsonify
 import json
-import time
 from DataProcess import Add_Collaboration_Brief_FrontEnd
 from AgentCoord.RehearsalEngine_V2.ExecutePlan import executePlan
 from AgentCoord.PlanEngine.basePlan_Generator import generate_basePlan
@@ -32,56 +31,6 @@ else:
     USE_CACHE = USE_CACHE.lower() in ["true", "1", "yes"]
 AgentBoard = None
 AgentProfile_Dict = {}
-c1 = '''
-
-=====数联网搜索结果=====
-
-1. 论文
-
-- 《Surface Defect Detection and Evaluation for Marine Vessels using Multi-Stage Deep Learning》提出一个面向船舶制造与检修的多阶段深度学习流水线,用于船体表面缺陷检测与评估,重点识别和量化与船舶制造材料腐蚀相关的现象,包括腐蚀(rust)、附着物(fouling)、涂层剥离(delamination)等。研究特别关注船舶材料腐蚀在不同涂层颜色、光照条件和相机角度下的鲁棒性,有助于提高船舶制造过程中对材料腐蚀的检测与管理能力。论文强调其方法在船舶材料表面腐蚀监测和维修决策中的应用价值。链接:https://arxiv.org/abs/2203.09580
-论文复现链接:http://8.130.138.52:21001/#/view/reproduction?doid=newID/5a4fef68-a138-4adb-ba88-43aa4474c08c
-
-- 《Efficient Metal Corrosion Area Detection Model Combining Convolution and Transformer (MCD-Net)》提出了一种高效的金属腐蚀区域检测模型。该模型创新性地结合了卷积编码器与视觉Transformer序列编码器,利用注意力融合模块增强边界识别,并采用基于得分的多尺度增强机制来突出腐蚀区域。该方法在阴影、遮挡和复杂纹理条件下依然能保持高精度,在公开数据集上取得了优异的F1性能,特别适用于船舶制造与检修中对船体钢板和涂层腐蚀的自动化识别与量化分析。链接:https://www.mdpi.com/2076-3417/14/21/9900
-论文复现链接:http://8.130.138.52:21001/#/view/reproduction?doid=newID/b200a489-e2c8-4dbd-8767-682d15dd4c04
-
-- 《Mechanical Failure and Metal Degradation of Ships and Marine Structures》系统探讨了船舶与海洋结构中金属材料(包括高强钢、不锈钢、铜合金、钛合金等)在海洋环境下的机械损伤与电化学腐蚀协同作用问题。重点分析了"机械载荷+腐蚀"耦合导致的疲劳、裂纹及腐蚀裂纹等失效机制,并对相应的检测、预警与保护措施进行了深入讨论。链接:https://doi.org/10.3390/met13020272
-
-- 《Research Progress of Marine Anti-Fouling Coatings》(2024)全面综述了海洋防污涂层的研究进展,涵盖自我抛光涂料、生物可降解涂料、低表面能涂层、仿生涂层和纳米涂层等新型涂层技术。虽然重点在于防污,但文中指出许多防污机制与防腐蚀机制相互关联,通过阻碍生物附着可有效减少微生物引起的腐蚀和表面破坏。链接:https://doi.org/10.3390/coatings14091227
-
-- 《Corrosion-Wear Behavior of Shipbuilding Steels (EH40, FH40, 921A) in Seawater》研究了三种常用船用钢(EH40、FH40、921A)在海水环境中磨损与腐蚀共同作用下的复合失效行为。通过机械-电化学方法系统测定了腐蚀磨损损失、表面形貌变化和耐腐蚀性能差异,发现在此类工况中921A钢表现最佳。《Journal of Chinese Society for Corrosion and Protection》, Vol 45(4): 894-904, 2025
-
-- 《Sulfur-Selenium Alloy Coatings for Universal Corrosion Resistance》设计了一种硫-硒合金涂层,能够在包括模拟海水和硫酸盐还原菌生物环境在内的各种条件下为钢材提供高效的防腐蚀保护(约99.9%效率),腐蚀速率比裸钢低6-7个数量级。该涂层具有良好的机械性能、非多孔结构,能有效阻碍多种扩散性腐蚀因子的渗透。arXiv:2009.02451
-
-2. 代码
-
-- CoaST - Anti-corrosive coatings research (DTU, Denmark):丹麦技术大学的CoaST项目专注于抗腐蚀涂层的性能表征与失效机制分析。该项目采用电化学测试与表面/界面无损检测技术相结合的方法,系统评估涂层的保护效果与失效途径,为船舶腐蚀防护提供了重要的研究平台。链接:https://www.kt.dtu.dk/research/coast/research-areas/anti-corrosive-coatings
-
-- Surface Defect Detection 仓库:该仓库集成了多种表面缺陷检测算法,为实现船舶腐蚀检测提供了可靠的代码基础和参考架构。链接:https://github.com/Charmve/Surface-Defect-Detection
-
-- Hugging Face 预训练模型:平台提供了轻量级的腐蚀检测模型,可直接下载并微调,用于快速验证和部署。链接:https://huggingface.co/mahdavian/corrosion-model
-
-3. 数据集
-
-用于训练和验证MCD-Net模型的相关数据集包括:
-
-- iamcloud/Corrosion_Dataset:一个包含约2000多张标注图像的数据集(ImageFolder格式,Apache-2.0协议),专门收录材料腐蚀图像,适用于船舶腐蚀监测模型的训练。链接:https://huggingface.co/datasets/iamcloud/Corrosion_Dataset
-
-- Francesco/corrosion-bi3q3:此数据集提供了多种腐蚀实例,可用于支持船舶涂层损坏与基体腐蚀的自动化检测任务。链接:https://huggingface.co/datasets/Francesco/corrosion-bi3q3。
-
-4. 研究团队
-
-- 海洋腐蚀与防护全国重点实验室是我国在该领域最具权威性的国家级研究机构,依托于中国船舶集团旗下725所。实验室提供从材料、设计到运维的全生命周期腐蚀控制解决方案,其核心优势在于能够进行多技术(如“阴极保护+涂层”)的综合集成研发与验证。其研究成果广泛应用于船舶、海洋平台等国家重大工程,为保障我国海洋装备的安全与耐久性提供了关键支撑。
-
-- 水性船舶防腐涂层新材料项目团队是一支源自东北石油大学的创新创业团队,专注于推动船舶涂料行业的环保化转型。团队致力于攻克水性涂料在船舶高湿环境下干燥慢、早期耐水性差的技术瓶颈,旨在开发出兼具优异防腐性能和施工便利性的水性体系,以替代传统溶剂型涂料,其目标是实现进口产品替代并大幅减少船舶制造与维修过程中的VOCs排放。
-
-- 微纳一体化防腐耐磨技术研发团队是以广州航海学院青年技术骨干为核心的新锐力量。团队针对铝合金在极端海洋环境中的腐蚀-磨损难题,创新性地提出了“微米级氧化陶瓷层+纳米自润滑固化层+改性硅烷钝化层”的三层复合防护体系。该技术使防护层的结合强度、耐磨性(提升2000倍)和耐腐蚀性能(自腐蚀电流密度降低6个数量级)实现了质的飞跃。
-
-- 北京大学南京创新研究院BXCFD团队专注于自主可控的高性能计算流体力学软件研发。团队在船舶与海洋工程水动力学领域取得关键突破,其自主研发的软件在船舶兴波阻力预报精度上达到国际先进水平,并成功实现了对复杂的船-桨-舵6自由度运动的耦合高精度模拟,为船舶水动力性能研究与优化提供了强大的国产化工具。
-
-- 清华大学多相流与生物流动力学实验室由罗先武教授带领,长期专注于流体机械及工程领域。团队重点研究高速船舶喷水推进装置的基础理论与空化流动机理,其研究成果为攻克喷水推进系统中的空化、振动与噪声问题提供了坚实的理论依据和技术支持,对我国高性能船舶推进技术的发展具有重要意义。
-
-'''
-context = {"材料腐蚀":c1}
 Request_Cache: dict[str, str] = {}
 app = Flask(__name__)
@@ -99,7 +48,6 @@ def Handle_fill_stepTask_TaskProcess():
 
     if USE_CACHE:
         if requestIdentifier in Request_Cache:
-            time.sleep(1)
             return jsonify(Request_Cache[requestIdentifier])
 
     filled_stepTask = fill_stepTask_TaskProcess(
@@ -170,7 +118,6 @@ def Handle_fill_stepTask():
 
     if USE_CACHE:
         if requestIdentifier in Request_Cache:
-            time.sleep(1)
             return jsonify(Request_Cache[requestIdentifier])
 
     filled_stepTask = fill_stepTask(
@@ -264,16 +211,19 @@ def Handle_generate_basePlan():
 
     if USE_CACHE:
         if requestIdentifier in Request_Cache:
-            time.sleep(2)
             return jsonify(Request_Cache[requestIdentifier])
 
-    basePlan = generate_basePlan(
-        General_Goal=incoming_data["General Goal"],
-        Agent_Board=AgentBoard,
-        AgentProfile_Dict=AgentProfile_Dict,
-        InitialObject_List=incoming_data["Initial Input Object"],
-        context="\n请注意,目标是给出解决方案,而不是工程实现,不需要实验验证,也不需要推广计划。"
-    )
+    try:
+        basePlan = generate_basePlan(
+            General_Goal=incoming_data["General Goal"],
+            Agent_Board=AgentBoard,
+            AgentProfile_Dict=AgentProfile_Dict,
+            InitialObject_List=incoming_data["Initial Input Object"],
+        )
+    except ValueError as e:
+        return jsonify({"error": str(e)}), 400
+    except Exception as e:
+        return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
     basePlan_withRenderSpec = Add_Collaboration_Brief_FrontEnd(basePlan)
     Request_Cache[requestIdentifier] = basePlan_withRenderSpec
     response = jsonify(basePlan_withRenderSpec)
@@ -283,10 +233,6 @@ def Handle_generate_basePlan():
 @app.route("/executePlan", methods=["post"])
 def Handle_executePlan():
     incoming_data = request.get_json()
-    cur_context = ""
-    if "材料腐蚀" in incoming_data["plan"]["General Goal"]:
-        cur_context = context["材料腐蚀"]
-
     requestIdentifier = str(
         (
             "/executePlan",
@@ -298,7 +244,6 @@ def Handle_executePlan():
 
     if USE_CACHE:
         if requestIdentifier in Request_Cache:
-            time.sleep(3)
             return jsonify(Request_Cache[requestIdentifier])
 
     RehearsalLog = executePlan(
@@ -306,21 +251,20 @@ def Handle_executePlan():
         incoming_data["num_StepToRun"],
         incoming_data["RehearsalLog"],
         AgentProfile_Dict,
-        context=cur_context
     )
     Request_Cache[requestIdentifier] = RehearsalLog
     response = jsonify(RehearsalLog)
     return response
 
 
-@app.route("/_saveRequestCashe", methods=["GET"])
+@app.route("/_saveRequestCashe", methods=["post"])
 def Handle_saveRequestCashe():
     with open(
         os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "w"
     ) as json_file:
         json.dump(Request_Cache, json_file, indent=4)
     response = jsonify(
-        {"code": 200, "content": "request cashe sucessfully saved: " + os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json")}
+        {"code": 200, "content": "request cashe sucessfully saved"}
     )
     return response
 
@@ -339,10 +283,38 @@ def set_agents():
 
 def init():
     global AgentBoard, AgentProfile_Dict, Request_Cache
-    with open(
-        os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "r"
-    ) as json_file:
-        Request_Cache = json.load(json_file)
+    # Load Request Cache
+    try:
+        with open(
+            os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "r"
+        ) as json_file:
+            Request_Cache = json.load(json_file)
+        print(f"✅ Loaded Request_Cache with {len(Request_Cache)} entries")
+    except Exception as e:
+        print(f"⚠️ Failed to load Request_Cache: {e}")
+        Request_Cache = {}
+
+    # Load Agent Board
+    try:
+        with open(
+            os.path.join(os.getcwd(), "AgentRepo", "agentBoard_v1.json"), "r", encoding="utf-8"
+        ) as json_file:
+            AgentBoard = json.load(json_file)
+        print(f"✅ Loaded AgentBoard with {len(AgentBoard)} agents")
+
+        # Build AgentProfile_Dict
+        AgentProfile_Dict = {}
+        for item in AgentBoard:
+            name = item["Name"]
+            profile = item["Profile"]
+            AgentProfile_Dict[name] = profile
+        print(f"✅ Built AgentProfile_Dict with {len(AgentProfile_Dict)} profiles")
+
+    except Exception as e:
+        print(f"⚠️ Failed to load AgentBoard: {e}")
+        AgentBoard = []
+        AgentProfile_Dict = {}
 
 
 if __name__ == "__main__":
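One design note on the rewritten `init()`: the Name/Profile loop is a plain list-to-dict projection, so a comprehension would be an equivalent, more compact form; a sketch over the same JSON shape:

```python
AgentBoard = [  # same shape as AgentRepo/agentBoard_v1.json
    {"Icon": "a.png", "Name": "Agent A", "Profile": "Does A things"},
    {"Icon": "b.png", "Name": "Agent B", "Profile": "Does B things"},
]
AgentProfile_Dict = {item["Name"]: item["Profile"] for item in AgentBoard}
print(AgentProfile_Dict)  # {'Agent A': 'Does A things', 'Agent B': 'Does B things'}
```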
@@ -352,8 +324,8 @@ if __name__ == "__main__":
     parser.add_argument(
         "--port",
         type=int,
-        default=8017,
-        help="set the port number, 8017 by default.",
+        default=8000,
+        help="set the port number, 8000 by default.",
     )
     args = parser.parse_args()
     init()
@@ -21,12 +21,11 @@ services:
     ports:
       - "8000:8000"
     environment:
-      - OPENAI_API_BASE=https://api.moleapi.com/v1
-      - OPENAI_API_KEY=sk-[redacted]
-      - OPENAI_API_MODEL=gpt-4.1-mini
-      - FAST_DESIGN_MODE=False
+      - OPENAI_API_BASE=https://api.openai.com
+      - OPENAI_API_KEY=
+      - OPENAI_API_MODEL=gpt-4-turbo-preview
+      - FAST_DESIGN_MODE=True
       - GROQ_API_KEY=
-      - USE_CACHE=True
     networks:
       - agentcoord-network
 
frontend-react/.dockerignore (new file, 31 lines)
@@ -0,0 +1,31 @@
+.DS_Store
+
+.pnp
+.pnp.js
+.env.local
+.env.*.local
+.history
+*.log*
+
+node_modules/
+.yarn-integrity
+.pnpm-store/
+*.tsbuildinfo
+.eslintcache
+.changeset/pre.json
+
+dist/
+coverage/
+release/
+output/
+output_resource/
+log/
+
+.vscode/**/*
+!.vscode/settings.json
+!.vscode/extensions.json
+.idea/
+
+**/*/typings/auto-generated
+
+modern.config.local.*
frontend-react/.gitignore (new vendored file, 32 lines)
@@ -0,0 +1,32 @@
+.DS_Store
+.env
+
+.pnp
+.pnp.js
+.env.local
+.env.*.local
+.history
+*.log*
+
+node_modules/
+.yarn-integrity
+.pnpm-store/
+*.tsbuildinfo
+.eslintcache
+.changeset/pre.json
+
+dist/
+coverage/
+release/
+output/
+output_resource/
+log/
+
+.vscode/**/*
+!.vscode/settings.json
+!.vscode/extensions.json
+.idea/
+
+**/*/typings/auto-generated
+
+modern.config.local.*
frontend-react/Dockerfile (new file, 22 lines)
@@ -0,0 +1,22 @@
+FROM node:20-alpine AS base
+
+FROM base AS installer
+WORKDIR /app
+COPY package.json .npmrc ./
+RUN npm install
+
+FROM base AS builder
+WORKDIR /app
+COPY --from=installer /app/node_modules ./node_modules
+COPY . .
+RUN npm run build
+
+FROM base AS runner
+WORKDIR /app
+EXPOSE 8080/tcp
+COPY .npmrc ./
+RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
+COPY modern.config.ts package.json ./
+COPY --from=builder /app/dist ./dist
+ENV API_BASE=
+CMD ["npm", "run", "serve"]
frontend-react/README.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+# AgentCoord Frontend
+
+This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
+
+## Installation
+
+You can launch the frontend by simply using `docker-compose` in the root directory of the project.
+
+Or, you can launch the frontend manually by following the steps below.
+
+1. Install the dependencies by running `npm install`.
+
+```bash
+npm install
+```
+
+2. Build the frontend by running `npm run build`.
+
+```bash
+npm run build
+```
+
+3. Start the frontend by running `npm run serve`.
+
+```bash
+npm run serve
+```
+
+Then you can access the frontend by visiting `http://localhost:8080`.
+
+## Development
+
+You can run the frontend in development mode by running `npm run dev`.
+
+```bash
+npm run dev
+```
+
+The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.
Some files were not shown because too many files have changed in this diff.