Compare commits: 631aa6dcdc...main (14 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 6392301833 |  |
|  | ab8c9e294d |  |
|  | 1aa9e280b0 |  |
|  | 4fa5504697 |  |
|  | 041986f5cd |  |
|  | 00ef22505e |  |
|  | b73419b7a0 |  |
|  | 974af053ca |  |
|  | 0c571dec21 |  |
|  | e40cdd1dee |  |
|  | 4604d70848 |  |
|  | 408cd2a384 |  |
|  | a465fc8fbe |  |
|  | c779d7756b |  |
README.md
@@ -2,7 +2,8 @@
 <p align="center">
 <a ><img src="static/icon.png" alt=" AgentCoord: Visually Exploring Coordination Strategy for LLM-based Multi-Agent Collaboration" width="200px"></a>
 </p>
-AgentCoord is an experimental open-source system to help general users design coordination strategies for multiple LLM-based agents (Research paper forthcoming).
+AgentCoord is an experimental open-source system to help general users design coordination strategies for multiple LLM-based agents.
+[Research paper](https://arxiv.org/abs/2404.11943)
 
 ## System Usage
 <a href="https://youtu.be/s56rHJx-eqY" target="_blank"><img src="static/videoPreview.png"
backend/.gitignore (vendored)
@@ -124,6 +124,7 @@ venv/
 ENV/
 env.bak/
 venv.bak/
+.idea/
 
 # Spyder project settings
 .spyderproject
(file header not shown)
@@ -1,24 +1,30 @@
 import asyncio
-import openai
+from openai import OpenAI, AsyncOpenAI
 import yaml
 from termcolor import colored
 import os
 
+# Helper function to avoid circular import
+def print_colored(text, text_color="green", background="on_white"):
+    print(colored(text, text_color, background))
+
 # load config (apikey, apibase, model)
 yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
 try:
     with open(yaml_file, "r", encoding="utf-8") as file:
         yaml_data = yaml.safe_load(file)
 except Exception:
-    yaml_file = {}
+    yaml_data = {}
 OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
     "OPENAI_API_BASE", "https://api.openai.com"
 )
-openai.api_base = OPENAI_API_BASE
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
     "OPENAI_API_KEY", ""
 )
-openai.api_key = OPENAI_API_KEY
+
+# Initialize OpenAI clients
+client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
+async_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE)
 MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
     "OPENAI_API_MODEL", "gpt-4-turbo-preview"
 )
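For readers tracking the SDK migration in this hunk: openai<1.0 configured the module globally (`openai.api_key`, `openai.api_base`) and routed calls through `openai.ChatCompletion`, while openai>=1.0 binds configuration to explicit client objects, which is what the new `client`/`async_client` pair does. One caveat: the post-1.0 client's own default `base_url` is `https://api.openai.com/v1`, so the inherited `https://api.openai.com` default may need a `/v1` suffix when pointed at the official API. A minimal sketch of the new style, with placeholder key and model:

```python
from openai import OpenAI

# openai>=1.0 style, as adopted above: configuration lives on the client
# object rather than module-level globals, so clients with different keys or
# base URLs can coexist in one process. Key and model are placeholders.
client = OpenAI(api_key="sk-...", base_url="https://api.openai.com/v1")
resp = client.chat.completions.create(
    model="gpt-4-turbo-preview",
    messages=[{"role": "user", "content": "Hello"}],
)
print(resp.choices[0].message.content)  # typed attributes, not resp["choices"]
```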
@@ -71,14 +77,14 @@ def LLM_Completion(
 
 async def _achat_completion_stream_groq(messages: list[dict]) -> str:
     from groq import AsyncGroq
-    client = AsyncGroq(api_key=GROQ_API_KEY)
+    groq_client = AsyncGroq(api_key=GROQ_API_KEY)
 
     max_attempts = 5
 
     for attempt in range(max_attempts):
         print("Attempt to use Groq (Fase Design Mode):")
         try:
-            stream = await client.chat.completions.create(
+            response = await groq_client.chat.completions.create(
                 messages=messages,
                 # model='gemma-7b-it',
                 model="mixtral-8x7b-32768",
@@ -92,9 +98,9 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
-    full_reply_content = stream.choices[0].message.content
+    full_reply_content = response.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
     print()
     return full_reply_content
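A note on the `raise "failed"` fix here and in the twin hunks below: Python 3 only accepts BaseException instances in a raise statement, so the old line masked the real failure behind a TypeError. A two-line demonstration:

```python
# Python 3 only raises BaseException instances; a bare string is a TypeError.
try:
    raise "failed"  # the old form never raised what it meant to
except TypeError as err:
    print(err)  # -> exceptions must derive from BaseException
```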
@@ -103,14 +109,14 @@ async def _achat_completion_stream_groq(messages: list[dict]) -> str:
 async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
     from mistralai.client import MistralClient
     from mistralai.models.chat_completion import ChatMessage
-    client = MistralClient(api_key=MISTRAL_API_KEY)
+    mistral_client = MistralClient(api_key=MISTRAL_API_KEY)
     # client=AsyncGroq(api_key=GROQ_API_KEY)
     max_attempts = 5
 
     for attempt in range(max_attempts):
         try:
             messages[len(messages) - 1]["role"] = "user"
-            stream = client.chat(
+            stream = mistral_client.chat(
                 messages=[
                     ChatMessage(
                         role=message["role"], content=message["content"]
@@ -119,14 +125,13 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
                 ],
                 # model = "mistral-small-latest",
                 model="open-mixtral-8x7b",
-                # response_format={"type": "json_object"},
             )
             break  # If the operation is successful, break the loop
         except Exception:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
     full_reply_content = stream.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
@@ -135,15 +140,11 @@ async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
 
 
 async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-    response = await openai.ChatCompletion.acreate(
+    response = await async_client.chat.completions.create(
         messages=messages,
         max_tokens=4096,
-        n=1,
-        stop=None,
         temperature=0.3,
-        timeout=3,
+        timeout=30,
         model="gpt-3.5-turbo-16k",
         stream=True,
     )
@@ -154,40 +155,33 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
     # iterate through the stream of events
     async for chunk in response:
         collected_chunks.append(chunk)  # save the event response
-        choices = chunk["choices"]
+        choices = chunk.choices
         if len(choices) > 0:
-            chunk_message = chunk["choices"][0].get(
-                "delta", {}
-            )  # extract the message
+            chunk_message = chunk.choices[0].delta
             collected_messages.append(chunk_message)  # save the message
-            if "content" in chunk_message:
+            if chunk_message.content:
                 print(
-                    colored(chunk_message["content"], "blue", "on_white"),
+                    colored(chunk_message.content, "blue", "on_white"),
                     end="",
                 )
     print()
 
     full_reply_content = "".join(
-        [m.get("content", "") for m in collected_messages]
+        [m.content or "" for m in collected_messages if m is not None]
     )
     return full_reply_content
 
 
 async def _achat_completion_json(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-
     max_attempts = 5
 
     for attempt in range(max_attempts):
         try:
-            stream = await openai.ChatCompletion.acreate(
+            response = await async_client.chat.completions.create(
                 messages=messages,
                 max_tokens=4096,
-                n=1,
-                stop=None,
                 temperature=0.3,
-                timeout=3,
+                timeout=30,
                 model=MODEL,
                 response_format={"type": "json_object"},
             )
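The dict-to-attribute changes above reflect the 1.x SDK returning typed `ChatCompletionChunk` objects rather than plain dicts during streaming, and `delta.content` can legitimately be `None` (for example on role-only or final chunks), which the `or ""` guard absorbs. A self-contained sketch of that pattern, assuming `openai>=1.0` and an API key in the environment:

```python
import asyncio
from openai import AsyncOpenAI

async def collect_stream(messages: list[dict]) -> str:
    # Sketch of the streaming pattern used above: chunks are typed objects
    # and delta.content may be None, hence the guard before appending.
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo-16k", messages=messages, stream=True
    )
    parts: list[str] = []
    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            parts.append(chunk.choices[0].delta.content)
    return "".join(parts)

# usage: asyncio.run(collect_stream([{"role": "user", "content": "Hi"}]))
```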
@@ -196,18 +190,17 @@ async def _achat_completion_json(messages: list[dict]) -> str:
             if attempt < max_attempts - 1:  # i is zero indexed
                 continue
             else:
-                raise "failed"
+                raise Exception("failed")
 
-    full_reply_content = stream.choices[0].message.content
+    full_reply_content = response.choices[0].message.content
     print(colored(full_reply_content, "blue", "on_white"), end="")
     print()
     return full_reply_content
 
 
 async def _achat_completion_stream(messages: list[dict]) -> str:
-    openai.api_key = OPENAI_API_KEY
-    openai.api_base = OPENAI_API_BASE
-    response = await openai.ChatCompletion.acreate(
-        **_cons_kwargs(messages), stream=True
-    )
+    try:
+        response = await async_client.chat.completions.create(
+            **_cons_kwargs(messages), stream=True
+        )
 
@@ -217,39 +210,42 @@ async def _achat_completion_stream(messages: list[dict]) -> str:
         # iterate through the stream of events
         async for chunk in response:
             collected_chunks.append(chunk)  # save the event response
-            choices = chunk["choices"]
+            choices = chunk.choices
             if len(choices) > 0:
-                chunk_message = chunk["choices"][0].get(
-                    "delta", {}
-                )  # extract the message
+                chunk_message = chunk.choices[0].delta
                 collected_messages.append(chunk_message)  # save the message
-                if "content" in chunk_message:
+                if chunk_message.content:
                     print(
-                        colored(chunk_message["content"], "blue", "on_white"),
+                        colored(chunk_message.content, "blue", "on_white"),
                         end="",
                     )
         print()
 
         full_reply_content = "".join(
-            [m.get("content", "") for m in collected_messages]
+            [m.content or "" for m in collected_messages if m is not None]
        )
         return full_reply_content
+    except Exception as e:
+        print_colored(f"OpenAI API error in _achat_completion_stream: {str(e)}", "red")
+        raise
 
 
 def _chat_completion(messages: list[dict]) -> str:
-    rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
-    content = rsp["choices"][0]["message"]["content"]
-    return content
+    try:
+        rsp = client.chat.completions.create(**_cons_kwargs(messages))
+        content = rsp.choices[0].message.content
+        return content
+    except Exception as e:
+        print_colored(f"OpenAI API error in _chat_completion: {str(e)}", "red")
+        raise
 
 
 def _cons_kwargs(messages: list[dict]) -> dict:
     kwargs = {
         "messages": messages,
-        "max_tokens": 4096,
-        "n": 1,
-        "stop": None,
-        "temperature": 0.5,
-        "timeout": 3,
+        "max_tokens": 2000,
+        "temperature": 0.3,
+        "timeout": 15,
     }
     kwargs_mode = {"model": MODEL}
     kwargs.update(kwargs_mode)
(file header not shown)
@@ -80,6 +80,10 @@ def generate_AbilityRequirement(General_Goal, Current_Task):
 
 
 def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
+    # Check if Agent_Board is None or empty
+    if Agent_Board is None or len(Agent_Board) == 0:
+        raise ValueError("Agent_Board cannot be None or empty. Please ensure agents are set via /setAgents endpoint before generating a plan.")
+
     messages = [
         {
             "role": "system",
(file header not shown)
@@ -83,4 +83,16 @@ def generate_PlanOutline(InitialObject_List, General_Goal):
             ),
         },
     ]
-    return read_LLM_Completion(messages)["Plan_Outline"]
+    result = read_LLM_Completion(messages)
+    if isinstance(result, dict) and "Plan_Outline" in result:
+        return result["Plan_Outline"]
+    else:
+        # If the format is invalid, return a default plan outline
+        return [
+            {
+                "StepName": "Default Step",
+                "TaskContent": "Generated default plan step due to format error",
+                "InputObject_List": [],
+                "OutputObject": "Default Output"
+            }
+        ]
(file header not shown)
@@ -80,7 +80,14 @@ class BaseAction():
                 Important_Mark = ""
             action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)
 
-        prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
+        # Handle missing agent profiles gracefully
+        if agentName not in AgentProfile_Dict:
+            print_colored(text=f"Warning: Agent '{agentName}' not found in AgentProfile_Dict. Using default profile.", text_color="yellow")
+            agentProfile = f"AI Agent named {agentName}"
+        else:
+            agentProfile = AgentProfile_Dict[agentName]
+
+        prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = agentProfile, General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
         print_colored(text = prompt, text_color="red")
         messages = [{"role":"system", "content": prompt}]
         ActionResult = LLM_Completion(messages,True,False)
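The explicit branch above keeps the warning side effect; where no warning is needed, the same fallback collapses to a `dict.get` with a default. A minimal sketch, with sample data standing in for `AgentProfile_Dict` and `agentName` from the hunk:

```python
# One-line equivalent of the guard above when no warning is needed.
agent_profiles = {"Writer": "An agent that drafts prose"}  # sample data
agent_name = "Critic"
agent_profile = agent_profiles.get(agent_name, f"AI Agent named {agent_name}")
print(agent_profile)  # -> AI Agent named Critic
```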
(file header not shown)
@@ -85,9 +85,17 @@ def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
     # start the group chat
     util.print_colored(TaskDescription, text_color="green")
     ActionHistory = []
+    action_count = 0
+    total_actions = len(TaskProcess)
+
     for ActionInfo in TaskProcess:
+        action_count += 1
         actionType = ActionInfo["ActionType"]
         agentName = ActionInfo["AgentName"]
+
+        # Add progress logging
+        util.print_colored(f"🔄 Executing action {action_count}/{total_actions}: {actionType} by {agentName}", text_color="yellow")
+
         if actionType in Action.customAction_Dict:
             currentAction = Action.customAction_Dict[actionType](
                 info=ActionInfo,
(file header not shown)
@@ -20,6 +20,8 @@ def camel_case_to_normal(s):
 def generate_template_sentence_for_CollaborationBrief(
     input_object_list, output_object, agent_list, step_task
 ):
+    # Ensure step_task is not None
+    step_task = step_task if step_task is not None else "perform the task"
     # Check if the names are in camel case (no spaces) and convert them to normal naming convention
     input_object_list = (
         [
@@ -31,29 +33,48 @@ def generate_template_sentence_for_CollaborationBrief(
     )
     output_object = (
         camel_case_to_normal(output_object)
-        if is_camel_case(output_object)
-        else output_object
+        if output_object is not None and is_camel_case(output_object)
+        else (output_object if output_object is not None else "unknown output")
     )
 
     # Format the agents into a string with proper grammar
+    if agent_list is None or len(agent_list) == 0:
+        agent_str = "Unknown agents"
+    elif all(agent is not None for agent in agent_list):
         agent_str = (
             " and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
             if len(agent_list) > 1
             else agent_list[0]
         )
+    else:
+        # Filter out None values
+        filtered_agents = [agent for agent in agent_list if agent is not None]
+        if filtered_agents:
+            agent_str = (
+                " and ".join([", ".join(filtered_agents[:-1]), filtered_agents[-1]])
+                if len(filtered_agents) > 1
+                else filtered_agents[0]
+            )
+        else:
+            agent_str = "Unknown agents"
+
     if input_object_list is None or len(input_object_list) == 0:
         # Combine all the parts into the template sentence
         template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
     else:
         # Format the input objects into a string with proper grammar
+        # Filter out None values from input_object_list
+        filtered_input_list = [obj for obj in input_object_list if obj is not None]
+        if filtered_input_list:
             input_str = (
                 " and ".join(
-                    [", ".join(input_object_list[:-1]), input_object_list[-1]]
+                    [", ".join(filtered_input_list[:-1]), filtered_input_list[-1]]
                 )
-                if len(input_object_list) > 1
-                else input_object_list[0]
+                if len(filtered_input_list) > 1
+                else filtered_input_list[0]
             )
+        else:
+            input_str = "unknown inputs"
         # Combine all the parts into the template sentence
         template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
 
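Both branches above build natural-language lists with the same idiom; a compact, self-contained version of it, including the None-filtering this hunk adds:

```python
def join_with_and(items: list) -> str:
    # "A", "A and B", "A, B and C": the same idiom as agent_str/input_str
    # above, with None entries filtered out first.
    items = [i for i in items if i is not None]
    if not items:
        return "Unknown"
    if len(items) == 1:
        return items[0]
    return " and ".join([", ".join(items[:-1]), items[-1]])

print(join_with_and(["Writer", None, "Critic", "Editor"]))  # Writer, Critic and Editor
```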
@@ -90,7 +111,7 @@ def read_LLM_Completion(messages, useGroq=True):
             return json.loads(match.group(0).strip())
         except Exception:
             pass
-    raise ("bad format!")
+    return {}  # Return an empty object instead of raising an exception
 
 
 def read_json_content(text):
@@ -111,7 +132,7 @@ def read_json_content(text):
     if match:
         return json.loads(match.group(0).strip())
 
-    raise ("bad format!")
+    return {}  # Return an empty object instead of raising an exception
 
 
 def read_outputObject_content(text, keyword):
@@ -127,4 +148,4 @@ def read_outputObject_content(text, keyword):
     if match:
         return match.group(1).strip()
     else:
-        raise ("bad format!")
+        return ""  # Return an empty string instead of raising an exception
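Like the `raise "failed"` fixes earlier, `raise ("bad format!")` merely parenthesizes a string and could never raise cleanly under Python 3. The replacement sentinels ({} and "") move format handling to callers, and the `isinstance` guard added to `generate_PlanOutline` above is the matching caller-side check. A condensed, self-contained sketch of the new parser contract (the regex here is illustrative only):

```python
import json
import re

def read_json_content(text: str) -> dict:
    # Condensed form of the parser's new contract: extract the first JSON
    # object from LLM output and return {} on bad format instead of raising.
    match = re.search(r"\{.*\}", text, re.DOTALL)
    if match:
        try:
            return json.loads(match.group(0).strip())
        except Exception:
            pass
    return {}

print(read_json_content('noise {"Plan_Outline": []} noise'))  # {'Plan_Outline': []}
print(read_json_content("no json here"))                      # {}
```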
(file header not shown)
@@ -13,6 +13,8 @@ colorMap = {
     "agent": [0, 0, 80],
 }
 
+
+
 def OperateCamelStr(_str: str):
     if " " in _str:
         return _str
(file header not shown)
@@ -18,4 +18,3 @@ python ./server.py --port 8017
 ```
 
 
-
(file header not shown)
@@ -1 +1 @@
-This folder is for request chache so that we can reproduce the error.
+This folder is for request cache so that we can reproduce the error.
requirements.txt
@@ -1,5 +1,5 @@
 Flask==3.0.2
-openai==0.28.1
+openai==2.8.1
 PyYAML==6.0.1
 termcolor==2.4.0
 groq==0.4.2
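The pin jump from openai==0.28.1 to openai==2.8.1 crosses the SDK's 1.0 rewrite, which is what forces every call site in this diff off `openai.ChatCompletion` and onto client objects. A small startup guard that makes the requirement explicit, a sketch using only the standard library:

```python
from importlib.metadata import version

# The client-based call sites in this diff require the post-1.0 openai SDK.
openai_version = version("openai")
if int(openai_version.split(".")[0]) < 1:
    raise RuntimeError(f"openai>=1.0 required, found {openai_version}")
```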
server.py
@@ -213,12 +213,17 @@ def Handle_generate_basePlan():
     if requestIdentifier in Request_Cache:
         return jsonify(Request_Cache[requestIdentifier])
 
+    try:
         basePlan = generate_basePlan(
             General_Goal=incoming_data["General Goal"],
             Agent_Board=AgentBoard,
             AgentProfile_Dict=AgentProfile_Dict,
             InitialObject_List=incoming_data["Initial Input Object"],
         )
+    except ValueError as e:
+        return jsonify({"error": str(e)}), 400
+    except Exception as e:
+        return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
     basePlan_withRenderSpec = Add_Collaboration_Brief_FrontEnd(basePlan)
     Request_Cache[requestIdentifier] = basePlan_withRenderSpec
     response = jsonify(basePlan_withRenderSpec)
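Flask treats a `(response, status)` tuple as body plus status code, so the `ValueError` raised by the new `Agent_Board` guard in `generate_AgentSelection` now reaches clients as a 400 instead of a blanket 500. A condensed sketch of the pattern; the route name and helper below are hypothetical stand-ins, not the actual endpoint:

```python
from flask import Flask, jsonify

app = Flask(__name__)

def generate_base_plan():
    # Hypothetical stand-in for generate_basePlan(...); shows the ValueError path.
    raise ValueError("Agent_Board cannot be None or empty.")

@app.route("/generateBasePlan", methods=["POST"])  # hypothetical route name
def handle_generate_base_plan():
    try:
        base_plan = generate_base_plan()
    except ValueError as e:
        return jsonify({"error": str(e)}), 400   # configuration/client error
    except Exception as e:
        return jsonify({"error": f"An unexpected error occurred: {str(e)}"}), 500
    return jsonify(base_plan)
```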
@@ -278,10 +283,38 @@ def set_agents():
 
 def init():
     global AgentBoard, AgentProfile_Dict, Request_Cache
+
+    # Load Request Cache
+    try:
         with open(
             os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "r"
         ) as json_file:
             Request_Cache = json.load(json_file)
+        print(f"✅ Loaded Request_Cache with {len(Request_Cache)} entries")
+    except Exception as e:
+        print(f"⚠️ Failed to load Request_Cache: {e}")
+        Request_Cache = {}
+
+    # Load Agent Board
+    try:
+        with open(
+            os.path.join(os.getcwd(), "AgentRepo", "agentBoard_v1.json"), "r", encoding="utf-8"
+        ) as json_file:
+            AgentBoard = json.load(json_file)
+        print(f"✅ Loaded AgentBoard with {len(AgentBoard)} agents")
+
+        # Build AgentProfile_Dict
+        AgentProfile_Dict = {}
+        for item in AgentBoard:
+            name = item["Name"]
+            profile = item["Profile"]
+            AgentProfile_Dict[name] = profile
+        print(f"✅ Built AgentProfile_Dict with {len(AgentProfile_Dict)} profiles")
+
+    except Exception as e:
+        print(f"⚠️ Failed to load AgentBoard: {e}")
+        AgentBoard = []
+        AgentProfile_Dict = {}
 
 
 if __name__ == "__main__":
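The loader implies `AgentRepo/agentBoard_v1.json` is a list of objects keyed by `Name` and `Profile`. A minimal example of the assumed shape and the mapping it produces (the sample agent is illustrative):

```python
# Assumed shape of AgentRepo/agentBoard_v1.json, inferred from the loop above:
example_board = [
    {"Name": "DataAnalyst", "Profile": "An agent specialized in data analysis."},
]

# init() reduces the board to a name -> profile mapping:
agent_profile_dict = {item["Name"]: item["Profile"] for item in example_board}
print(agent_profile_dict["DataAnalyst"])
```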
@@ -291,8 +324,8 @@ if __name__ == "__main__":
     parser.add_argument(
         "--port",
         type=int,
-        default=8017,
-        help="set the port number, 8017 by defaul.",
+        default=8000,
+        help="set the port number, 8000 by defaul.",
     )
     args = parser.parse_args()
     init()
frontend-react/.dockerignore (new file, 31 lines)
@@ -0,0 +1,31 @@
+.DS_Store
+
+.pnp
+.pnp.js
+.env.local
+.env.*.local
+.history
+*.log*
+
+node_modules/
+.yarn-integrity
+.pnpm-store/
+*.tsbuildinfo
+.eslintcache
+.changeset/pre.json
+
+dist/
+coverage/
+release/
+output/
+output_resource/
+log/
+
+.vscode/**/*
+!.vscode/settings.json
+!.vscode/extensions.json
+.idea/
+
+**/*/typings/auto-generated
+
+modern.config.local.*
frontend-react/.gitignore (new file, vendored, 32 lines)
@@ -0,0 +1,32 @@
+.DS_Store
+.env
+
+.pnp
+.pnp.js
+.env.local
+.env.*.local
+.history
+*.log*
+
+node_modules/
+.yarn-integrity
+.pnpm-store/
+*.tsbuildinfo
+.eslintcache
+.changeset/pre.json
+
+dist/
+coverage/
+release/
+output/
+output_resource/
+log/
+
+.vscode/**/*
+!.vscode/settings.json
+!.vscode/extensions.json
+.idea/
+
+**/*/typings/auto-generated
+
+modern.config.local.*
frontend-react/Dockerfile (new file, 22 lines)
@@ -0,0 +1,22 @@
+FROM node:20-alpine AS base
+
+FROM base AS installer
+WORKDIR /app
+COPY package.json .npmrc ./
+RUN npm install
+
+FROM base AS builder
+WORKDIR /app
+COPY --from=installer /app/node_modules ./node_modules
+COPY . .
+RUN npm run build
+
+FROM base AS runner
+WORKDIR /app
+EXPOSE 8080/tcp
+COPY .npmrc ./
+RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
+COPY modern.config.ts package.json ./
+COPY --from=builder /app/dist ./dist
+ENV API_BASE=
+CMD ["npm", "run", "serve"]
frontend-react/README.md (new file, 39 lines)
@@ -0,0 +1,39 @@
+# AgentCoord Frontend
+
+This is the frontend for the AgentCoord project. Root project is located [here](https://github.com/AgentCoord/AgentCoord)
+
+## Installation
+
+You can launch the frontend by simply using `docker-compose` in the root directory of the project.
+
+Or, you can launch the frontend manually by following the steps below.
+
+1. Install the dependencies by running `npm install`.
+
+```bash
+npm install
+```
+
+2. Build the frontend by running `npm run build`.
+
+```bash
+npm run build
+```
+
+3. Start the frontend by running `npm run serve`.
+
+```bash
+npm run serve
+```
+
+Then you can access the frontend by visiting `http://localhost:8080`.
+
+## Development
+
+You can run the frontend in development mode by running `npm run dev`.
+
+```bash
+npm run dev
+```
+
+The frontend website requires the backend server to be running. You can configure the backend server address by copying the `.env.example` file to `.env` and changing the `API_BASE` value to the backend server address.
Some files were not shown because too many files have changed in this diff.