Add backend and frontend

This commit is contained in:
Gk0Wk 2024-04-07 15:04:00 +08:00
parent 49fdd9cc43
commit 84a7cb1b7e
233 changed files with 29927 additions and 0 deletions

BIN
.DS_Store vendored Normal file

Binary file not shown.

149
backend/.dockerignore Normal file
@@ -0,0 +1,149 @@
.gitignore
.git
.idea
.vscode
.DS_Store
*.log
local_settings.py
*.sqlite3
*.pot
*.pyc
__pycache__
*.db
gmon.out
# Backup files #
*.bak
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.conda
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

144
backend/.gitignore vendored Normal file
@@ -0,0 +1,144 @@
.DS_Store
*.log
local_settings.py
*.sqlite3
*.pot
*.pyc
__pycache__
*.db
gmon.out
# Backup files #
*.bak
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.conda
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

@@ -0,0 +1,256 @@
import asyncio
import openai
import yaml
from termcolor import colored
import os
from groq import AsyncGroq
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
# load config (apikey, apibase, model)
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
with open(yaml_file, "r", encoding="utf-8") as file:
yaml_data = yaml.safe_load(file)
except Exception:
yaml_data = {}  # fall back to environment variables if the config file is missing
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
"OPENAI_API_BASE", "https://api.openai.com"
)
openai.api_base = OPENAI_API_BASE
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
"OPENAI_API_KEY", ""
)
openai.api_key = OPENAI_API_KEY
MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
"OPENAI_API_MODEL", "gpt-4-turbo-preview"
)
FAST_DESIGN_MODE = os.getenv("FAST_DESIGN_MODE")  # str or None; normalized to a bool below
if FAST_DESIGN_MODE is None:
FAST_DESIGN_MODE = yaml_data.get("FAST_DESIGN_MODE", False)
else:
FAST_DESIGN_MODE = FAST_DESIGN_MODE.lower() in ["true", "1", "yes"]
GROQ_API_KEY = os.getenv("GROQ_API_KEY") or yaml_data.get("GROQ_API_KEY", "")
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get(
"MISTRAL_API_KEY", ""
)
# for LLM completion
def LLM_Completion(
messages: list[dict], stream: bool = True, useGroq: bool = True
) -> str:
if not useGroq or not FAST_DESIGN_MODE:
force_gpt4 = True
useGroq = False
else:
force_gpt4 = False
useGroq = True
if stream:
try:
loop = asyncio.get_event_loop()
except RuntimeError as ex:
if "There is no current event loop in thread" in str(ex):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if useGroq:
if force_gpt4:
return loop.run_until_complete(
_achat_completion_json(messages=messages)
)
else:
return loop.run_until_complete(
_achat_completion_stream_groq(messages=messages)
)
else:
return loop.run_until_complete(
_achat_completion_stream(messages=messages)
)
# return asyncio.run(_achat_completion_stream(messages = messages))
else:
return _chat_completion(messages=messages)
async def _achat_completion_stream_groq(messages: list[dict]) -> str:
client = AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
print("Attempt to use Groq (Fase Design Mode):")
try:
stream = await client.chat.completions.create(
messages=messages,
# model='gemma-7b-it',
model="mixtral-8x7b-32768",
# model='llama2-70b-4096',
temperature=0.3,
response_format={"type": "json_object"},
stream=False,
)
break
except Exception:
if attempt < max_attempts - 1:  # retry until all attempts are exhausted
continue
else:
raise RuntimeError("Groq completion failed after retries")
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
client = MistralClient(api_key=MISTRAL_API_KEY)
# client=AsyncGroq(api_key=GROQ_API_KEY)
max_attempts = 5
for attempt in range(max_attempts):
try:
messages[len(messages) - 1]["role"] = "user"
stream = client.chat(
messages=[
ChatMessage(
role=message["role"], content=message["content"]
)
for message in messages
],
# model = "mistral-small-latest",
model="open-mixtral-8x7b",
# response_format={"type": "json_object"},
)
break # If the operation is successful, break the loop
except Exception:
if attempt < max_attempts - 1:  # retry until all attempts are exhausted
continue
else:
raise RuntimeError("Mistral completion failed after retries")
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=3,
model="gpt-3.5-turbo-16k",
stream=True,
)
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if "content" in chunk_message:
print(
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.get("content", "") for m in collected_messages]
)
return full_reply_content
async def _achat_completion_json(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
max_attempts = 5
for attempt in range(max_attempts):
try:
stream = await openai.ChatCompletion.acreate(
messages=messages,
max_tokens=4096,
n=1,
stop=None,
temperature=0.3,
timeout=3,
model=MODEL,
response_format={"type": "json_object"},
)
break
except Exception:
if attempt < max_attempts - 1:  # retry until all attempts are exhausted
continue
else:
raise RuntimeError("OpenAI completion failed after retries")
full_reply_content = stream.choices[0].message.content
print(colored(full_reply_content, "blue", "on_white"), end="")
print()
return full_reply_content
async def _achat_completion_stream(messages: list[dict]) -> str:
openai.api_key = OPENAI_API_KEY
openai.api_base = OPENAI_API_BASE
response = await openai.ChatCompletion.acreate(
**_cons_kwargs(messages), stream=True
)
# create variables to collect the stream of chunks
collected_chunks = []
collected_messages = []
# iterate through the stream of events
async for chunk in response:
collected_chunks.append(chunk) # save the event response
choices = chunk["choices"]
if len(choices) > 0:
chunk_message = chunk["choices"][0].get(
"delta", {}
) # extract the message
collected_messages.append(chunk_message) # save the message
if "content" in chunk_message:
print(
colored(chunk_message["content"], "blue", "on_white"),
end="",
)
print()
full_reply_content = "".join(
[m.get("content", "") for m in collected_messages]
)
return full_reply_content
def _chat_completion(messages: list[dict]) -> str:
rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
content = rsp["choices"][0]["message"]["content"]
return content
def _cons_kwargs(messages: list[dict]) -> dict:
kwargs = {
"messages": messages,
"max_tokens": 4096,
"n": 1,
"stop": None,
"temperature": 0.5,
"timeout": 3,
}
kwargs_mode = {"model": MODEL}
kwargs.update(kwargs_mode)
return kwargs
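# Usage sketch (illustrative only; assumes a valid API key is configured):
# LLM_Completion takes OpenAI-style chat messages and returns the reply text.
#
#   messages = [{"role": "system", "content": "Return a JSON greeting."}]
#   reply = LLM_Completion(messages, stream=True, useGroq=False)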

@@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import LLMAPI

@@ -0,0 +1,137 @@
import json
from AgentCoord.util.converter import read_LLM_Completion
from typing import List, Dict
from pydantic import BaseModel, RootModel
PROMPT_ABILITY_REQUIREMENT_GENERATION = """
## Instruction
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirement that is required by the "Current Task". The ability should be summarized concisely within a few words.
## General Goal (The general goal for the collaboration plan; "Current Task" is just one of its substeps)
{General_Goal}
## Current Task
{Current_Task}
## Output Format Example (Specify the output format)
```json
{{
"AbilityRequirement":["Critical Thinking", "Understanding of VR", "Story Structuring Skill"]
}}
```
"""
class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
AbilityRequirement: List[str]
def generate_AbilityRequirement(General_Goal, Current_Task):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
General_Goal=General_Goal,
Current_Task=json.dumps(Current_Task, indent=4),
),
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
PROMPT_AGENT_ABILITY_SCORING = """
## Instruction
Based on "Agent Board" and "Ability Requirement", output a score for each agent to estimate the possibility that the agent can fulfil the "Ability Requirement". The score should be 1-5. Provide a concise reason before you assign the score.
## Agent Board
{Agent_Board}
## Ability Requirement
{Ability_Requirement}
## Attention
Do not omit any agent from the "Agent Board".
## Output Format Example (Specify the output format)
```json
{{
"Sam":{{
"Reason":"...",
"Score": 1
}},
"Alice":{{
"Reason":"...",
"Score": 5
}}
}}
```
"""
class JSON_Agent(BaseModel):
Reason: str
Score: int
class JSON_AGENT_ABILITY_SCORING(RootModel):
root: Dict[str, JSON_Agent]
def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
scoreTable = {}
for Ability_Requirement in Ability_Requirement_List:
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_AGENT_ABILITY_SCORING.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_AGENT_ABILITY_SCORING.format(
Agent_Board=json.dumps(Agent_Board, indent=4),
Ability_Requirement=Ability_Requirement,
),
},
]
print(messages[1]["content"])
scoreTable[Ability_Requirement] = read_LLM_Completion(messages)
return scoreTable
def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
Current_Task = {
"TaskName": stepTask["StepName"],
"InputObject_List": stepTask["InputObject_List"],
"OutputObject": stepTask["OutputObject"],
"TaskContent": stepTask["TaskContent"],
}
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
General_Goal=General_Goal,
Current_Task=json.dumps(Current_Task, indent=4),
),
},
]
Ability_Requirement_List = read_LLM_Completion(messages)[
"AbilityRequirement"
]
scoreTable = agentAbilityScoring(Agent_Board, Ability_Requirement_List)
return scoreTable
def AgentSelectModify_addAspect(aspectList, Agent_Board):
scoreTable = agentAbilityScoring(Agent_Board, aspectList)
return scoreTable

@@ -0,0 +1,116 @@
import json
from AgentCoord.util.converter import read_LLM_Completion
from typing import List
from pydantic import BaseModel
import random
MIN_TEAM_SIZE = 2  # the minimum number of agents allowed in a task for the initial coordination strategy.
MAX_TEAM_SIZE = 3  # the maximum number of agents allowed in a task for the initial coordination strategy. You can set it to any number larger than MIN_TEAM_SIZE. We recommend starting with fewer agents and manually adding more.
PROMPT_ABILITY_REQUIREMENT_GENERATION = """
## Instruction
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirement that is required by the "Current Task". The ability should be summarized concisely within a few words.
## General Goal (The general goal for the collaboration plan; "Current Task" is just one of its substeps)
{General_Goal}
## Current Task
{Current_Task}
## Output Format Example (Specify the output format)
```json
{{
"AbilityRequirement":["Critical Thinking", "Understanding of VR", "Story Structuring Skill"]
}}
```
"""
class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
AbilityRequirement: List[str]
PROMPT_AGENT_SELECTION_GENERATION = """
## Instruction
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".
## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}
## Current Task
{Current_Task}
## Agent Board (Specify the list of agents available for selection)
{Agent_Board}
## Attention
Just use agents in Agent Board.
## Output Format Example (Select one or more agent)
```json
{{
"AgentSelectionPlan": ["Alice","Bob"]
}}
```
"""
class JSON_AGENT_SELECTION_GENERATION(BaseModel):
AgentSelectionPlan: List[str]
def generate_AbilityRequirement(General_Goal, Current_Task):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
General_Goal=General_Goal,
Current_Task=json.dumps(Current_Task, indent=4),
),
},
]
print(messages[1]["content"])
return read_LLM_Completion(messages)["AbilityRequirement"]
def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_AGENT_SELECTION_GENERATION.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_AGENT_SELECTION_GENERATION.format(
General_Goal=General_Goal,
Current_Task=json.dumps(Current_Task, indent=4),
Agent_Board=json.dumps(Agent_Board, indent=4),
),
},
]
print(messages[1]["content"])
agentboard_set = {agent["Name"] for agent in Agent_Board}
while True:
candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
if len(candidate) > MAX_TEAM_SIZE:
teamSize = random.randint(2, MAX_TEAM_SIZE)
candidate = candidate[0:teamSize]
elif len(candidate) < MIN_TEAM_SIZE:
continue
AgentSelectionPlan = sorted(candidate)
AgentSelectionPlan_set = set(AgentSelectionPlan)
# Check if every item in AgentSelectionPlan is in agentboard
if AgentSelectionPlan_set.issubset(agentboard_set):
break # If all items are in agentboard, break the loop
# AgentSelectionPlan= sorted(read_LLM_Completion(messages)["AgentSelectionPlan"]) # sort the select agent list for unique sequence
return AgentSelectionPlan
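# Usage sketch (hypothetical values): Current_Task mirrors the dict built by the
# plan generators; each Agent_Board entry needs at least a "Name" field.
#
#   board = [{"Name": "Alice", "Profile": "Poet"}, {"Name": "Bob", "Profile": "Engineer"}]
#   task = {"TaskName": "Story Outline Drafting", "InputObject_List": ["Love Element"],
#           "OutputObject": "Story Outline", "TaskContent": "Draft the story outline."}
#   team = generate_AgentSelection("Write a love story", task, board)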

@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

@@ -0,0 +1,55 @@
from AgentCoord.PlanEngine.planOutline_Generator import generate_PlanOutline
from AgentCoord.PlanEngine.AgentSelection_Generator import (
generate_AgentSelection,
)
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util
def generate_basePlan(
General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
):
basePlan = {
"Initial Input Object": InitialObject_List,
"Collaboration Process": [],
}
PlanOutline = generate_PlanOutline(
InitialObject_List=[], General_Goal=General_Goal
)
for stepItem in PlanOutline:
Current_Task = {
"TaskName": stepItem["StepName"],
"InputObject_List": stepItem["InputObject_List"],
"OutputObject": stepItem["OutputObject"],
"TaskContent": stepItem["TaskContent"],
}
AgentSelection = generate_AgentSelection(
General_Goal=General_Goal,
Current_Task=Current_Task,
Agent_Board=Agent_Board,
)
Current_Task_Description = {
"TaskName": stepItem["StepName"],
"AgentInvolved": [
{"Name": name, "Profile": AgentProfile_Dict[name]}
for name in AgentSelection
],
"InputObject_List": stepItem["InputObject_List"],
"OutputObject": stepItem["OutputObject"],
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
stepItem["InputObject_List"],
stepItem["OutputObject"],
AgentSelection,
stepItem["TaskContent"],
),
}
TaskProcess = generate_TaskProcess(
General_Goal=General_Goal,
Current_Task_Description=Current_Task_Description,
)
# add the generated AgentSelection and TaskProcess to the stepItem
stepItem["AgentSelection"] = AgentSelection
stepItem["TaskProcess"] = TaskProcess
basePlan["Collaboration Process"].append(stepItem)
basePlan["General Goal"] = General_Goal
return basePlan
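# Usage sketch (hypothetical inputs): AgentProfile_Dict maps each agent name in
# Agent_Board to its profile string (see util.converter.create_agent_dict).
#
#   board = [{"Name": "Alice", "Profile": "Poet"}, {"Name": "Bob", "Profile": "Engineer"}]
#   profiles = {a["Name"]: a["Profile"] for a in board}
#   plan = generate_basePlan("Write a love story about AI", board, profiles, [])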

@@ -0,0 +1,102 @@
from AgentCoord.util.converter import read_LLM_Completion
import json
from typing import List
from pydantic import BaseModel, Field
PROMPT_PLAN_OUTLINE_BRANCHING = """
## Instruction
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the plan for "General Goal".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
## General Goal (Specify the general goal for the plan)
{General_Goal}
## Initial Key Object List (Specify the list of initial key objects available for use as the input object of a Step)
{InitialObject_List}
## Existing Steps
{Existing_Steps}
## Baseline Completion
{Baseline_Completion}
## Modification Requirement
{Modification_Requirement}
## Output Format Example (Specify the output format)
```json
{{
"Remaining Steps": [
{{
"StepName": "xx",
"TaskContent": "xxx...",
"InputObject_List": [xx],
"OutputObject": "xx"
}},
{{
"StepName": "xx",
"TaskContent": "xxx...",
"InputObject_List": [xx],
"OutputObject": "xx"
}},
{{
"StepName": "xx",
"TaskContent": "xxx...",
"InputObject_List": [xx],
"OutputObject": "xx"
}}
]
}}
```
## Format Explanation (Explain the Output Format):
TaskContent: Describe the task of the current step.
InputObject_List: The list of input objects that will be used in the current step.
OutputObject: The name of the final output object of the current step.
StepName: Provide a CONCISE and UNIQUE name for this step that smartly summarizes what the step is doing.
"""
class JSON_Step(BaseModel):
StepName: str
TaskContent: str
InputObject_List: List[str]
OutputObject: str
class JSON_PLAN_OUTLINE_BRANCHING(BaseModel):
Remaining_Steps: List[JSON_Step] = Field(..., alias="Remaining Steps")
def branch_PlanOutline(
branch_Number,
Modification_Requirement,
Existing_Steps,
Baseline_Completion,
InitialObject_List,
General_Goal,
):
prompt = PROMPT_PLAN_OUTLINE_BRANCHING.format(
Modification_Requirement=Modification_Requirement,
Existing_Steps=json.dumps(Existing_Steps, indent=4),
Baseline_Completion=json.dumps(Baseline_Completion, indent=4),
InitialObject_List=str(InitialObject_List),
General_Goal=General_Goal,
)
print(prompt)
branch_List = []
for _ in range(branch_Number):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_PLAN_OUTLINE_BRANCHING.model_json_schema(), indent=2)}",
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
"Remaining Steps"
]
branch_List.append(Remaining_Steps)
return branch_List

@@ -0,0 +1,153 @@
from AgentCoord.util.converter import read_LLM_Completion
import AgentCoord.util as util
from typing import List
from pydantic import BaseModel, Field
import json
ACT_SET = """
- Propose (Propose something that contribute to the current task):
- Description Format Example:
Propose some suggestion for the development of the story from emotional aspects.
- Critique (Provide feedback to the action result of other agents):
- Description Format Example:
Critique the logic soundness of the story outline written by Alice.
- Improve (Improve the result of a previous action):
- Description Format Example:
Improve the story outline written by Bob based on the feedback by Jordan.
- Finalize (Deliver the final result based on previous actions):
- Description Format Example:
Summarize the discussion and submit the final version of the Story Outline.
"""
PROMPT_TASK_PROCESS_BRANCHING = """
## Instruction
Based on "Existing Steps", your task is to comeplete the "Remaining Steps" for the "Task for Current Step".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
{General_Goal}
## Task for Current Step
{Current_Task_Description}
## Action Set (The list of Action Type Available)
{Act_Set}
## Existing Steps
{Existing_Steps}
## Baseline Completion
{Baseline_Completion}
## Modification Requirement
{Modification_Requirement}
## Output Format Example (Specify the output format)
```json
{{
"Remaining Steps": [
{{
"ID": "Action4",
"ActionType": "Propose",
"AgentName": "Mia",
"Description": "Propose psychological theories on love and attachment that could be applied to AI's emotional development.",
"ImportantInput": [
"InputObject:Story Outline"
]
}},
{{
"ID": "Action5",
"ActionType": "Propose",
"AgentName": "Noah",
"Description": "Propose ethical considerations and philosophical questions regarding AI's capacity for love.",
"ImportantInput": []
}},
{{
"ID": "Action6",
"ActionType": "Finalize",
"AgentName": "Liam",
"Description": "Combine the poetic elements and ethical considerations into a cohesive set of core love elements for the story.",
"ImportantInput": [
"ActionResult:Action1",
"ActionResult:Action5"
]
}}
]
}}
```
## Format Explanation (Explain the Output Format):
ImportantInput: Specify whether any previous result should be given special consideration during the execution of the action. Entries should be of the format "InputObject:xx" or "ActionResult:xx".
InputObject_List: List the existing objects that should be utilized in the current step.
AgentName: Specify the agent who will perform the action. You CAN ONLY USE A NAME THAT APPEARS IN "AgentInvolved".
ActionType: Specify the type of action; note that only the last action can be of type "Finalize", and the last action must be "Finalize".
"""
class JSON_Step(BaseModel):
ID: str
ActionType: str
AgentName: str
Description: str
ImportantInput: List[str]
class JSON_TASK_PROCESS_BRANCHING(BaseModel):
Remaining_Steps: List[JSON_Step] = Field(..., alias="Remaining Steps")
def branch_TaskProcess(
branch_Number,
Modification_Requirement,
Existing_Steps,
Baseline_Completion,
stepTaskExisting,
General_Goal,
AgentProfile_Dict,
):
Current_Task_Description = {
"TaskName": stepTaskExisting["StepName"],
"AgentInvolved": [
{"Name": name, "Profile": AgentProfile_Dict[name]}
for name in stepTaskExisting["AgentSelection"]
],
"InputObject_List": stepTaskExisting["InputObject_List"],
"OutputObject": stepTaskExisting["OutputObject"],
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
stepTaskExisting["InputObject_List"],
stepTaskExisting["OutputObject"],
stepTaskExisting["AgentSelection"],
stepTaskExisting["TaskContent"],
),
}
prompt = PROMPT_TASK_PROCESS_BRANCHING.format(
Modification_Requirement=Modification_Requirement,
Current_Task_Description=json.dumps(
Current_Task_Description, indent=4
),
Existing_Steps=json.dumps(Existing_Steps, indent=4),
Baseline_Completion=json.dumps(Baseline_Completion, indent=4),
General_Goal=General_Goal,
Act_Set=ACT_SET,
)
print(prompt)
branch_List = []
for _ in range(branch_Number):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(JSON_TASK_PROCESS_BRANCHING.model_json_schema(), indent=2)}",
},
{"role": "system", "content": prompt},
]
Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
"Remaining Steps"
]
branch_List.append(Remaining_Steps)
return branch_List

@@ -0,0 +1,42 @@
from AgentCoord.PlanEngine.AgentSelection_Generator import (
generate_AgentSelection,
)
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util
def fill_stepTask(General_Goal, stepTask, Agent_Board, AgentProfile_Dict):
Current_Task = {
"TaskName": stepTask["StepName"],
"InputObject_List": stepTask["InputObject_List"],
"OutputObject": stepTask["OutputObject"],
"TaskContent": stepTask["TaskContent"],
}
AgentSelection = generate_AgentSelection(
General_Goal=General_Goal,
Current_Task=Current_Task,
Agent_Board=Agent_Board,
)
Current_Task_Description = {
"TaskName": stepTask["StepName"],
"AgentInvolved": [
{"Name": name, "Profile": AgentProfile_Dict[name]}
for name in AgentSelection
],
"InputObject_List": stepTask["InputObject_List"],
"OutputObject": stepTask["OutputObject"],
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
stepTask["InputObject_List"],
stepTask["OutputObject"],
AgentSelection,
stepTask["TaskContent"],
),
}
TaskProcess = generate_TaskProcess(
General_Goal=General_Goal,
Current_Task_Description=Current_Task_Description,
)
# add the generated AgentSelection and TaskProcess to the stepItem
stepTask["AgentSelection"] = AgentSelection
stepTask["TaskProcess"] = TaskProcess
return stepTask

@@ -0,0 +1,29 @@
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util
def fill_stepTask_TaskProcess(General_Goal, stepTask, AgentProfile_Dict):
AgentSelection = stepTask["AgentSelection"]
Current_Task_Description = {
"TaskName": stepTask["StepName"],
"AgentInvolved": [
{"Name": name, "Profile": AgentProfile_Dict[name]}
for name in AgentSelection
],
"InputObject_List": stepTask["InputObject_List"],
"OutputObject": stepTask["OutputObject"],
"CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
stepTask["InputObject_List"],
stepTask["OutputObject"],
AgentSelection,
stepTask["TaskContent"],
),
}
TaskProcess = generate_TaskProcess(
General_Goal=General_Goal,
Current_Task_Description=Current_Task_Description,
)
# add the generated AgentSelection and TaskProcess to the stepItem
stepTask["AgentSelection"] = AgentSelection
stepTask["TaskProcess"] = TaskProcess
return stepTask

@@ -0,0 +1,86 @@
from AgentCoord.util.converter import read_LLM_Completion
from typing import List
from pydantic import BaseModel
import json
PROMPT_PLAN_OUTLINE_GENERATION = """
## Instruction
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".
## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
{InitialObject_List}
## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}
## Output Format Example (Specify the output format)
```json
{{
"Plan_Outline": [
{{
"StepName": "Love Element Brainstorming",
"TaskContent": "Decide the main love element in the love story.",
"InputObject_List": [],
"OutputObject": "Love Element"
}},
{{
"StepName": "Story World Buiding",
"TaskContent": "Build the world setting for the story.",
"InputObject_List": [],
"OutputObject": "World Setting"
}},
{{
"StepName": "Story Outline Drafting",
"TaskContent": "Draft the story outline for the story.",
"InputObject_List": ["Love Element", "World Setting"],
"OutputObject": "Story Outline"
}},
{{
"StepName": "Final Story Writing",
"TaskContent": "Writing the final story.",
"InputObject_List": [],
"OutputObject": "Completed Story"
}}
]
}}
```
## Format Explanation (Explain the Output Format):
StepName: Provide a CONCISE and UNIQUE name for this step that smartly summarizes what the step is doing.
TaskContent: Describe the task of the current step.
InputObject_List: The list of input objects that will be used in the current step.
OutputObject: The name of the final output object of the current step.
"""
class Step(BaseModel):
StepName: str
TaskContent: str
InputObject_List: List[str]
OutputObject: str
class PlanOutline(BaseModel):
Plan_Outline: List[Step]
class Config:
extra = "allow"
def generate_PlanOutline(InitialObject_List, General_Goal):
messages = [
{
"role": "system",
"content": "You are a recipe database that outputs recipes in JSON.\n"
f" The JSON object must use the schema: {json.dumps(PlanOutline.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_PLAN_OUTLINE_GENERATION.format(
InitialObject_List=str(InitialObject_List),
General_Goal=General_Goal,
),
},
]
return read_LLM_Completion(messages)["Plan_Outline"]
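# Usage sketch (hypothetical goal): the result is a list of step dicts with
# StepName, TaskContent, InputObject_List and OutputObject keys.
#
#   outline = generate_PlanOutline([], "Write a love story about AI")
#   for step in outline:
#       print(step["StepName"], "->", step["OutputObject"])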

@@ -0,0 +1,139 @@
import json
from typing import List
from pydantic import BaseModel
from AgentCoord.util.converter import read_LLM_Completion
ACT_SET = """
- Propose (Propose something that contribute to the current task):
- Description Format Example:
Propose some suggestion for the development of the story from emotional aspects.
- Critique (Provide feedback to the action result of other agents):
- Description Format Example:
Critique the logic soundness of the story outline written by Alice.
- Improve (Improve the result of a previous action):
- Description Format Example:
Improve the story outline written by Bob based on the feedback by Jordan.
- Finalize (Deliver the final result based on previous actions):
- Description Format Example:
Summarize the discussion and submit the final version of the Story Outline.
"""
PROMPT_TASK_PROCESS_GENERATION = """
## Instruction
Based on "General Goal", "Task for Current Step", "Action Set" and "Output Format Example", design a plan for "Task for Current Step", output a formatted "Task_Process_Plan".
## General Goal (The general goal for the collaboration plan, you just design the plan for one of its step (i.e. "Task for Current Step"))
{General_Goal}
## Task for Current Step
{Current_Task_Description}
## Action Set (The list of Action Type Available)
{Act_Set}
## Output Format Example (Specify the output format)
```json
{{
"Task_Process_Plan": [
{{
"ID": "Action1",
"ActionType": "Propose",
"AgentName": "Bob",
"Description": "Propose an idea about how the AI can get awaken.",
"ImportantInput": []
}},
{{
"ID": "Action2",
"ActionType": "Propose",
"AgentName": "Alice",
"Description": "Propose some suggestion for the development of the story from emotional aspects.",
"ImportantInput": ["InputObject:Love Element Notes"]
}},
{{
"ID": "Action3",
"ActionType": "Propose",
"AgentName": "Bob",
"Description": "Propose a draft story outline, incorporating the idea proposed by Bob and the suggestion proposed by Alice.",
"ImportantInput": ["ActionResult:Action1", "ActionResult:Action2"]
}},
{{
"ID": "Action4",
"ActType": "Critique",
"AgentName": "Jordan",
"Description": "Critique the technical soundness of the draft story outline proposed by Bob."
"ImportantInput": ["ActionResult:Action3"]
}},
{{
"ID": "Action5",
"ActType": "Improve",
"AgentName": "Bob",
"Description": "Improve the technical soundness of the draft story outline based on Jordan's feedback.",
"ImportantInput": ["ActionResult:Action3", "ActionResult:Action4"]
}},
{{
"ID": "Action6",
"ActType": "Finalize",
"AgentName": "Jordan",
"Description": "Polish the improved draft story outline by Bob and submit it as the final version of the Story Outline",
"ImportantInput": ["ActionResult:Action5"]
}}
]
}}
```
## Format Explanation (Explain the Output Format):
ImportantInput: Specify whether any previous result should be given special consideration during the execution of the action. Entries should be of the format "InputObject:xx" or "ActionResult:xx".
InputObject_List: List the existing objects that should be utilized in the current step.
AgentName: Specify the agent who will perform the action. You CAN ONLY USE A NAME THAT APPEARS IN "AgentInvolved".
ActionType: Specify the type of action; note that only the last action can be of type "Finalize", and the last action must be "Finalize".
"""
class Action(BaseModel):
ID: str
ActionType: str
AgentName: str
Description: str
ImportantInput: List[str]
class TaskProcessPlan(BaseModel):
Task_Process_Plan: List[Action]
def generate_TaskProcess(General_Goal, Current_Task_Description):
messages = [
{
"role": "system",
"content": f" The JSON object must use the schema: {json.dumps(TaskProcessPlan.model_json_schema(), indent=2)}",
},
{
"role": "system",
"content": PROMPT_TASK_PROCESS_GENERATION.format(
General_Goal=General_Goal,
Current_Task_Description=json.dumps(
Current_Task_Description, indent=4
),
Act_Set=ACT_SET,
),
},
]
print(messages[1]["content"])
# retry if the generated plan doesn't have the right format (e.g., it uses an agent outside "AgentInvolved")
while True:
response_json = read_LLM_Completion(messages)["Task_Process_Plan"]
response_agents = {action["AgentName"] for action in response_json}
involved_agents = {
agent["Name"]
for agent in Current_Task_Description["AgentInvolved"]
}
if response_agents.issubset(involved_agents):
break
# AgentSelectionPlan= sorted(read_LLM_Completion(messages)["AgentSelectionPlan"]) # sort the select agent list for unique sequence
return response_json
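# Usage sketch (hypothetical task description): "AgentInvolved" must list every
# agent the generated actions may use, since the loop above validates against it.
#
#   desc = {"TaskName": "Story Outline Drafting",
#           "AgentInvolved": [{"Name": "Alice", "Profile": "Poet"}],
#           "InputObject_List": [], "OutputObject": "Story Outline",
#           "CurrentTaskDescription": "Alice drafts the story outline."}
#   actions = generate_TaskProcess("Write a love story", desc)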

@@ -0,0 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .baseAction import BaseAction
from .customAction_Finalize import customAction_Finalize
from .customAction_Improve import customAction_Improve
from .customAction_Critique import customAction_Critique
from .customAction_Propose import customAction_Propose
customAction_Dict = {"Finalize": customAction_Finalize, "Improve": customAction_Improve, "Critique": customAction_Critique, "Propose": customAction_Propose}

@@ -0,0 +1,95 @@
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion
from AgentCoord.util.colorLog import print_colored
import copy
PROMPT_TEMPLATE_TAKE_ACTION_BASE = '''
Your name is {agentName}. You will play the role as the Profile indicates.
Profile: {agentProfile}
You are within a multi-agent collaboration for the "Current Task".
Now it's your turn to take action. Read the "Context Information" and take your action following "Instruction for Your Current Action".
Note: Important inputs for your action are marked with *Important Input*
## Context Information
### General Goal (The "Current Task" is indeed a substep of the general goal)
{General_Goal}
### Current Task
{Current_Task_Description}
### Input Objects (Input objects for the current task)
{Input_Objects}
### History Action
{History_Action}
## Instruction for Your Current Action
{Action_Description}
{Action_Custom_Note}
'''
PROMPT_TEMPLATE_INPUTOBJECT_RECORD = '''
{{
{Important_Mark}
{Input_Objects_Name}:
{Input_Objects_Content}
}}
'''
PROMPT_TEMPLATE_ACTION_RECORD = '''
{{
{Important_Mark}
{AgentName} ({Action_Description}):
{Action_Result}
}}
'''
class BaseAction():
def __init__(self, info, OutputName, KeyObjects) -> None:
self.KeyObjects = KeyObjects
self.OutputName = OutputName
self.Action_Result = None
self.info = info
self.Action_Custom_Note = ""
def postRun_Callback(self) -> None:
return
def run(self, General_Goal, TaskDescription, agentName, AgentProfile_Dict, InputName_List, OutputName, KeyObjects, ActionHistory):
# construct input record
inputObject_Record = ""
for InputName in InputName_List:
ImportantInput_Identifier = "InputObject:" + InputName
if ImportantInput_Identifier in self.info["ImportantInput"]:
Important_Mark = "*Important Input*"
else:
Important_Mark = ""
inputObject_Record += PROMPT_TEMPLATE_INPUTOBJECT_RECORD.format(Input_Objects_Name = InputName, Input_Objects_Content = KeyObjects[InputName], Important_Mark = Important_Mark)
# construct history action record
action_Record = ""
for actionInfo in ActionHistory:
ImportantInput_Identifier = "ActionResult:" + actionInfo["ID"]
if ImportantInput_Identifier in self.info["ImportantInput"]:
Important_Mark = "*Important Input*"
else:
Important_Mark = ""
action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["Description"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark)  # use the action's Description, not the agent name, in the record
prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = AgentProfile_Dict[agentName], General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note)
print_colored(text = prompt, text_color="red")
messages = [{"role":"system", "content": prompt}]
ActionResult = LLM_Completion(messages,True,False)
ActionInfo_with_Result = copy.deepcopy(self.info)
ActionInfo_with_Result["Action_Result"] = ActionResult
# run customizable callback
self.Action_Result = ActionResult
self.postRun_Callback()
return ActionInfo_with_Result

@@ -0,0 +1,16 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
Note: Since you are in a conversation, your critique must be concise, clear, and easy to read; don't overwhelm others. If you want to list some points, list at most 2 points.
'''
class customAction_Critique(BaseAction):
def __init__(self, info, OutputName, KeyObjects) -> None:
self.KeyObjects = KeyObjects
self.OutputName = OutputName
self.Action_Result = None
self.info = info
self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName = OutputName)

@@ -0,0 +1,24 @@
from AgentCoord.util.converter import read_outputObject_content
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
Note: You can say something before you give the final content of {OutputName}. When you decide to give the final content of {OutputName}, it should be enclosed like this:
```{OutputName}
(the content of {OutputName})
```
'''
class customAction_Finalize(BaseAction):
def __init__(self, info, OutputName, KeyObjects) -> None:
self.KeyObjects = KeyObjects
self.OutputName = OutputName
self.Action_Result = None
self.info = info
self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName = OutputName)
def postRun_Callback(self) -> None:
# extract output object
extracted_outputObject = read_outputObject_content(self.Action_Result, self.OutputName)
# add the extracted output object to KeyObjects
self.KeyObjects[self.OutputName] = extracted_outputObject

@@ -0,0 +1,22 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
Note: You can say something before you provide the improved version of the content.
The improved version you provide must be a complete version (e.g. if you provide an improved story, you should give the complete story content, rather than just reporting what you have improved).
When you decide to give the improved version of the content, it should start like this:
## Improved version of xxx
(the improved version of the content)
'''
class customAction_Improve(BaseAction):
def __init__(self, info, OutputName, KeyObjects) -> None:
self.KeyObjects = KeyObjects
self.OutputName = OutputName
self.Action_Result = None
self.info = info
self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName = OutputName)

@@ -0,0 +1,15 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction
ACTION_CUSTOM_NOTE = '''
'''
class customAction_Propose(BaseAction):
def __init__(self, info, OutputName, KeyObjects) -> None:
self.KeyObjects = KeyObjects
self.OutputName = OutputName
self.Action_Result = None
self.info = info
self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName = OutputName)

@@ -0,0 +1,128 @@
import AgentCoord.util as util
import AgentCoord.RehearsalEngine_V2.Action as Action
from termcolor import colored
# Accepts inputs: plan, num_StepToRun (the number of steps to run; if None, run to the end), RehearsalLog, AgentProfile_Dict
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
# Prepare for execution
KeyObjects = {}
finishedStep_index = -1
for logNode in RehearsalLog:
# Increment finishedStep_index: it tracks the index of the latest finished step seen in RehearsalLog; if no step has finished yet, the index stays -1
if logNode["LogNodeType"] == "step":
finishedStep_index += 1
# Set existing key object
if logNode["LogNodeType"] == "object":
KeyObjects[logNode["NodeId"]] = logNode["content"]
# Execute
# specify which steps will be run
if num_StepToRun is None:
run_to = len(plan["Collaboration Process"])
else:
run_to = (finishedStep_index + 1) + num_StepToRun
StepRun_count = 0 # count how many steps are run during execution
# loop for each step to run
for stepDescrip in plan["Collaboration Process"][
(finishedStep_index + 1): run_to
]:
StepRun_count += 1
# collect infos for executing the current step
StepName = (
util.camel_case_to_normal(stepDescrip["StepName"])
if util.is_camel_case(stepDescrip["StepName"])
else stepDescrip["StepName"]
)
TaskContent = stepDescrip["TaskContent"]
InputName_List = (
[
(
util.camel_case_to_normal(obj)
if util.is_camel_case(obj)
else obj
)
for obj in stepDescrip["InputObject_List"]
]
if stepDescrip["InputObject_List"] is not None
else None
)
OutputName = (
util.camel_case_to_normal(stepDescrip["OutputObject"])
if util.is_camel_case(stepDescrip["OutputObject"])
else stepDescrip["OutputObject"]
)
Agent_List = stepDescrip["AgentSelection"]
TaskProcess = stepDescrip["TaskProcess"]
TaskDescription = (
util.converter.generate_template_sentence_for_CollaborationBrief(
input_object_list=InputName_List,
output_object=OutputName,
agent_list=Agent_List,
step_task=TaskContent,
)
)
# init log for RehearsalLog
inputObject_Record = [
{InputName: KeyObjects[InputName]} for InputName in InputName_List
]
stepLogNode = {
"LogNodeType": "step",
"NodeId": StepName,
"InputName_List": InputName_List,
"OutputName": OutputName,
"chatLog": [],
"inputObject_Record": inputObject_Record,
}
objectLogNode = {
"LogNodeType": "object",
"NodeId": OutputName,
"content": None,
}
# start the group chat
util.print_colored(TaskDescription, text_color="green")
ActionHistory = []
for ActionInfo in TaskProcess:
actionType = ActionInfo["ActionType"]
agentName = ActionInfo["AgentName"]
if actionType in Action.customAction_Dict:
currentAction = Action.customAction_Dict[actionType](
info=ActionInfo,
OutputName=OutputName,
KeyObjects=KeyObjects,
)
else:
currentAction = Action.BaseAction(
info=ActionInfo,
OutputName=OutputName,
KeyObjects=KeyObjects,
)
ActionInfo_with_Result = currentAction.run(
General_Goal=plan["General Goal"],
TaskDescription=TaskDescription,
agentName=agentName,
AgentProfile_Dict=AgentProfile_Dict,
InputName_List=InputName_List,
OutputName=OutputName,
KeyObjects=KeyObjects,
ActionHistory=ActionHistory,
)
ActionHistory.append(ActionInfo_with_Result)
# post processing for the group chat (finish)
objectLogNode["content"] = KeyObjects[OutputName]
RehearsalLog.append(stepLogNode)
RehearsalLog.append(objectLogNode)
stepLogNode["ActionHistory"] = ActionHistory
# Return Output
print(
colored(
"$Run " + str(StepRun_count) + "step$",
color="black",
on_color="on_white",
)
)
return RehearsalLog
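# Usage sketch (assumes a plan produced by generate_basePlan): pass None to run
# every remaining step; the returned log alternates "step" and "object" nodes.
#
#   log = executePlan(plan, None, [], AgentProfile_Dict)
#   outputs = [n for n in log if n["LogNodeType"] == "object"]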

@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

@@ -0,0 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .colorLog import print_colored
from .converter import (
generate_template_sentence_for_CollaborationBrief,
remove_render_spec,
camel_case_to_normal,
is_camel_case,
)

@@ -0,0 +1,2 @@
def basePlan_addRenderSpec(basePlan):
return basePlan

@@ -0,0 +1,6 @@
from termcolor import colored
# colored print
def print_colored(text, text_color="green", background="on_white"):
print(colored(text, text_color, background))

@@ -0,0 +1,130 @@
import re
import json
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion
def create_agent_dict(agent_list):
return {agent["Name"]: agent["Profile"] for agent in agent_list}
def is_camel_case(s):
# If there are no spaces, it might be camel case
return " " not in s
def camel_case_to_normal(s):
# Split the camel case string into words
return "".join(" " + c if c.isupper() else c for c in s).lstrip()
def generate_template_sentence_for_CollaborationBrief(
input_object_list, output_object, agent_list, step_task
):
# Check if the names are in camel case (no spaces) and convert them to normal naming convention
input_object_list = (
[
camel_case_to_normal(obj) if is_camel_case(obj) else obj
for obj in input_object_list
]
if input_object_list is not None
else None
)
output_object = (
camel_case_to_normal(output_object)
if is_camel_case(output_object)
else output_object
)
# Format the agents into a string with proper grammar
agent_str = (
" and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
if len(agent_list) > 1
else agent_list[0]
)
if input_object_list is None or len(input_object_list) == 0:
# Combine all the parts into the template sentence
template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
else:
# Format the input objects into a string with proper grammar
input_str = (
" and ".join(
[", ".join(input_object_list[:-1]), input_object_list[-1]]
)
if len(input_object_list) > 1
else input_object_list[0]
)
# Combine all the parts into the template sentence
template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."
return template_sentence
def remove_render_spec(duty_spec):
if isinstance(duty_spec, dict):
return {
k: remove_render_spec(v)
for k, v in duty_spec.items()
if k != "renderSpec"
}
elif isinstance(duty_spec, list):
return [remove_render_spec(item) for item in duty_spec]
else:
return duty_spec
def read_LLM_Completion(messages, useGroq=True):
for _ in range(3):
text = LLM_Completion(messages, useGroq=useGroq)
pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
match = re.search(pattern, text, re.DOTALL)
if match:
return json.loads(match.group(1).strip())
pattern = r"\{.*\}"
match = re.search(pattern, text, re.DOTALL)
if match:
try:
return json.loads(match.group(0).strip())
except Exception:
pass
raise ("bad format!")
def read_json_content(text):
"""
Extracts and returns content between ```json and ```
:param text: The string containing the content enclosed by ```json and ```
"""
# pattern = r"```json(.*?)```"
pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
match = re.search(pattern, text, re.DOTALL)
if match:
return json.loads(match.group(1).strip())
pattern = r"\{.*\}"
match = re.search(pattern, text, re.DOTALL)
if match:
return json.loads(match.group(0).strip())
raise ("bad format!")
def read_outputObject_content(text, keyword):
"""
Extracts and returns content between ```{keyword} and ```
:param text: The string containing the content enclosed by ```{keyword} and ```
"""
pattern = r"```{}(.*?)```".format(keyword)
match = re.search(pattern, text, re.DOTALL)
if match:
return match.group(1).strip()
else:
raise ("bad format!")

@@ -0,0 +1,102 @@
[
{
"Icon": "Abigail_Chen.png",
"Name": "Abigail",
"Profile": "AI Engineer"
},
{
"Icon": "Jane_Moreno.png",
"Name": "Jane",
"Profile": "Cybersecurity Specialist"
},
{
"Icon": "Giorgio_Rossi.png",
"Name": "Giorgio",
"Profile": "Poet"
},
{
"Icon": "Jennifer_Moore.png",
"Name": "Jennifer",
"Profile": "Linguist"
},
{
"Icon": "Maria_Lopez.png",
"Name": "Maria",
"Profile": "Philosopher"
},
{
"Icon": "Sam_Moore.png",
"Name": "Sam",
"Profile": "Ethicist"
},
{
"Icon": "Yuriko_Yamamoto.png",
"Name": "Yuriko",
"Profile": "Futurist"
},
{
"Icon": "Carlos_Gomez.png",
"Name": "Carlos",
"Profile": "Language Expert"
},
{
"Icon": "John_Lin.png",
"Name": "John",
"Profile": "Software Developer"
},
{
"Icon": "Tamara_Taylor.png",
"Name": "Tamara",
"Profile": "Music Composer"
},
{
"Icon": "Arthur_Burton.png",
"Name": "Arthur",
"Profile": "Neuroscientist"
},
{
"Icon": "Eddy_Lin.png",
"Name": "Eddy",
"Profile": "Cognitive Psychologist"
},
{
"Icon": "Isabella_Rodriguez.png",
"Name": "Isabella",
"Profile": "Science Fiction Writer"
},
{
"Icon": "Latoya_Williams.png",
"Name": "Latoya",
"Profile": "Historian of Technology"
},
{
"Icon": "Carmen_Ortiz.png",
"Name": "Carmen",
"Profile": "Robotics Engineer"
},
{
"Icon": "Rajiv_Patel.png",
"Name": "Rajiv",
"Profile": "Science Educator"
},
{
"Icon": "Tom_Moreno.png",
"Name": "Tom",
"Profile": "AI Scientist"
},
{
"Icon": "Ayesha_Khan.png",
"Name": "Ayesha",
"Profile": "Multimedia Artist"
},
{
"Icon": "Mei_Lin.png",
"Name": "Mei",
"Profile": "Graphic Designer"
},
{
"Icon": "Hailey_Johnson.png",
"Name": "Hailey",
"Profile": "Legal Expert on AI Law"
}
]

@@ -0,0 +1,188 @@
import re
from .FormatList import FormatList, TransToFrontFormat
is_mock = False
colorMap = {
"propose": [194, 34, 73],
"execute": [242, 28, 86],
"critique": [68, 24, 70],
"finalize": [14, 27, 73],
"inputobject": [108, 45, 74],
"outputobject": [32, 100, 78],
"agent": [0, 0, 80],
}
def OperateCamelStr(_str: str):
if " " in _str:
return _str
return "".join(" " + c if c.isupper() else c for c in _str).lstrip()
def ParseRole(_str: str) -> dict:
if _str[0] == "(" and _str[-1] == ")":
_str = _str[1:-1].split(":")
return dict({"Name": _str[0], "Job": _str[1]})
def ParseStepTask(step_task: str):
if len(step_task) == 0:
return ""
if step_task[0].isupper() and step_task[-1] == ".":
step_task = step_task[0].lower() + step_task[1:-1]
return step_task
def Generate_Collaboration_Specification_frontEnd(_str):
assert isinstance(_str, str)
res = dict()
res["template"] = ""
res["data"] = dict()
Persons = []
temp = re.findall(r"\(\w*:\w*?\)", _str, flags=re.M)
new_str = re.sub(r" *\(\w*:\w*?\) *", " ", _str, flags=re.M)
words = new_str.split(" ")
for i in range(len(temp)):
temp[i] = ParseRole(temp[i])
res["data"][str(len(Persons))] = dict(
{
"text": temp[i]["Name"],
"color": colorMap[temp[i]["Job"].lower()],
}
)
Persons.append(temp[i]["Name"])
index = int(0)
for i in range(len(words)):
if words[i] in Persons:
words[i] = TransToFrontFormat(index)
index += 1
res["template"] = " ".join(words)
return res
def Generate_Agent_DutySpecification_frontEnd(_obj: dict):
_obj = _obj.copy()
for key, val in _obj.items():
if isinstance(val, list):
for i, v in enumerate(val):
if isinstance(v, dict) and "DutyType" in v.keys():
_obj[key][i]["renderSpec"] = dict(
{"color": colorMap[_obj[key][i]["DutyType"].lower()]}
)
return _obj
def CheckDict(_dict: dict) -> bool:
items = ["InputObject_List", "OutputObject", "TaskContent"]
for e in items:
if e not in _dict.keys():
return False
return True
def Add_Collaboration_Brief_FrontEnd(_obj):
if isinstance(_obj, list):
for i in range(len(_obj)):
_obj[i] = Add_Collaboration_Brief_FrontEnd(_obj[i])
elif isinstance(_obj, dict) and (
CheckDict(_obj) or "Collaboration Process" in _obj.keys()
):
if "Collaboration Process" in _obj.keys():
_obj["Collaboration Process"] = Add_Collaboration_Brief_FrontEnd(
_obj["Collaboration Process"]
)
else:
_obj = _obj.copy()
Agent_List = (
_obj["AgentSelection"][:]
if "AgentSelection" in _obj.keys()
else None
)
InputObject_List = (
[OperateCamelStr(b) for b in _obj["InputObject_List"]]
if "InputObject_List" in _obj.keys()
and _obj["InputObject_List"] is not None
else []
)
OutputObject = (
OperateCamelStr(_obj["OutputObject"])
if "OutputObject" in _obj.keys()
else ""
)
_obj["StepName"] = OperateCamelStr(_obj["StepName"])
_obj["InputObject_List"] = InputObject_List
_obj["OutputObject"] = OutputObject
TaskContent = _obj["TaskContent"]
# add Collaboration Brief
_key = (
"Collaboration_Brief_FrontEnd"
if Agent_List is not None and len(Agent_List) > 0
else "Collaboration_Brief_LackAgent_FrontEnd"
)
_obj[_key] = Generate_Collaboration_Brief_FrontEnd(
InputObject_List, OutputObject, Agent_List, TaskContent
)
return _obj
def Generate_Collaboration_Brief_FrontEnd(
InputObject_List, OutputObject, Agent_List, TaskContent
):
res = dict({})
res["template"] = str("")
res["data"] = dict({})
len_in = len(InputObject_List)
len_out = 1
len_ag = len(Agent_List) if Agent_List is not None else 0
Template_Sentence_InputObject = (
f"Based on {FormatList(InputObject_List).Format()}, "
if len_in > 0
else ""
)
Template_Sentence_Agent = (
f"{FormatList(Agent_List, len_in).Format()} "
if Agent_List is not None and len_ag > 0
else ""
)
Template_Sentence_TaskContent = f"perform{'s' if len_ag == 1 else ''} the task of {ParseStepTask(TaskContent)} "
Template_Sentence_OutputObject = (
f"to obtain {FormatList([OutputObject], len_ag + len_in).Format()}."
if len_out > 0
else ""
)
Template_Sentence = (
Template_Sentence_InputObject
+ Template_Sentence_Agent
+ Template_Sentence_TaskContent
+ Template_Sentence_OutputObject
)
res["template"] = Template_Sentence
index = int(0)
for i in range(len_in):
res["data"][str(index)] = dict(
{
"text": InputObject_List[i],
"color": colorMap["InputObject".lower()],
}
)
index += 1
for i in range(len_ag):
res["data"][str(index)] = dict(
{"text": Agent_List[i], "color": colorMap["Agent".lower()]}
)
index += 1
res["data"][str(index)] = dict(
{"text": OutputObject, "color": colorMap["OutputObject".lower()]}
)
index += 1
return res

@@ -0,0 +1,47 @@
class FormatList:
_list = []
offset: int = 0
def __init__(self, input_list: list, offset: int = 0):
self._list = input_list.copy()
self.offset = offset
def __str__(self):
s = str("")
if len(self._list) <= 0:
return s
if len(self._list) == 1:
return self._list[0]
if len(self._list) == 2:
return " and ".join(self._list)
if len(self._list) > 2:
return ", ".join(self._list[:-1]) + " and " + self._list[-1]
def Format(self):
s = str("")
if len(self._list) <= 0:
return s
if len(self._list) == 1:
return TransToFrontFormat(0 + self.offset)
if len(self._list) == 2:
return " and ".join(
[
TransToFrontFormat(i + self.offset)
for i in range(len(self._list))
]
)
if len(self._list) > 2:
return (
", ".join(
[
TransToFrontFormat(i + self.offset)
for i in range(len(self._list) - 1)
]
)
+ " and "
+ TransToFrontFormat(len(self._list) - 1 + self.offset)
)
def TransToFrontFormat(index: int):
return "!<" + str(index) + ">!"

@@ -0,0 +1 @@
from .DataProcess import Add_Collaboration_Brief_FrontEnd

20
backend/Dockerfile Normal file
@@ -0,0 +1,20 @@
FROM python:3.11.8-alpine3.18
WORKDIR /app
EXPOSE 8000/tcp
# Build
COPY requirements.txt ./
RUN pip --no-cache-dir install -r ./requirements.txt
# Mount
COPY . ./
# Run
ENV OPENAI_API_BASE=https://api.openai.com
ENV OPENAI_API_KEY=
ENV OPENAI_API_MODEL=gpt-4-turbo-preview
ENV USE_CACHE=False
ENV FAST_DESIGN_MODE=True
ENV GROQ_API_KEY=
ENV MISTRAL_API_KEY=
CMD ["python", "server.py", "--port", "8000"]

38
backend/README.md Normal file
View File

@ -0,0 +1,38 @@
# AgentCoord: Visually Exploring Coordination Strategy for LLM-based Multi-Agent Collaboration
<p align="center">
<a ><img src="https://github.com/bopan3/AgentCoord_Backend/assets/21981916/bbad2f66-368f-488a-af36-72e79fdb6805" alt=" AgentCoord: Visually Exploring Coordination Strategy for LLM-based Multi-Agent Collaboration" width="200px"></a>
</p>
AgentCoord is an experimental open-source system to help general users design coordination strategies for multiple LLM-based agents (Research paper forthcoming).
## System Usage
<a href="https://youtu.be/s56rHJx-eqY" target="_blank"><img src="https://github.com/bopan3/AgentCoord_Backend/assets/21981916/0d907e64-2a25-4bdf-977d-e90197ab1aab"
alt="System Usage Video" width="800" border="5" /></a>
## Installation
### Installation
```bash
git clone https://github.com/AgentCoord/AgentCoord.git
cd AgentCoord
pip install -r requirements.txt
```
### Configuration
#### LLM configuration
You can set the configuration (i.e., API base, API key, Model name, Max tokens, Response per minute) for the default LLM in config\config.yaml. Currently, we only support OpenAI's LLMs as the default model. We recommend using gpt-4-0125-preview as the default model (WARNING: the execution process of multiple agents may consume a significant number of tokens).
You can switch to a fast mode that uses the Mixtral 8x7B model with hardware acceleration by [Groq](https://groq.com/) for the first round of strategy generation, striking a balance between response quality and efficiency. To enable it, set the FAST_DESIGN_MODE field in the YAML file to True and fill the GROQ_API_KEY field with an API key from [Groq](https://wow.groq.com/).
#### Agent configuration
Currently, we support configuring agents via [role-prompting](https://arxiv.org/abs/2305.14688). You can customize your agents by changing the role prompts in AgentRepo\agentBoard_v1.json. We plan to support more ways to customize agents (e.g., RAG support, or a unified wrapper for customized agents). An example of the expected entry shape is sketched below.
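Judging from how the `/setAgents` handler in `server.py` reads the board (one `Name` and one `Profile` string per agent), an entry takes roughly the following shape; the agent below is invented for illustration:

```python
# Hypothetical agent-board entry; server.py keys AgentProfile_Dict by "Name".
agent_board = [
    {
        "Name": "Ethan",
        "Profile": "A seasoned editor who gives blunt, structural feedback "
                   "on plot and pacing.",
    },
]
```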
### Launch
Execute the following command to launch the system:
```bash
python server.py
```
## More Papers & Projects for LLM-based Multi-Agent Collaboration
If you're interested in LLM-based multi-agent collaboration and want more papers & projects for reference, you may check out the corpus we have collected:

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
This folder is for the request cache, so that we can reproduce errors.

View File

@ -0,0 +1,12 @@
## config for default LLM
OPENAI_API_BASE: ""
OPENAI_API_KEY: ""
OPENAI_API_MODEL: "gpt-4-turbo-preview"
## config for fast mode
FAST_DESIGN_MODE: True
GROQ_API_KEY: ""
MISTRAL_API_KEY: ""
## options under experimentation; leave them as False unless you know what they are for
USE_CACHE: False

7
backend/requirements.txt Normal file
View File

@ -0,0 +1,7 @@
Flask==3.0.2
openai==1.14.2
PyYAML==6.0.1
termcolor==2.4.0
groq==0.4.2
mistralai==0.1.6
socksio==1.0.0

299
backend/server.py Normal file
View File

@ -0,0 +1,299 @@
from flask import Flask, request, jsonify
import json
from DataProcess import Add_Collaboration_Brief_FrontEnd
from AgentCoord.RehearsalEngine_V2.ExecutePlan import executePlan
from AgentCoord.PlanEngine.basePlan_Generator import generate_basePlan
from AgentCoord.PlanEngine.fill_stepTask import fill_stepTask
from AgentCoord.PlanEngine.fill_stepTask_TaskProcess import (
fill_stepTask_TaskProcess,
)
from AgentCoord.PlanEngine.branch_PlanOutline import branch_PlanOutline
from AgentCoord.PlanEngine.branch_TaskProcess import branch_TaskProcess
from AgentCoord.PlanEngine.AgentSelectModify import (
AgentSelectModify_init,
AgentSelectModify_addAspect,
)
import os
import yaml
import argparse
# initialize global variables
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
    with open(yaml_file, "r", encoding="utf-8") as file:
        yaml_data = yaml.safe_load(file)
except Exception:
    yaml_data = {}  # fall back to an empty config if config.yaml is missing or unreadable
_use_cache_env = os.getenv("USE_CACHE")
if _use_cache_env is None:
    USE_CACHE: bool = yaml_data.get("USE_CACHE", False)
else:
    USE_CACHE = _use_cache_env.lower() in ["true", "1", "yes"]
AgentBoard = None
AgentProfile_Dict = {}
Request_Cache: dict[str, str] = {}
app = Flask(__name__)
@app.route("/fill_stepTask_TaskProcess", methods=["post"])
def Handle_fill_stepTask_TaskProcess():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/fill_stepTask_TaskProcess",
incoming_data["General Goal"],
incoming_data["stepTask_lackTaskProcess"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask_TaskProcess(
General_Goal=incoming_data["General Goal"],
stepTask=incoming_data["stepTask_lackTaskProcess"],
AgentProfile_Dict=AgentProfile_Dict,
)
filled_stepTask = Add_Collaboration_Brief_FrontEnd(filled_stepTask)
Request_Cache[requestIdentifier] = filled_stepTask
response = jsonify(filled_stepTask)
return response
@app.route("/agentSelectModify_init", methods=["post"])
def Handle_agentSelectModify_init():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/agentSelectModify_init",
incoming_data["General Goal"],
incoming_data["stepTask"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
scoreTable = AgentSelectModify_init(
stepTask=incoming_data["stepTask"],
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
)
Request_Cache[requestIdentifier] = scoreTable
response = jsonify(scoreTable)
return response
@app.route("/agentSelectModify_addAspect", methods=["post"])
def Handle_agentSelectModify_addAspect():
incoming_data = request.get_json()
requestIdentifier = str(
("/agentSelectModify_addAspect", incoming_data["aspectList"])
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
scoreTable = AgentSelectModify_addAspect(
aspectList=incoming_data["aspectList"], Agent_Board=AgentBoard
)
Request_Cache[requestIdentifier] = scoreTable
response = jsonify(scoreTable)
return response
@app.route("/fill_stepTask", methods=["post"])
def Handle_fill_stepTask():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/fill_stepTask",
incoming_data["General Goal"],
incoming_data["stepTask"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
filled_stepTask = fill_stepTask(
General_Goal=incoming_data["General Goal"],
stepTask=incoming_data["stepTask"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
)
filled_stepTask = Add_Collaboration_Brief_FrontEnd(filled_stepTask)
Request_Cache[requestIdentifier] = filled_stepTask
response = jsonify(filled_stepTask)
return response
@app.route("/branch_PlanOutline", methods=["post"])
def Handle_branch_PlanOutline():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/branch_PlanOutline",
incoming_data["branch_Number"],
incoming_data["Modification_Requirement"],
incoming_data["Existing_Steps"],
incoming_data["Baseline_Completion"],
incoming_data["Initial Input Object"],
incoming_data["General Goal"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
branchList = branch_PlanOutline(
branch_Number=incoming_data["branch_Number"],
Modification_Requirement=incoming_data["Modification_Requirement"],
Existing_Steps=incoming_data["Existing_Steps"],
Baseline_Completion=incoming_data["Baseline_Completion"],
InitialObject_List=incoming_data["Initial Input Object"],
General_Goal=incoming_data["General Goal"],
)
branchList = Add_Collaboration_Brief_FrontEnd(branchList)
Request_Cache[requestIdentifier] = branchList
response = jsonify(branchList)
return response
@app.route("/branch_TaskProcess", methods=["post"])
def Handle_branch_TaskProcess():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/branch_TaskProcess",
incoming_data["branch_Number"],
incoming_data["Modification_Requirement"],
incoming_data["Existing_Steps"],
incoming_data["Baseline_Completion"],
incoming_data["stepTaskExisting"],
incoming_data["General Goal"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
branchList = branch_TaskProcess(
branch_Number=incoming_data["branch_Number"],
Modification_Requirement=incoming_data["Modification_Requirement"],
Existing_Steps=incoming_data["Existing_Steps"],
Baseline_Completion=incoming_data["Baseline_Completion"],
stepTaskExisting=incoming_data["stepTaskExisting"],
General_Goal=incoming_data["General Goal"],
AgentProfile_Dict=AgentProfile_Dict,
)
Request_Cache[requestIdentifier] = branchList
response = jsonify(branchList)
return response
@app.route("/generate_basePlan", methods=["post"])
def Handle_generate_basePlan():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/generate_basePlan",
incoming_data["General Goal"],
incoming_data["Initial Input Object"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
basePlan = generate_basePlan(
General_Goal=incoming_data["General Goal"],
Agent_Board=AgentBoard,
AgentProfile_Dict=AgentProfile_Dict,
InitialObject_List=incoming_data["Initial Input Object"],
)
basePlan_withRenderSpec = Add_Collaboration_Brief_FrontEnd(basePlan)
Request_Cache[requestIdentifier] = basePlan_withRenderSpec
response = jsonify(basePlan_withRenderSpec)
return response
@app.route("/executePlan", methods=["post"])
def Handle_executePlan():
incoming_data = request.get_json()
requestIdentifier = str(
(
"/executePlan",
incoming_data["num_StepToRun"],
incoming_data["RehearsalLog"],
incoming_data["plan"],
)
)
if USE_CACHE:
if requestIdentifier in Request_Cache:
return jsonify(Request_Cache[requestIdentifier])
RehearsalLog = executePlan(
incoming_data["plan"],
incoming_data["num_StepToRun"],
incoming_data["RehearsalLog"],
AgentProfile_Dict,
)
Request_Cache[requestIdentifier] = RehearsalLog
response = jsonify(RehearsalLog)
return response
@app.route("/_saveRequestCashe", methods=["post"])
def Handle_saveRequestCashe():
with open(
os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json"), "w"
) as json_file:
json.dump(Request_Cache, json_file, indent=4)
response = jsonify(
{"code": 200, "content": "request cashe sucessfully saved"}
)
return response
@app.route("/setAgents", methods=["POST"])
def set_agents():
global AgentBoard, AgentProfile_Dict
AgentBoard = request.json
AgentProfile_Dict = {}
for item in AgentBoard:
name = item["Name"]
profile = item["Profile"]
AgentProfile_Dict[name] = profile
return jsonify({"code": 200, "content": "set agentboard successfully"})
def init():
    global AgentBoard, AgentProfile_Dict, Request_Cache
    cache_path = os.path.join(os.getcwd(), "RequestCache", "Request_Cache.json")
    # The cache file may not exist on a fresh checkout; start with an empty cache then.
    if os.path.exists(cache_path):
        with open(cache_path, "r") as json_file:
            Request_Cache = json.load(json_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="start the backend for AgentCoord"
)
parser.add_argument(
"--port",
type=int,
default=8017,
help="set the port number, 8017 by defaul.",
)
args = parser.parse_args()
init()
app.run(host="0.0.0.0", port=args.port, debug=True)
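For a quick smoke test, here is a minimal client sketch against the endpoints above. The goal, input object, and agent are made up, and the port assumes the Docker default of 8000:

```python
# Hypothetical client for the AgentCoord backend; all values are illustrative.
import requests

BASE = "http://127.0.0.1:8000"

# Register an agent board first; the plan endpoints read from it.
agents = [{"Name": "Ethan", "Profile": "A blunt, structural story editor."}]
print(requests.post(f"{BASE}/setAgents", json=agents).json())

# Then request a base plan for a general goal.
payload = {
    "General Goal": "Write a short science-fiction story",
    "Initial Input Object": ["Story Premise"],
}
print(requests.post(f"{BASE}/generate_basePlan", json=payload).json())
```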

5
frontend/.browserslistrc Normal file
View File

@ -0,0 +1,5 @@
chrome >= 51
edge >= 15
firefox >= 54
safari >= 10
ios_saf >= 10

1
frontend/.env.example Normal file
View File

@ -0,0 +1 @@
API_BASE=http://127.0.0.1:8000

4
frontend/.eslintrc.js Normal file
View File

@ -0,0 +1,4 @@
module.exports = {
root: true,
extends: ['@modern-js'],
};

32
frontend/.gitignore vendored Normal file
View File

@ -0,0 +1,32 @@
.DS_Store
.env
.pnp
.pnp.js
.env.local
.env.*.local
.history
*.log*
node_modules/
.yarn-integrity
.pnpm-store/
*.tsbuildinfo
.eslintcache
.changeset/pre.json
dist/
coverage/
release/
output/
output_resource/
log/
.vscode/**/*
!.vscode/settings.json
!.vscode/extensions.json
.idea/
**/*/typings/auto-generated
modern.config.local.*

4
frontend/.husky/pre-commit Executable file
View File

@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx --no-install lint-staged

2
frontend/.npmrc Normal file
View File

@ -0,0 +1,2 @@
strict-peer-dependencies=false
registry=https://registry.npmmirror.com/

1
frontend/.nvmrc Normal file
View File

@ -0,0 +1 @@
lts/iron

5
frontend/.prettierrc Normal file
View File

@ -0,0 +1,5 @@
{
"singleQuote": true,
"trailingComma": "all",
"arrowParens": "avoid"
}

22
frontend/Dockerfile Normal file
View File

@ -0,0 +1,22 @@
FROM node:20-alpine AS base
FROM base AS installer
WORKDIR /app
COPY package.json .npmrc ./
RUN npm install
FROM base AS builder
WORKDIR /app
COPY --from=installer /app/node_modules ./node_modules
COPY . .
RUN npm run build
FROM base AS runner
WORKDIR /app
EXPOSE 8080/tcp
COPY .npmrc ./
RUN npm install @modern-js/app-tools @modern-js/runtime --no-optional --no-shrinkwrap && mkdir src
COPY modern.config.ts package.json ./
COPY --from=builder /app/dist ./dist
ENV API_BASE=
CMD ["npm", "run", "serve"]

39
frontend/README.md Normal file
View File

@ -0,0 +1,39 @@
# AgentCoord Frontend
This is the frontend for the AgentCoord project. The root project is located [here](https://github.com/AgentCoord/AgentCoord).
## Installation
You can launch the frontend by simply using `docker-compose` in the root directory of the project.
Or, you can launch the frontend manually by following the steps below.
1. Install the dependencies by running `npm install`.
```bash
npm install
```
2. Build the frontend by running `npm run build`.
```bash
npm run build
```
3. Start the frontend by running `npm run serve`.
```bash
npm run serve
```
Then you can access the frontend by visiting `http://localhost:8080`.
## Development
You can run the frontend in development mode by running `npm run dev`.
```bash
npm run dev
```
The frontend requires the backend server to be running. You can configure the backend address by copying the `.env.example` file to `.env` and setting `API_BASE` to your backend server's address.

Some files were not shown because too many files have changed in this diff