Add backend and frontend
backend/AgentCoord/LLMAPI/LLMAPI.py (new file, 256 lines)
@@ -0,0 +1,256 @@
import asyncio
import openai
import yaml
from termcolor import colored
import os
from groq import AsyncGroq
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

# load config (apikey, apibase, model)
yaml_file = os.path.join(os.getcwd(), "config", "config.yaml")
try:
    with open(yaml_file, "r", encoding="utf-8") as file:
        yaml_data = yaml.safe_load(file)
except Exception:
    yaml_data = {}
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get(
    "OPENAI_API_BASE", "https://api.openai.com"
)
openai.api_base = OPENAI_API_BASE
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get(
    "OPENAI_API_KEY", ""
)
openai.api_key = OPENAI_API_KEY
MODEL: str = os.getenv("OPENAI_API_MODEL") or yaml_data.get(
    "OPENAI_API_MODEL", "gpt-4-turbo-preview"
)
FAST_DESIGN_MODE = os.getenv("FAST_DESIGN_MODE")
if FAST_DESIGN_MODE is None:
    FAST_DESIGN_MODE = yaml_data.get("FAST_DESIGN_MODE", False)
else:
    FAST_DESIGN_MODE = FAST_DESIGN_MODE.lower() in ["true", "1", "yes"]
GROQ_API_KEY = os.getenv("GROQ_API_KEY") or yaml_data.get("GROQ_API_KEY", "")
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get(
    "MISTRAL_API_KEY", ""
)


# for LLM completion
def LLM_Completion(
    messages: list[dict], stream: bool = True, useGroq: bool = True
) -> str:
    if not useGroq or not FAST_DESIGN_MODE:
        force_gpt4 = True
        useGroq = False
    else:
        force_gpt4 = False
        useGroq = True

    if stream:
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError as ex:
            if "There is no current event loop in thread" in str(ex):
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
        if useGroq:
            if force_gpt4:
                return loop.run_until_complete(
                    _achat_completion_json(messages=messages)
                )
            else:
                return loop.run_until_complete(
                    _achat_completion_stream_groq(messages=messages)
                )
        else:
            return loop.run_until_complete(
                _achat_completion_stream(messages=messages)
            )
        # return asyncio.run(_achat_completion_stream(messages = messages))
    else:
        return _chat_completion(messages=messages)


async def _achat_completion_stream_groq(messages: list[dict]) -> str:
    client = AsyncGroq(api_key=GROQ_API_KEY)

    max_attempts = 5

    for attempt in range(max_attempts):
        print("Attempt to use Groq (Fast Design Mode):")
        try:
            stream = await client.chat.completions.create(
                messages=messages,
                # model='gemma-7b-it',
                model="mixtral-8x7b-32768",
                # model='llama2-70b-4096',
                temperature=0.3,
                response_format={"type": "json_object"},
                stream=False,
            )
            break
        except Exception:
            if attempt < max_attempts - 1:  # attempt is zero-indexed
                continue
            else:
                raise RuntimeError("failed")

    full_reply_content = stream.choices[0].message.content
    print(colored(full_reply_content, "blue", "on_white"), end="")
    print()
    return full_reply_content


async def _achat_completion_stream_mixtral(messages: list[dict]) -> str:
    client = MistralClient(api_key=MISTRAL_API_KEY)
    # client=AsyncGroq(api_key=GROQ_API_KEY)
    max_attempts = 5

    for attempt in range(max_attempts):
        try:
            messages[len(messages) - 1]["role"] = "user"
            stream = client.chat(
                messages=[
                    ChatMessage(
                        role=message["role"], content=message["content"]
                    )
                    for message in messages
                ],
                # model = "mistral-small-latest",
                model="open-mixtral-8x7b",
                # response_format={"type": "json_object"},
            )
            break  # If the operation is successful, break the loop
        except Exception:
            if attempt < max_attempts - 1:  # attempt is zero-indexed
                continue
            else:
                raise RuntimeError("failed")

    full_reply_content = stream.choices[0].message.content
    print(colored(full_reply_content, "blue", "on_white"), end="")
    print()
    return full_reply_content


async def _achat_completion_stream_gpt35(messages: list[dict]) -> str:
    openai.api_key = OPENAI_API_KEY
    openai.api_base = OPENAI_API_BASE
    response = await openai.ChatCompletion.acreate(
        messages=messages,
        max_tokens=4096,
        n=1,
        stop=None,
        temperature=0.3,
        timeout=3,
        model="gpt-3.5-turbo-16k",
        stream=True,
    )

    # create variables to collect the stream of chunks
    collected_chunks = []
    collected_messages = []
    # iterate through the stream of events
    async for chunk in response:
        collected_chunks.append(chunk)  # save the event response
        choices = chunk["choices"]
        if len(choices) > 0:
            chunk_message = chunk["choices"][0].get(
                "delta", {}
            )  # extract the message
            collected_messages.append(chunk_message)  # save the message
            if "content" in chunk_message:
                print(
                    colored(chunk_message["content"], "blue", "on_white"),
                    end="",
                )
    print()

    full_reply_content = "".join(
        [m.get("content", "") for m in collected_messages]
    )
    return full_reply_content


async def _achat_completion_json(messages: list[dict]) -> str:
    openai.api_key = OPENAI_API_KEY
    openai.api_base = OPENAI_API_BASE

    max_attempts = 5

    for attempt in range(max_attempts):
        try:
            stream = await openai.ChatCompletion.acreate(
                messages=messages,
                max_tokens=4096,
                n=1,
                stop=None,
                temperature=0.3,
                timeout=3,
                model=MODEL,
                response_format={"type": "json_object"},
            )
            break
        except Exception:
            if attempt < max_attempts - 1:  # attempt is zero-indexed
                continue
            else:
                raise RuntimeError("failed")

    full_reply_content = stream.choices[0].message.content
    print(colored(full_reply_content, "blue", "on_white"), end="")
    print()
    return full_reply_content


async def _achat_completion_stream(messages: list[dict]) -> str:
    openai.api_key = OPENAI_API_KEY
    openai.api_base = OPENAI_API_BASE
    response = await openai.ChatCompletion.acreate(
        **_cons_kwargs(messages), stream=True
    )

    # create variables to collect the stream of chunks
    collected_chunks = []
    collected_messages = []
    # iterate through the stream of events
    async for chunk in response:
        collected_chunks.append(chunk)  # save the event response
        choices = chunk["choices"]
        if len(choices) > 0:
            chunk_message = chunk["choices"][0].get(
                "delta", {}
            )  # extract the message
            collected_messages.append(chunk_message)  # save the message
            if "content" in chunk_message:
                print(
                    colored(chunk_message["content"], "blue", "on_white"),
                    end="",
                )
    print()

    full_reply_content = "".join(
        [m.get("content", "") for m in collected_messages]
    )
    return full_reply_content


def _chat_completion(messages: list[dict]) -> str:
    rsp = openai.ChatCompletion.create(**_cons_kwargs(messages))
    content = rsp["choices"][0]["message"]["content"]
    return content


def _cons_kwargs(messages: list[dict]) -> dict:
    kwargs = {
        "messages": messages,
        "max_tokens": 4096,
        "n": 1,
        "stop": None,
        "temperature": 0.5,
        "timeout": 3,
    }
    kwargs_mode = {"model": MODEL}
    kwargs.update(kwargs_mode)
    return kwargs
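A minimal smoke test for the completion entry point above. This is a sketch, not part of the commit: it assumes the backend package is importable, that `config/config.yaml` (or the corresponding environment variables) holds valid API keys, and that the process is started from the `backend/` directory; the message content is made up for illustration.

```python
# Hypothetical smoke test for LLM_Completion (not part of this commit).
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion

messages = [
    {"role": "system", "content": 'Reply with a JSON object {"ok": true}.'},
]
# useGroq=True only takes effect when FAST_DESIGN_MODE is enabled;
# otherwise LLM_Completion falls back to the plain OpenAI streaming path.
reply = LLM_Completion(messages, stream=True, useGroq=True)
print(reply)
```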
backend/AgentCoord/LLMAPI/__init__ (new file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import LLMAPI
backend/AgentCoord/PlanEngine/AgentSelectModify.py (new file, 137 lines)
@@ -0,0 +1,137 @@
import json
from AgentCoord.util.converter import read_LLM_Completion
from typing import List, Dict
from pydantic import BaseModel, RootModel

PROMPT_ABILITY_REQUIREMENT_GENERATION = """
## Instruction
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirements required by the "Current Task". Each ability should be summarized concisely within a few words.

## General Goal (The general goal for the collaboration plan, "Current Task" is just one of its substeps)
{General_Goal}

## Current Task
{Current_Task}

## Output Format Example (Specify the output format)
```json
{{
    "AbilityRequirement": ["Critical Thinking", "Understanding of VR", "Story Structuring Skill"]
}}
```

"""


class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
    AbilityRequirement: List[str]


def generate_AbilityRequirement(General_Goal, Current_Task):
    messages = [
        {
            "role": "system",
            "content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
                General_Goal=General_Goal,
                Current_Task=json.dumps(Current_Task, indent=4),
            ),
        },
    ]
    print(messages[1]["content"])
    return read_LLM_Completion(messages)["AbilityRequirement"]


PROMPT_AGENT_ABILITY_SCORING = """
## Instruction
Based on "Agent Board" and "Ability Requirement", output a score for each agent to estimate the possibility that the agent can fulfil the "Ability Requirement". The score should be 1-5. Provide a concise reason before you assign the score.

## Agent Board
{Agent_Board}

## Ability Requirement
{Ability_Requirement}

## Attention
Do not omit any agent from the Agent Board.

## Output Format Example (Specify the output format)
```json
{{
    "Sam": {{
        "Reason": "...",
        "Score": 1
    }},
    "Alice": {{
        "Reason": "...",
        "Score": 5
    }}
}}
```

"""


class JSON_Agent(BaseModel):
    Reason: str
    Score: int


class JSON_AGENT_ABILITY_SCORING(RootModel):
    root: Dict[str, JSON_Agent]


def agentAbilityScoring(Agent_Board, Ability_Requirement_List):
    scoreTable = {}
    for Ability_Requirement in Ability_Requirement_List:
        messages = [
            {
                "role": "system",
                "content": f" The JSON object must use the schema: {json.dumps(JSON_AGENT_ABILITY_SCORING.model_json_schema(), indent=2)}",
            },
            {
                "role": "system",
                "content": PROMPT_AGENT_ABILITY_SCORING.format(
                    Agent_Board=json.dumps(Agent_Board, indent=4),
                    Ability_Requirement=Ability_Requirement,
                ),
            },
        ]
        print(messages[1]["content"])
        scoreTable[Ability_Requirement] = read_LLM_Completion(messages)
    return scoreTable


def AgentSelectModify_init(stepTask, General_Goal, Agent_Board):
    Current_Task = {
        "TaskName": stepTask["StepName"],
        "InputObject_List": stepTask["InputObject_List"],
        "OutputObject": stepTask["OutputObject"],
        "TaskContent": stepTask["TaskContent"],
    }
    messages = [
        {
            "role": "system",
            "content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
                General_Goal=General_Goal,
                Current_Task=json.dumps(Current_Task, indent=4),
            ),
        },
    ]
    Ability_Requirement_List = read_LLM_Completion(messages)[
        "AbilityRequirement"
    ]
    scoreTable = agentAbilityScoring(Agent_Board, Ability_Requirement_List)
    return scoreTable


def AgentSelectModify_addAspect(aspectList, Agent_Board):
    scoreTable = agentAbilityScoring(Agent_Board, aspectList)
    return scoreTable
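For orientation, a toy sketch of how the scoring entry point is called and what shape it returns (agents, goal and step are invented; real calls go through the LLM backend configured above):

```python
# Hypothetical usage sketch (not part of this commit).
from AgentCoord.PlanEngine.AgentSelectModify import AgentSelectModify_init

Agent_Board = [
    {"Name": "Alice", "Profile": "A novelist focused on emotional arcs."},
    {"Name": "Bob", "Profile": "A science fiction writer and world builder."},
]
stepTask = {
    "StepName": "Story Outline Drafting",
    "InputObject_List": ["Love Element"],
    "OutputObject": "Story Outline",
    "TaskContent": "Draft the story outline.",
}
scoreTable = AgentSelectModify_init(
    stepTask, "Write a love story about an awakening AI.", Agent_Board
)
# Expected shape: {"Critical Thinking": {"Alice": {"Reason": "...", "Score": 5}, ...}, ...}
```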
backend/AgentCoord/PlanEngine/AgentSelection_Generator.py (new file, 116 lines)
@@ -0,0 +1,116 @@
import json
from AgentCoord.util.converter import read_LLM_Completion
from typing import List
from pydantic import BaseModel
import random

MIN_TEAM_SIZE = 2  # the minimum number of agents allowed in a task for the initial coordination strategy.
MAX_TEAM_SIZE = 3  # the maximum number of agents allowed in a task for the initial coordination strategy. You can set it to any number larger than MIN_TEAM_SIZE. It is recommended to start with fewer agents and manually add more.

PROMPT_ABILITY_REQUIREMENT_GENERATION = """
## Instruction
Based on "General Goal" and "Current Task", output a formatted "Ability Requirement" which lists at least 3 different ability requirements required by the "Current Task". Each ability should be summarized concisely within a few words.

## General Goal (The general goal for the collaboration plan, "Current Task" is just one of its substeps)
{General_Goal}

## Current Task
{Current_Task}

## Output Format Example (Specify the output format)
```json
{{
    "AbilityRequirement": ["Critical Thinking", "Understanding of VR", "Story Structuring Skill"]
}}
```

"""


class JSON_ABILITY_REQUIREMENT_GENERATION(BaseModel):
    AbilityRequirement: List[str]


PROMPT_AGENT_SELECTION_GENERATION = """
## Instruction
Based on "General Goal", "Current Task" and "Agent Board", output a formatted "Agent Selection Plan". Your selection should consider the ability needed for "Current Task" and the profile of each agent in "Agent Board".

## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}

## Current Task
{Current_Task}

## Agent Board (Specify the list of agents available for selection)
{Agent_Board}

## Attention
Just use agents in Agent Board.

## Output Format Example (Select one or more agent)
```json
{{
    "AgentSelectionPlan": ["Alice", "Bob"]
}}
```

"""


class JSON_AGENT_SELECTION_GENERATION(BaseModel):
    AgentSelectionPlan: List[str]


def generate_AbilityRequirement(General_Goal, Current_Task):
    messages = [
        {
            "role": "system",
            "content": f" The JSON object must use the schema: {json.dumps(JSON_ABILITY_REQUIREMENT_GENERATION.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_ABILITY_REQUIREMENT_GENERATION.format(
                General_Goal=General_Goal,
                Current_Task=json.dumps(Current_Task, indent=4),
            ),
        },
    ]
    print(messages[1]["content"])
    return read_LLM_Completion(messages)["AbilityRequirement"]


def generate_AgentSelection(General_Goal, Current_Task, Agent_Board):
    messages = [
        {
            "role": "system",
            "content": f" The JSON object must use the schema: {json.dumps(JSON_AGENT_SELECTION_GENERATION.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_AGENT_SELECTION_GENERATION.format(
                General_Goal=General_Goal,
                Current_Task=json.dumps(Current_Task, indent=4),
                Agent_Board=json.dumps(Agent_Board, indent=4),
            ),
        },
    ]
    print(messages[1]["content"])

    agentboard_set = {agent["Name"] for agent in Agent_Board}

    while True:
        candidate = read_LLM_Completion(messages)["AgentSelectionPlan"]
        if len(candidate) > MAX_TEAM_SIZE:
            teamSize = random.randint(2, MAX_TEAM_SIZE)
            candidate = candidate[0:teamSize]
        elif len(candidate) < MIN_TEAM_SIZE:
            continue
        AgentSelectionPlan = sorted(candidate)
        AgentSelectionPlan_set = set(AgentSelectionPlan)

        # Check if every item in AgentSelectionPlan is in agentboard
        if AgentSelectionPlan_set.issubset(agentboard_set):
            break  # If all items are in agentboard, break the loop

    # AgentSelectionPlan= sorted(read_LLM_Completion(messages)["AgentSelectionPlan"]) # sort the select agent list for unique sequence
    return AgentSelectionPlan
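A small usage sketch for the selector above. Everything here is invented for illustration: `Agent_Board` entries need at least a `Name` and `Profile`, and the function loops until the returned team is a subset of the board and within the size bounds.

```python
# Hypothetical usage sketch (not part of this commit).
from AgentCoord.PlanEngine.AgentSelection_Generator import generate_AgentSelection

Agent_Board = [
    {"Name": "Alice", "Profile": "A novelist focused on emotional arcs."},
    {"Name": "Bob", "Profile": "A science fiction writer and world builder."},
    {"Name": "Jordan", "Profile": "An editor who reviews logic and structure."},
]
Current_Task = {
    "TaskName": "Story Outline Drafting",
    "InputObject_List": ["Love Element", "World Setting"],
    "OutputObject": "Story Outline",
    "TaskContent": "Draft the story outline for the story.",
}
team = generate_AgentSelection(
    General_Goal="Write a love story about an awakening AI.",
    Current_Task=Current_Task,
    Agent_Board=Agent_Board,
)
print(team)  # e.g. ["Alice", "Bob"]: sorted, between MIN_ and MAX_TEAM_SIZE
```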
backend/AgentCoord/PlanEngine/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
backend/AgentCoord/PlanEngine/basePlan_Generator.py (new file, 55 lines)
@@ -0,0 +1,55 @@
from AgentCoord.PlanEngine.planOutline_Generator import generate_PlanOutline
from AgentCoord.PlanEngine.AgentSelection_Generator import (
    generate_AgentSelection,
)
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util


def generate_basePlan(
    General_Goal, Agent_Board, AgentProfile_Dict, InitialObject_List
):
    basePlan = {
        "Initial Input Object": InitialObject_List,
        "Collaboration Process": [],
    }
    PlanOutline = generate_PlanOutline(
        InitialObject_List=[], General_Goal=General_Goal
    )
    for stepItem in PlanOutline:
        Current_Task = {
            "TaskName": stepItem["StepName"],
            "InputObject_List": stepItem["InputObject_List"],
            "OutputObject": stepItem["OutputObject"],
            "TaskContent": stepItem["TaskContent"],
        }
        AgentSelection = generate_AgentSelection(
            General_Goal=General_Goal,
            Current_Task=Current_Task,
            Agent_Board=Agent_Board,
        )
        Current_Task_Description = {
            "TaskName": stepItem["StepName"],
            "AgentInvolved": [
                {"Name": name, "Profile": AgentProfile_Dict[name]}
                for name in AgentSelection
            ],
            "InputObject_List": stepItem["InputObject_List"],
            "OutputObject": stepItem["OutputObject"],
            "CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
                stepItem["InputObject_List"],
                stepItem["OutputObject"],
                AgentSelection,
                stepItem["TaskContent"],
            ),
        }
        TaskProcess = generate_TaskProcess(
            General_Goal=General_Goal,
            Current_Task_Description=Current_Task_Description,
        )
        # add the generated AgentSelection and TaskProcess to the stepItem
        stepItem["AgentSelection"] = AgentSelection
        stepItem["TaskProcess"] = TaskProcess
        basePlan["Collaboration Process"].append(stepItem)
    basePlan["General Goal"] = General_Goal
    return basePlan
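A sketch of producing a full initial plan with the generator above. The goal and agents are placeholders; each call fans out into several LLM requests, so this is not cheap to run.

```python
# Hypothetical usage sketch (not part of this commit).
from AgentCoord.PlanEngine.basePlan_Generator import generate_basePlan

Agent_Board = [
    {"Name": "Alice", "Profile": "A novelist focused on emotional arcs."},
    {"Name": "Bob", "Profile": "A science fiction writer and world builder."},
]
AgentProfile_Dict = {a["Name"]: a["Profile"] for a in Agent_Board}
basePlan = generate_basePlan(
    General_Goal="Write a love story about an awakening AI.",
    Agent_Board=Agent_Board,
    AgentProfile_Dict=AgentProfile_Dict,
    InitialObject_List=[],
)
print([step["StepName"] for step in basePlan["Collaboration Process"]])
```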
backend/AgentCoord/PlanEngine/branch_PlanOutline.py (new file, 102 lines)
@@ -0,0 +1,102 @@
from AgentCoord.util.converter import read_LLM_Completion
import json
from typing import List
from pydantic import BaseModel, Field


PROMPT_PLAN_OUTLINE_BRANCHING = """
## Instruction
Based on "Existing Steps", your task is to complete the "Remaining Steps" for the plan for "General Goal".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.

## General Goal (Specify the general goal for the plan)
{General_Goal}

## Initial Key Object List (Specify the list of initial key objects available for use as the input object of a Step)
{InitialObject_List}

## Existing Steps
{Existing_Steps}

## Baseline Completion
{Baseline_Completion}

## Modification Requirement
{Modification_Requirement}

## Output Format Example (Specify the output format)
```json
{{
    "Remaining Steps": [
        {{
            "StepName": "xx",
            "TaskContent": "xxx...",
            "InputObject_List": [xx],
            "OutputObject": "xx"
        }},
        {{
            "StepName": "xx",
            "TaskContent": "xxx...",
            "InputObject_List": [xx],
            "OutputObject": "xx"
        }},
        {{
            "StepName": "xx",
            "TaskContent": "xxx...",
            "InputObject_List": [xx],
            "OutputObject": "xx"
        }}
    ]
}}
```

## Format Explanation (Explain the Output Format):
TaskContent: Describe the task of the current step.
InputObject_List: The list of the input objects that will be used in the current step.
OutputObject: The name of the final output object of the current step.
StepName: Provide a CONCISE and UNIQUE name for this step, smartly summarize what this step is doing.

"""


class JSON_Step(BaseModel):
    StepName: str
    TaskContent: str
    InputObject_List: List[str]
    OutputObject: str


class JSON_PLAN_OUTLINE_BRANCHING(BaseModel):
    Remaining_Steps: List[JSON_Step] = Field(..., alias="Remaining Steps")


def branch_PlanOutline(
    branch_Number,
    Modification_Requirement,
    Existing_Steps,
    Baseline_Completion,
    InitialObject_List,
    General_Goal,
):
    prompt = PROMPT_PLAN_OUTLINE_BRANCHING.format(
        Modification_Requirement=Modification_Requirement,
        Existing_Steps=json.dumps(Existing_Steps, indent=4),
        Baseline_Completion=json.dumps(Baseline_Completion, indent=4),
        InitialObject_List=str(InitialObject_List),
        General_Goal=General_Goal,
    )
    print(prompt)
    branch_List = []
    for _ in range(branch_Number):
        messages = [
            {
                "role": "system",
                "content": f" The JSON object must use the schema: {json.dumps(JSON_PLAN_OUTLINE_BRANCHING.model_json_schema(), indent=2)}",
            },
            {"role": "system", "content": prompt},
        ]
        Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
            "Remaining Steps"
        ]
        branch_List.append(Remaining_Steps)
    return branch_List
backend/AgentCoord/PlanEngine/branch_TaskProcess.py (new file, 153 lines)
@@ -0,0 +1,153 @@
from AgentCoord.util.converter import read_LLM_Completion
import AgentCoord.util as util
from typing import List
from pydantic import BaseModel, Field
import json


ACT_SET = """
- Propose (Propose something that contributes to the current task):
    - Description Format Example:
      Propose some suggestion for the development of the story from emotional aspects.

- Critique (Provide feedback to the action result of other agents):
    - Description Format Example:
      Critique the logic soundness of the story outline written by Alice.

- Improve (Improve the result of a previous action):
    - Description Format Example:
      Improve the story outline written by Bob based on the feedback by Jordan.

- Finalize (Deliver the final result based on previous actions):
    - Description Format Example:
      Summarize the discussion and submit the final version of the Story Outline.

"""

PROMPT_TASK_PROCESS_BRANCHING = """
## Instruction
Based on "Existing Steps", your task is to complete the "Remaining Steps" for the "Task for Current Step".
Note: "Modification Requirement" specifies how to modify the "Baseline Completion" for a better/alternative solution.

## General Goal (The general goal for the collaboration plan, you just design the plan for one of its steps (i.e. "Task for Current Step"))
{General_Goal}

## Task for Current Step
{Current_Task_Description}

## Action Set (The list of Action Type Available)
{Act_Set}

## Existing Steps
{Existing_Steps}

## Baseline Completion
{Baseline_Completion}

## Modification Requirement
{Modification_Requirement}

## Output Format Example (Specify the output format)
```json
{{
    "Remaining Steps": [
        {{
            "ID": "Action4",
            "ActionType": "Propose",
            "AgentName": "Mia",
            "Description": "Propose psychological theories on love and attachment that could be applied to AI's emotional development.",
            "ImportantInput": [
                "InputObject:Story Outline"
            ]
        }},
        {{
            "ID": "Action5",
            "ActionType": "Propose",
            "AgentName": "Noah",
            "Description": "Propose ethical considerations and philosophical questions regarding AI's capacity for love.",
            "ImportantInput": []
        }},
        {{
            "ID": "Action6",
            "ActionType": "Finalize",
            "AgentName": "Liam",
            "Description": "Combine the poetic elements and ethical considerations into a cohesive set of core love elements for the story.",
            "ImportantInput": [
                "ActionResult:Action1",
                "ActionResult:Action5"
            ]
        }}
    ]
}}
```

## Format Explanation (Explain the Output Format):
ImportantInput: Specify if there is any previous result that should be taken into special consideration during the execution of the action. Should be of format "InputObject:xx" or "ActionResult:xx".
InputObject_List: List existing objects that should be utilized in the current step.
AgentName: Specify the agent who will perform the action. You CAN ONLY USE A NAME THAT APPEARS IN "AgentInvolved".
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".

"""


class JSON_Step(BaseModel):
    ID: str
    ActionType: str
    AgentName: str
    Description: str
    ImportantInput: List[str]


class JSON_TASK_PROCESS_BRANCHING(BaseModel):
    Remaining_Steps: List[JSON_Step] = Field(..., alias="Remaining Steps")


def branch_TaskProcess(
    branch_Number,
    Modification_Requirement,
    Existing_Steps,
    Baseline_Completion,
    stepTaskExisting,
    General_Goal,
    AgentProfile_Dict,
):
    Current_Task_Description = {
        "TaskName": stepTaskExisting["StepName"],
        "AgentInvolved": [
            {"Name": name, "Profile": AgentProfile_Dict[name]}
            for name in stepTaskExisting["AgentSelection"]
        ],
        "InputObject_List": stepTaskExisting["InputObject_List"],
        "OutputObject": stepTaskExisting["OutputObject"],
        "CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
            stepTaskExisting["InputObject_List"],
            stepTaskExisting["OutputObject"],
            stepTaskExisting["AgentSelection"],
            stepTaskExisting["TaskContent"],
        ),
    }
    prompt = PROMPT_TASK_PROCESS_BRANCHING.format(
        Modification_Requirement=Modification_Requirement,
        Current_Task_Description=json.dumps(
            Current_Task_Description, indent=4
        ),
        Existing_Steps=json.dumps(Existing_Steps, indent=4),
        Baseline_Completion=json.dumps(Baseline_Completion, indent=4),
        General_Goal=General_Goal,
        Act_Set=ACT_SET,
    )
    print(prompt)
    branch_List = []
    for _ in range(branch_Number):
        messages = [
            {
                "role": "system",
                "content": f" The JSON object must use the schema: {json.dumps(JSON_TASK_PROCESS_BRANCHING.model_json_schema(), indent=2)}",
            },
            {"role": "system", "content": prompt},
        ]
        Remaining_Steps = read_LLM_Completion(messages, useGroq=False)[
            "Remaining Steps"
        ]

        branch_List.append(Remaining_Steps)
    return branch_List
backend/AgentCoord/PlanEngine/fill_stepTask.py (new file, 42 lines)
@@ -0,0 +1,42 @@
from AgentCoord.PlanEngine.AgentSelection_Generator import (
    generate_AgentSelection,
)
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util


def fill_stepTask(General_Goal, stepTask, Agent_Board, AgentProfile_Dict):
    Current_Task = {
        "TaskName": stepTask["StepName"],
        "InputObject_List": stepTask["InputObject_List"],
        "OutputObject": stepTask["OutputObject"],
        "TaskContent": stepTask["TaskContent"],
    }
    AgentSelection = generate_AgentSelection(
        General_Goal=General_Goal,
        Current_Task=Current_Task,
        Agent_Board=Agent_Board,
    )
    Current_Task_Description = {
        "TaskName": stepTask["StepName"],
        "AgentInvolved": [
            {"Name": name, "Profile": AgentProfile_Dict[name]}
            for name in AgentSelection
        ],
        "InputObject_List": stepTask["InputObject_List"],
        "OutputObject": stepTask["OutputObject"],
        "CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
            stepTask["InputObject_List"],
            stepTask["OutputObject"],
            AgentSelection,
            stepTask["TaskContent"],
        ),
    }
    TaskProcess = generate_TaskProcess(
        General_Goal=General_Goal,
        Current_Task_Description=Current_Task_Description,
    )
    # add the generated AgentSelection and TaskProcess to the stepTask
    stepTask["AgentSelection"] = AgentSelection
    stepTask["TaskProcess"] = TaskProcess
    return stepTask
backend/AgentCoord/PlanEngine/fill_stepTask_TaskProcess.py (new file, 29 lines)
@@ -0,0 +1,29 @@
from AgentCoord.PlanEngine.taskProcess_Generator import generate_TaskProcess
import AgentCoord.util as util


def fill_stepTask_TaskProcess(General_Goal, stepTask, AgentProfile_Dict):
    AgentSelection = stepTask["AgentSelection"]
    Current_Task_Description = {
        "TaskName": stepTask["StepName"],
        "AgentInvolved": [
            {"Name": name, "Profile": AgentProfile_Dict[name]}
            for name in AgentSelection
        ],
        "InputObject_List": stepTask["InputObject_List"],
        "OutputObject": stepTask["OutputObject"],
        "CurrentTaskDescription": util.generate_template_sentence_for_CollaborationBrief(
            stepTask["InputObject_List"],
            stepTask["OutputObject"],
            AgentSelection,
            stepTask["TaskContent"],
        ),
    }
    TaskProcess = generate_TaskProcess(
        General_Goal=General_Goal,
        Current_Task_Description=Current_Task_Description,
    )
    # add the generated AgentSelection and TaskProcess to the stepTask
    stepTask["AgentSelection"] = AgentSelection
    stepTask["TaskProcess"] = TaskProcess
    return stepTask
backend/AgentCoord/PlanEngine/planOutline_Generator.py (new file, 86 lines)
@@ -0,0 +1,86 @@
from AgentCoord.util.converter import read_LLM_Completion
from typing import List
from pydantic import BaseModel
import json

PROMPT_PLAN_OUTLINE_GENERATION = """
## Instruction
Based on "Output Format Example", "General Goal", and "Initial Key Object List", output a formatted "Plan_Outline".

## Initial Key Object List (Specify the list of initial key objects available, each initial key object should be the input object of at least one Step)
{InitialObject_List}

## General Goal (Specify the general goal for the collaboration plan)
{General_Goal}

## Output Format Example (Specify the output format)
```json
{{
    "Plan_Outline": [
        {{
            "StepName": "Love Element Brainstorming",
            "TaskContent": "Decide the main love element in the love story.",
            "InputObject_List": [],
            "OutputObject": "Love Element"
        }},
        {{
            "StepName": "Story World Building",
            "TaskContent": "Build the world setting for the story.",
            "InputObject_List": [],
            "OutputObject": "World Setting"
        }},
        {{
            "StepName": "Story Outline Drafting",
            "TaskContent": "Draft the story outline for the story.",
            "InputObject_List": ["Love Element", "World Setting"],
            "OutputObject": "Story Outline"
        }},
        {{
            "StepName": "Final Story Writing",
            "TaskContent": "Write the final story.",
            "InputObject_List": [],
            "OutputObject": "Completed Story"
        }}
    ]
}}
```

## Format Explanation (Explain the Output Format):
StepName: Provide a CONCISE and UNIQUE name for this step, smartly summarize what this step is doing.
TaskContent: Describe the task of the current step.
InputObject_List: The list of the input objects that will be used in the current step.
OutputObject: The name of the final output object of the current step.

"""


class Step(BaseModel):
    StepName: str
    TaskContent: str
    InputObject_List: List[str]
    OutputObject: str


class PlanOutline(BaseModel):
    Plan_Outline: List[Step]

    class Config:
        extra = "allow"


def generate_PlanOutline(InitialObject_List, General_Goal):
    messages = [
        {
            "role": "system",
            "content": "You are a recipe database that outputs recipes in JSON.\n"
            f" The JSON object must use the schema: {json.dumps(PlanOutline.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_PLAN_OUTLINE_GENERATION.format(
                InitialObject_List=str(InitialObject_List),
                General_Goal=General_Goal,
            ),
        },
    ]
    return read_LLM_Completion(messages)["Plan_Outline"]
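For reference, a hedged sketch of driving the outline generator on its own (the goal is a placeholder; an LLM backend configured as in LLMAPI.py is assumed):

```python
# Hypothetical usage sketch (not part of this commit).
from AgentCoord.PlanEngine.planOutline_Generator import generate_PlanOutline

outline = generate_PlanOutline(
    InitialObject_List=[],
    General_Goal="Write a love story about an awakening AI.",
)
for step in outline:
    # Each step carries StepName, TaskContent, InputObject_List and OutputObject.
    print(step["StepName"], "->", step["OutputObject"])
```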
backend/AgentCoord/PlanEngine/taskProcess_Generator.py (new file, 139 lines)
@@ -0,0 +1,139 @@
import json
from typing import List
from pydantic import BaseModel
from AgentCoord.util.converter import read_LLM_Completion

ACT_SET = """
- Propose (Propose something that contributes to the current task):
    - Description Format Example:
      Propose some suggestion for the development of the story from emotional aspects.

- Critique (Provide feedback to the action result of other agents):
    - Description Format Example:
      Critique the logic soundness of the story outline written by Alice.

- Improve (Improve the result of a previous action):
    - Description Format Example:
      Improve the story outline written by Bob based on the feedback by Jordan.

- Finalize (Deliver the final result based on previous actions):
    - Description Format Example:
      Summarize the discussion and submit the final version of the Story Outline.

"""
PROMPT_TASK_PROCESS_GENERATION = """
## Instruction
Based on "General Goal", "Task for Current Step", "Action Set" and "Output Format Example", design a plan for "Task for Current Step", output a formatted "Task_Process_Plan".

## General Goal (The general goal for the collaboration plan, you just design the plan for one of its steps (i.e. "Task for Current Step"))
{General_Goal}

## Task for Current Step
{Current_Task_Description}

## Action Set (The list of Action Type Available)
{Act_Set}

## Output Format Example (Specify the output format)
```json
{{
    "Task_Process_Plan": [
        {{
            "ID": "Action1",
            "ActionType": "Propose",
            "AgentName": "Bob",
            "Description": "Propose an idea about how the AI can get awakened.",
            "ImportantInput": []
        }},
        {{
            "ID": "Action2",
            "ActionType": "Propose",
            "AgentName": "Alice",
            "Description": "Propose some suggestion for the development of the story from emotional aspects.",
            "ImportantInput": ["InputObject:Love Element Notes"]
        }},
        {{
            "ID": "Action3",
            "ActionType": "Propose",
            "AgentName": "Bob",
            "Description": "Propose a draft story outline, incorporating the idea proposed by Bob and the suggestion proposed by Alice.",
            "ImportantInput": ["ActionResult:Action1", "ActionResult:Action2"]
        }},
        {{
            "ID": "Action4",
            "ActionType": "Critique",
            "AgentName": "Jordan",
            "Description": "Critique the technical soundness of the draft story outline proposed by Bob.",
            "ImportantInput": ["ActionResult:Action3"]
        }},
        {{
            "ID": "Action5",
            "ActionType": "Improve",
            "AgentName": "Bob",
            "Description": "Improve the technical soundness of the draft story outline based on Jordan's feedback.",
            "ImportantInput": ["ActionResult:Action3", "ActionResult:Action4"]
        }},
        {{
            "ID": "Action6",
            "ActionType": "Finalize",
            "AgentName": "Jordan",
            "Description": "Polish the improved draft story outline by Bob and submit it as the final version of the Story Outline",
            "ImportantInput": ["ActionResult:Action5"]
        }}
    ]
}}
```

## Format Explanation (Explain the Output Format):
ImportantInput: Specify if there is any previous result that should be taken into special consideration during the execution of the action. Should be of format "InputObject:xx" or "ActionResult:xx".
InputObject_List: List existing objects that should be utilized in the current step.
AgentName: Specify the agent who will perform the action. You CAN ONLY USE A NAME THAT APPEARS IN "AgentInvolved".
ActionType: Specify the type of action, note that only the last action can be of type "Finalize", and the last action must be "Finalize".

"""


class Action(BaseModel):
    ID: str
    ActionType: str
    AgentName: str
    Description: str
    ImportantInput: List[str]


class TaskProcessPlan(BaseModel):
    Task_Process_Plan: List[Action]


def generate_TaskProcess(General_Goal, Current_Task_Description):
    messages = [
        {
            "role": "system",
            "content": f" The JSON object must use the schema: {json.dumps(TaskProcessPlan.model_json_schema(), indent=2)}",
        },
        {
            "role": "system",
            "content": PROMPT_TASK_PROCESS_GENERATION.format(
                General_Goal=General_Goal,
                Current_Task_Description=json.dumps(
                    Current_Task_Description, indent=4
                ),
                Act_Set=ACT_SET,
            ),
        },
    ]
    print(messages[1]["content"])

    # if read_LLM_Completion(messages)["Task_Process_Plan"] doesn't have the right format, call it again
    while True:
        response_json = read_LLM_Completion(messages)["Task_Process_Plan"]
        response_agents = {action["AgentName"] for action in response_json}
        involved_agents = {
            agent["Name"]
            for agent in Current_Task_Description["AgentInvolved"]
        }
        if response_agents.issubset(involved_agents):
            break

    return response_json
backend/AgentCoord/RehearsalEngine_V2/Action/__init__.py (new file, 9 lines)
@@ -0,0 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .baseAction import BaseAction
from .customAction_Finalize import customAction_Finalize
from .customAction_Improve import customAction_Improve
from .customAction_Critique import customAction_Critique
from .customAction_Propose import customAction_Propose

customAction_Dict = {
    "Finalize": customAction_Finalize,
    "Improve": customAction_Improve,
    "Critique": customAction_Critique,
    "Propose": customAction_Propose,
}
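The dictionary above is how the rehearsal engine maps an `ActionType` string to an action class; anything not listed falls back to `BaseAction`, mirroring the lookup in ExecutePlan.py. A minimal sketch of that dispatch with an invented action record:

```python
# Hypothetical dispatch sketch (not part of this commit).
from AgentCoord.RehearsalEngine_V2.Action import BaseAction, customAction_Dict

ActionInfo = {
    "ID": "Action1",
    "ActionType": "Propose",
    "AgentName": "Bob",
    "Description": "Propose an idea for the story.",
    "ImportantInput": [],
}
KeyObjects = {}
# Unknown action types fall back to the generic BaseAction behaviour.
action_cls = customAction_Dict.get(ActionInfo["ActionType"], BaseAction)
action = action_cls(info=ActionInfo, OutputName="Story Outline", KeyObjects=KeyObjects)
```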
backend/AgentCoord/RehearsalEngine_V2/Action/baseAction.py (new file, 95 lines)
@@ -0,0 +1,95 @@
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion
from AgentCoord.util.colorLog import print_colored
import copy

PROMPT_TEMPLATE_TAKE_ACTION_BASE = '''
Your name is {agentName}. You will play the role as the Profile indicates.
Profile: {agentProfile}

You are within a multi-agent collaboration for the "Current Task".
Now it's your turn to take action. Read the "Context Information" and take your action following "Instruction for Your Current Action".
Note: Important Inputs for your action are marked with *Important Input*

## Context Information

### General Goal (The "Current Task" is indeed a substep of the general goal)
{General_Goal}

### Current Task
{Current_Task_Description}

### Input Objects (Input objects for the current task)
{Input_Objects}

### History Action
{History_Action}

## Instruction for Your Current Action
{Action_Description}

{Action_Custom_Note}

'''

PROMPT_TEMPLATE_INPUTOBJECT_RECORD = '''
{{
{Important_Mark}
{Input_Objects_Name}:
{Input_Objects_Content}
}}

'''

PROMPT_TEMPLATE_ACTION_RECORD = '''
{{
{Important_Mark}
{AgentName} ({Action_Description}):
{Action_Result}
}}

'''


class BaseAction():
    def __init__(self, info, OutputName, KeyObjects) -> None:
        self.KeyObjects = KeyObjects
        self.OutputName = OutputName
        self.Action_Result = None
        self.info = info
        self.Action_Custom_Note = ""

    def postRun_Callback(self) -> None:
        return

    def run(
        self,
        General_Goal,
        TaskDescription,
        agentName,
        AgentProfile_Dict,
        InputName_List,
        OutputName,
        KeyObjects,
        ActionHistory,
    ):
        # construct input record
        inputObject_Record = ""
        for InputName in InputName_List:
            ImportantInput_Identifier = "InputObject:" + InputName
            if ImportantInput_Identifier in self.info["ImportantInput"]:
                Important_Mark = "*Important Input*"
            else:
                Important_Mark = ""
            inputObject_Record += PROMPT_TEMPLATE_INPUTOBJECT_RECORD.format(
                Input_Objects_Name=InputName,
                Input_Objects_Content=KeyObjects[InputName],
                Important_Mark=Important_Mark,
            )

        # construct history action record
        action_Record = ""
        for actionInfo in ActionHistory:
            ImportantInput_Identifier = "ActionResult:" + actionInfo["ID"]
            if ImportantInput_Identifier in self.info["ImportantInput"]:
                Important_Mark = "*Important Input*"
            else:
                Important_Mark = ""
            action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(
                AgentName=actionInfo["AgentName"],
                Action_Description=actionInfo["Description"],
                Action_Result=actionInfo["Action_Result"],
                Important_Mark=Important_Mark,
            )

        prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(
            agentName=agentName,
            agentProfile=AgentProfile_Dict[agentName],
            General_Goal=General_Goal,
            Current_Task_Description=TaskDescription,
            Input_Objects=inputObject_Record,
            History_Action=action_Record,
            Action_Description=self.info["Description"],
            Action_Custom_Note=self.Action_Custom_Note,
        )
        print_colored(text=prompt, text_color="red")
        messages = [{"role": "system", "content": prompt}]
        ActionResult = LLM_Completion(messages, True, False)
        ActionInfo_with_Result = copy.deepcopy(self.info)
        ActionInfo_with_Result["Action_Result"] = ActionResult

        # run customizable callback
        self.Action_Result = ActionResult
        self.postRun_Callback()
        return ActionInfo_with_Result
@@ -0,0 +1,16 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction

ACTION_CUSTOM_NOTE = '''
Note: Since you are in a conversation, your critique must be concise, clear and easy to read, don't overwhelm others. If you want to list some points, list at most 2 points.

'''


class customAction_Critique(BaseAction):
    def __init__(self, info, OutputName, KeyObjects) -> None:
        self.KeyObjects = KeyObjects
        self.OutputName = OutputName
        self.Action_Result = None
        self.info = info
        self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName=OutputName)
@@ -0,0 +1,24 @@
from AgentCoord.util.converter import read_outputObject_content
from AgentCoord.RehearsalEngine_V2.Action import BaseAction

ACTION_CUSTOM_NOTE = '''
Note: You can say something before you give the final content of {OutputName}. When you decide to give the final content of {OutputName}, it should be enclosed like this:
```{OutputName}
(the content of {OutputName})
```
'''


class customAction_Finalize(BaseAction):
    def __init__(self, info, OutputName, KeyObjects) -> None:
        self.KeyObjects = KeyObjects
        self.OutputName = OutputName
        self.Action_Result = None
        self.info = info
        self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName=OutputName)

    def postRun_Callback(self) -> None:
        # extract output object
        extracted_outputObject = read_outputObject_content(
            self.Action_Result, self.OutputName
        )
        # add the extracted output object to KeyObjects
        self.KeyObjects[self.OutputName] = extracted_outputObject
@@ -0,0 +1,22 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction

ACTION_CUSTOM_NOTE = '''
Note: You can say something before you provide the improved version of the content.
The improved version you provide must be a completed version (e.g. if you provide an improved story, you should give the completed story content, rather than just reporting where you have improved).
When you decide to give the improved version of the content, it should start like this:

## Improved version of xxx
(the improved version of the content)
```

'''


class customAction_Improve(BaseAction):
    def __init__(self, info, OutputName, KeyObjects) -> None:
        self.KeyObjects = KeyObjects
        self.OutputName = OutputName
        self.Action_Result = None
        self.info = info
        self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName=OutputName)
@@ -0,0 +1,15 @@
from AgentCoord.RehearsalEngine_V2.Action import BaseAction

ACTION_CUSTOM_NOTE = '''

'''


class customAction_Propose(BaseAction):
    def __init__(self, info, OutputName, KeyObjects) -> None:
        self.KeyObjects = KeyObjects
        self.OutputName = OutputName
        self.Action_Result = None
        self.info = info
        self.Action_Custom_Note = ACTION_CUSTOM_NOTE.format(OutputName=OutputName)
backend/AgentCoord/RehearsalEngine_V2/ExecutePlan.py (new file, 128 lines)
@@ -0,0 +1,128 @@
import AgentCoord.util as util
import AgentCoord.RehearsalEngine_V2.Action as Action
from termcolor import colored


# Accept inputs: num_StepToRun (the number of steps to run; if None, run to the end), plan, RehearsalLog, AgentProfile_Dict
def executePlan(plan, num_StepToRun, RehearsalLog, AgentProfile_Dict):
    # Prepare for execution
    KeyObjects = {}
    finishedStep_index = -1
    for logNode in RehearsalLog:
        # Increment finishedStep_index: the index of the latest finished step seen in RehearsalLog; if no step has finished yet, it stays -1
        if logNode["LogNodeType"] == "step":
            finishedStep_index += 1
        # Set existing key object
        if logNode["LogNodeType"] == "object":
            KeyObjects[logNode["NodeId"]] = logNode["content"]

    # Execute
    # specify which steps will be run
    if num_StepToRun is None:
        run_to = len(plan["Collaboration Process"])
    else:
        run_to = (finishedStep_index + 1) + num_StepToRun

    StepRun_count = 0  # count how many steps are run during execution
    # loop for each step to run
    for stepDescrip in plan["Collaboration Process"][
        (finishedStep_index + 1): run_to
    ]:
        StepRun_count += 1
        # collect infos for executing the current step
        StepName = (
            util.camel_case_to_normal(stepDescrip["StepName"])
            if util.is_camel_case(stepDescrip["StepName"])
            else stepDescrip["StepName"]
        )
        TaskContent = stepDescrip["TaskContent"]
        InputName_List = (
            [
                (
                    util.camel_case_to_normal(obj)
                    if util.is_camel_case(obj)
                    else obj
                )
                for obj in stepDescrip["InputObject_List"]
            ]
            if stepDescrip["InputObject_List"] is not None
            else None
        )
        OutputName = (
            util.camel_case_to_normal(stepDescrip["OutputObject"])
            if util.is_camel_case(stepDescrip["OutputObject"])
            else stepDescrip["OutputObject"]
        )
        Agent_List = stepDescrip["AgentSelection"]
        TaskProcess = stepDescrip["TaskProcess"]
        TaskDescription = (
            util.converter.generate_template_sentence_for_CollaborationBrief(
                input_object_list=InputName_List,
                output_object=OutputName,
                agent_list=Agent_List,
                step_task=TaskContent,
            )
        )

        # init log for RehearsalLog
        inputObject_Record = [
            {InputName: KeyObjects[InputName]} for InputName in InputName_List
        ]
        stepLogNode = {
            "LogNodeType": "step",
            "NodeId": StepName,
            "InputName_List": InputName_List,
            "OutputName": OutputName,
            "chatLog": [],
            "inputObject_Record": inputObject_Record,
        }
        objectLogNode = {
            "LogNodeType": "object",
            "NodeId": OutputName,
            "content": None,
        }

        # start the group chat
        util.print_colored(TaskDescription, text_color="green")
        ActionHistory = []
        for ActionInfo in TaskProcess:
            actionType = ActionInfo["ActionType"]
            agentName = ActionInfo["AgentName"]
            if actionType in Action.customAction_Dict:
                currentAction = Action.customAction_Dict[actionType](
                    info=ActionInfo,
                    OutputName=OutputName,
                    KeyObjects=KeyObjects,
                )
            else:
                currentAction = Action.BaseAction(
                    info=ActionInfo,
                    OutputName=OutputName,
                    KeyObjects=KeyObjects,
                )
            ActionInfo_with_Result = currentAction.run(
                General_Goal=plan["General Goal"],
                TaskDescription=TaskDescription,
                agentName=agentName,
                AgentProfile_Dict=AgentProfile_Dict,
                InputName_List=InputName_List,
                OutputName=OutputName,
                KeyObjects=KeyObjects,
                ActionHistory=ActionHistory,
            )
            ActionHistory.append(ActionInfo_with_Result)
        # post processing for the group chat (finish)
        objectLogNode["content"] = KeyObjects[OutputName]
        RehearsalLog.append(stepLogNode)
        RehearsalLog.append(objectLogNode)
        stepLogNode["ActionHistory"] = ActionHistory

    # Return Output
    print(
        colored(
            "$Run " + str(StepRun_count) + " step$",
            color="black",
            on_color="on_white",
        )
    )
    return RehearsalLog
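A sketch of invoking the executor end to end. The plan, agents and profiles below are placeholders; in the real backend the plan comes from generate_basePlan and the log from earlier runs, and with an empty "Collaboration Process" the call simply returns the log unchanged.

```python
# Hypothetical usage sketch (not part of this commit).
from AgentCoord.RehearsalEngine_V2.ExecutePlan import executePlan

AgentProfile_Dict = {"Alice": "A novelist.", "Bob": "A world builder."}
plan = {
    "General Goal": "Write a love story about an awakening AI.",
    "Collaboration Process": [],  # normally produced by generate_basePlan
}
RehearsalLog = []  # empty log: execution starts from the first step
RehearsalLog = executePlan(
    plan,
    num_StepToRun=None,  # None means run all remaining steps
    RehearsalLog=RehearsalLog,
    AgentProfile_Dict=AgentProfile_Dict,
)
```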
backend/AgentCoord/RehearsalEngine_V2/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
backend/AgentCoord/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
backend/AgentCoord/util/__init__.py (new file, 9 lines)
@@ -0,0 +1,9 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .colorLog import print_colored
from .converter import (
    generate_template_sentence_for_CollaborationBrief,
    remove_render_spec,
    camel_case_to_normal,
    is_camel_case,
)
backend/AgentCoord/util/addRenderSpec.py (new file, 2 lines)
@@ -0,0 +1,2 @@
def basePlan_addRenderSpec(basePlan):
    return basePlan
backend/AgentCoord/util/colorLog.py (new file, 6 lines)
@@ -0,0 +1,6 @@
from termcolor import colored


# colored print
def print_colored(text, text_color="green", background="on_white"):
    print(colored(text, text_color, background))
backend/AgentCoord/util/converter.py (new file, 130 lines)
@@ -0,0 +1,130 @@
import re
import json
from AgentCoord.LLMAPI.LLMAPI import LLM_Completion


def create_agent_dict(agent_list):
    return {agent["Name"]: agent["Profile"] for agent in agent_list}


def is_camel_case(s):
    # If there are no spaces, it might be camel case
    return " " not in s


def camel_case_to_normal(s):
    # Split the camel case string into words
    return "".join(" " + c if c.isupper() else c for c in s).lstrip()


def generate_template_sentence_for_CollaborationBrief(
    input_object_list, output_object, agent_list, step_task
):
    # Check if the names are in camel case (no spaces) and convert them to normal naming convention
    input_object_list = (
        [
            camel_case_to_normal(obj) if is_camel_case(obj) else obj
            for obj in input_object_list
        ]
        if input_object_list is not None
        else None
    )
    output_object = (
        camel_case_to_normal(output_object)
        if is_camel_case(output_object)
        else output_object
    )

    # Format the agents into a string with proper grammar
    agent_str = (
        " and ".join([", ".join(agent_list[:-1]), agent_list[-1]])
        if len(agent_list) > 1
        else agent_list[0]
    )

    if input_object_list is None or len(input_object_list) == 0:
        # Combine all the parts into the template sentence
        template_sentence = f"{agent_str} perform the task of {step_task} to obtain {output_object}."
    else:
        # Format the input objects into a string with proper grammar
        input_str = (
            " and ".join(
                [", ".join(input_object_list[:-1]), input_object_list[-1]]
            )
            if len(input_object_list) > 1
            else input_object_list[0]
        )
        # Combine all the parts into the template sentence
        template_sentence = f"Based on {input_str}, {agent_str} perform the task of {step_task} to obtain {output_object}."

    return template_sentence


def remove_render_spec(duty_spec):
    if isinstance(duty_spec, dict):
        return {
            k: remove_render_spec(v)
            for k, v in duty_spec.items()
            if k != "renderSpec"
        }
    elif isinstance(duty_spec, list):
        return [remove_render_spec(item) for item in duty_spec]
    else:
        return duty_spec


def read_LLM_Completion(messages, useGroq=True):
    for _ in range(3):
        text = LLM_Completion(messages, useGroq=useGroq)

        pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
        match = re.search(pattern, text, re.DOTALL)

        if match:
            return json.loads(match.group(1).strip())

        pattern = r"\{.*\}"
        match = re.search(pattern, text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(0).strip())
            except Exception:
                pass
    raise ValueError("bad format!")


def read_json_content(text):
    """
    Extracts and returns content between ```json and ```

    :param text: The string containing the content enclosed by ```json and ```
    """
    # pattern = r"```json(.*?)```"
    pattern = r"(?:.*?```json)(.*?)(?:```.*?)"
    match = re.search(pattern, text, re.DOTALL)

    if match:
        return json.loads(match.group(1).strip())

    pattern = r"\{.*\}"
    match = re.search(pattern, text, re.DOTALL)
    if match:
        return json.loads(match.group(0).strip())

    raise ValueError("bad format!")


def read_outputObject_content(text, keyword):
    """
    Extracts and returns content between ```{keyword} and ```

    :param text: The string containing the content enclosed by ```{keyword} and ```
    """

    pattern = r"```{}(.*?)```".format(keyword)
    match = re.search(pattern, text, re.DOTALL)

    if match:
        return match.group(1).strip()
    else:
        raise ValueError("bad format!")
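A self-contained example of the fenced-block extraction performed by `read_outputObject_content` (no LLM call needed; the reply text is invented, and the fence is built with string concatenation purely to avoid nesting literal backtick fences in this listing):

```python
# Hypothetical extraction example (not part of this commit).
from AgentCoord.util.converter import read_outputObject_content

fence = "`" * 3
reply = (
    "Here is my final version.\n"
    + fence + "Story Outline\n"
    "1. An AI wakes up.\n"
    "2. It falls in love.\n"
    + fence
)
# Extracts the text enclosed by ```Story Outline ... ```
print(read_outputObject_content(reply, "Story Outline"))
```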