diff --git a/backend/AgentCoord/LLMAPI/LLMAPI.py b/backend/AgentCoord/LLMAPI/LLMAPI.py index e176316..96ff3d9 100644 --- a/backend/AgentCoord/LLMAPI/LLMAPI.py +++ b/backend/AgentCoord/LLMAPI/LLMAPI.py @@ -1,5 +1,7 @@ import asyncio -from openai import OpenAI, AsyncOpenAI + +import httpx +from openai import OpenAI, AsyncOpenAI import yaml from termcolor import colored import os @@ -21,6 +23,9 @@ OPENAI_API_BASE = os.getenv("OPENAI_API_BASE") or yaml_data.get( OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or yaml_data.get( "OPENAI_API_KEY", "" ) +OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL") or yaml_data.get( + "OPENAI_API_MODEL", "" +) # Initialize OpenAI clients client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_API_BASE) @@ -41,8 +46,11 @@ MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") or yaml_data.get( # for LLM completion def LLM_Completion( - messages: list[dict], stream: bool = True, useGroq: bool = True + messages: list[dict], stream: bool = True, useGroq: bool = True,model_config: dict = None ) -> str: + if model_config: + print_colored(f"Using model config: {model_config}", "blue") + return _call_with_custom_config(messages,stream,model_config) if not useGroq or not FAST_DESIGN_MODE: force_gpt4 = True useGroq = False @@ -75,6 +83,82 @@ def LLM_Completion( return _chat_completion(messages=messages) +def _call_with_custom_config(messages: list[dict], stream: bool, model_config: dict) ->str: + "使用自定义配置调用API" + api_url = model_config.get("apiUrl", OPENAI_API_BASE) + api_key = model_config.get("apiKey", OPENAI_API_KEY) + api_model = model_config.get("apiModel", OPENAI_API_MODEL) + + temp_client = OpenAI(api_key=api_key, base_url=api_url) + temp_async_client = AsyncOpenAI(api_key=api_key, base_url=api_url) + + try: + if stream: + try: + loop = asyncio.get_event_loop() + except RuntimeError as ex: + if "There is no current event loop in thread" in str(ex): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return 
loop.run_until_complete( + _achat_completion_stream_custom(messages=messages, temp_async_client=temp_async_client, api_model=api_model) + ) + else: + response = temp_client.chat.completions.create( + messages=messages, + model=api_model, + temperature=0.3, + max_tokens=4096, + timeout=180 + + ) + full_reply_content = response.choices[0].message.content + print(colored(full_reply_content, "blue", "on_white"), end="") + return full_reply_content + except Exception as e: + print_colored(f"Custom API error for model {api_model} :{str(e)}","red") + raise + + +async def _achat_completion_stream_custom(messages:list[dict], temp_async_client, api_model: str ) -> str: + max_retries=3 + for attempt in range(max_retries): + try: + response = await temp_async_client.chat.completions.create( + messages=messages, + model=api_model, + temperature=0.3, + max_tokens=4096, + stream=True, + timeout=180 + ) + + collected_chunks = [] + collected_messages = [] + async for chunk in response: + collected_chunks.append(chunk) + choices = chunk.choices + if len(choices) > 0: + chunk_message = chunk.choices[0].delta + collected_messages.append(chunk_message) + if chunk_message.content: + print(colored(chunk_message.content, "blue", "on_white"), end="") + print() + full_reply_content = "".join( + [m.content or "" for m in collected_messages if m is not None] + ) + return full_reply_content + except httpx.RemoteProtocolError as e: + if attempt < max_retries - 1: + wait_time = (attempt + 1) *2 + print_colored(f"⚠️ Stream connection interrupted (attempt {attempt+1}/{max_retries}). 
Retrying in {wait_time}s...", text_color="yellow") + await asyncio.sleep(wait_time) + continue + except Exception as e: + print_colored(f"Custom API stream error for model {api_model} :{str(e)}","red") + raise + + + async def _achat_completion_stream_groq(messages: list[dict]) -> str: from groq import AsyncGroq groq_client = AsyncGroq(api_key=GROQ_API_KEY) @@ -144,7 +228,7 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str: messages=messages, max_tokens=4096, temperature=0.3, - timeout=30, + timeout=600, model="gpt-3.5-turbo-16k", stream=True, ) @@ -172,16 +256,15 @@ async def _achat_completion_stream_gpt35(messages: list[dict]) -> str: return full_reply_content -async def _achat_completion_json(messages: list[dict]) -> str: +def _achat_completion_json(messages: list[dict] ) -> str: max_attempts = 5 - for attempt in range(max_attempts): try: - response = await async_client.chat.completions.create( + response = client.chat.completions.create( messages=messages, max_tokens=4096, temperature=0.3, - timeout=30, + timeout=600, model=MODEL, response_format={"type": "json_object"}, ) @@ -245,7 +328,7 @@ def _cons_kwargs(messages: list[dict]) -> dict: "messages": messages, "max_tokens": 2000, "temperature": 0.3, - "timeout": 60, + "timeout": 600, } kwargs_mode = {"model": MODEL} kwargs.update(kwargs_mode) diff --git a/backend/AgentCoord/RehearsalEngine_V2/Action/baseAction.py b/backend/AgentCoord/RehearsalEngine_V2/Action/baseAction.py index b369506..4ba519c 100644 --- a/backend/AgentCoord/RehearsalEngine_V2/Action/baseAction.py +++ b/backend/AgentCoord/RehearsalEngine_V2/Action/baseAction.py @@ -81,16 +81,33 @@ class BaseAction(): action_Record += PROMPT_TEMPLATE_ACTION_RECORD.format(AgentName = actionInfo["AgentName"], Action_Description = actionInfo["AgentName"], Action_Result = actionInfo["Action_Result"], Important_Mark = Important_Mark) # Handle missing agent profiles gracefully + model_config = None if agentName not in AgentProfile_Dict: 
print_colored(text=f"Warning: Agent '{agentName}' not found in AgentProfile_Dict. Using default profile.", text_color="yellow") agentProfile = f"AI Agent named {agentName}" else: - agentProfile = AgentProfile_Dict[agentName] - - prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format(agentName = agentName, agentProfile = agentProfile, General_Goal = General_Goal, Current_Task_Description = TaskDescription, Input_Objects = inputObject_Record, History_Action = action_Record, Action_Description = self.info["Description"], Action_Custom_Note = self.Action_Custom_Note) + # agentProfile = AgentProfile_Dict[agentName] + agent_config = AgentProfile_Dict[agentName] + agentProfile = agent_config.get("profile",f"AI Agent named {agentName}") + if agent_config.get("useCustomAPI",False): + model_config = { + "apiModel":agent_config.get("apiModel"), + "apiUrl":agent_config.get("apiUrl"), + "apiKey":agent_config.get("apiKey"), + } + prompt = PROMPT_TEMPLATE_TAKE_ACTION_BASE.format( + agentName = agentName, + agentProfile = agentProfile, + General_Goal = General_Goal, + Current_Task_Description = TaskDescription, + Input_Objects = inputObject_Record, + History_Action = action_Record, + Action_Description = self.info["Description"], + Action_Custom_Note = self.Action_Custom_Note + ) print_colored(text = prompt, text_color="red") messages = [{"role":"system", "content": prompt}] - ActionResult = LLM_Completion(messages,True,False) + ActionResult = LLM_Completion(messages,True,False,model_config=model_config) ActionInfo_with_Result = copy.deepcopy(self.info) ActionInfo_with_Result["Action_Result"] = ActionResult diff --git a/backend/config/config.yaml b/backend/config/config.yaml index 6dc6ce7..b3f1b78 100644 --- a/backend/config/config.yaml +++ b/backend/config/config.yaml @@ -1,10 +1,10 @@ ## config for default LLM -OPENAI_API_BASE: "" -OPENAI_API_KEY: "" -OPENAI_API_MODEL: "gpt-4-turbo-preview" +OPENAI_API_BASE: "https://ai.gitee.com/v1" +OPENAI_API_KEY: 
"HYCNGM39GGFNSB1F8MBBMI9QYJR3P1CRSYS2PV1A" +OPENAI_API_MODEL: "DeepSeek-V3" ## config for fast mode -FAST_DESIGN_MODE: True +FAST_DESIGN_MODE: False GROQ_API_KEY: "" MISTRAL_API_KEY: "" diff --git a/backend/server.py b/backend/server.py index a5e9898..8af354a 100644 --- a/backend/server.py +++ b/backend/server.py @@ -271,13 +271,29 @@ def Handle_saveRequestCashe(): @app.route("/setAgents", methods=["POST"]) def set_agents(): - global AgentBoard, AgentProfile_Dict + global AgentBoard, AgentProfile_Dict,yaml_data AgentBoard = request.json AgentProfile_Dict = {} for item in AgentBoard: name = item["Name"] - profile = item["Profile"] - AgentProfile_Dict[name] = profile + if all(item.get(field) for field in ["apiUrl","apiKey","apiModel"]): + agent_config = { + "profile": item["Profile"], + "apiUrl": item["apiUrl"], + "apiKey": item["apiKey"], + "apiModel": item["apiModel"], + "useCustomAPI":True + } + else: + agent_config = { + "profile": item["Profile"], + "apiUrl": yaml_data.get("OPENAI_API_BASE"), + "apiKey": yaml_data.get("OPENAI_API_KEY"), + "apiModel": yaml_data.get("OPENAI_API_MODEL"), + "useCustomAPI":False + } + AgentProfile_Dict[name] = agent_config + return jsonify({"code": 200, "content": "set agentboard successfully"}) diff --git a/frontend/src/layout/components/Main/TaskTemplate/AgentRepo/index.vue b/frontend/src/layout/components/Main/TaskTemplate/AgentRepo/index.vue index 5db626a..ec23364 100644 --- a/frontend/src/layout/components/Main/TaskTemplate/AgentRepo/index.vue +++ b/frontend/src/layout/components/Main/TaskTemplate/AgentRepo/index.vue @@ -19,7 +19,9 @@ onMounted(async () => { const res = await readConfig('agent.json') agentsStore.setAgents(res) } - await api.setAgents(agentsStore.agents.map(item => pick(item, ['Name', 'Profile']))) + await api.setAgents( + agentsStore.agents.map(item => pick(item, ['Name', 'Profile', 'apiUrl', 'apiKey', 'apiModel'])) + ) }) // 上传agent文件 @@ -118,8 +120,6 @@ const readFileContent = (file: File) => { return true }) 
- - console.log('🔄 开始处理智能体数据...') // 修改发送到后端的数据 const processedAgents = validAgents.map(agent => ({ Name: agent.Name, @@ -131,35 +131,9 @@ const readFileContent = (file: File) => { apiModel: agent.apiModel })) - console.log( - '📤 发送到后端的智能体数据:', - processedAgents.map(a => ({ - Name: a.Name, - apiUrl: a.apiUrl, - apiKey: a.apiKey ? '***' + a.apiKey.slice(-4) : '未设置', - apiModel: a.apiModel - })) - ) - - // 调用API - api - .setAgents(processedAgents) - .then(response => { - console.log('✅ 后端API调用成功,响应:', response) - ElMessage.success('智能体上传成功') - }) - .catch(error => { - console.error('❌ 后端API调用失败:', error) - ElMessage.error('智能体上传失败') - }) - - // 更新store - console.log('💾 更新智能体store...') agentsStore.setAgents(processedAgents) - console.log('✅ 智能体store更新完成') // 调用API - console.log('🌐 开始调用后端API...') api .setAgents(processedAgents) .then(() => { diff --git a/frontend/src/layout/components/Main/TaskTemplate/TaskResult/index copy.vue b/frontend/src/layout/components/Main/TaskTemplate/TaskResult/index copy.vue deleted file mode 100644 index af731bd..0000000 --- a/frontend/src/layout/components/Main/TaskTemplate/TaskResult/index copy.vue +++ /dev/null @@ -1,861 +0,0 @@ - - - - - diff --git a/frontend/src/utils/request.ts b/frontend/src/utils/request.ts index 383afac..5bbc16d 100644 --- a/frontend/src/utils/request.ts +++ b/frontend/src/utils/request.ts @@ -1,4 +1,10 @@ -import type { AxiosError, AxiosInstance, AxiosRequestConfig, AxiosResponse, InternalAxiosRequestConfig } from 'axios' +import type { + AxiosError, + AxiosInstance, + AxiosRequestConfig, + AxiosResponse, + InternalAxiosRequestConfig, +} from 'axios' import axios from 'axios' import qs from 'qs' import type { Ref } from 'vue' @@ -16,7 +22,6 @@ export interface AxiosResponseData { export function initService() { service = axios.create({ baseURL: '/api', - timeout: 50000, headers: { 'Content-Type': 'application/json;charset=utf-8' }, paramsSerializer: (params) => { return qs.stringify(params)