diff --git a/apps/routers/appcenter.py b/apps/routers/appcenter.py
index 3e05516445649b98a90a3e71b9f0c4ce89957e94..646cd8a12f469f503bc9ae061a63631cc7f2f514 100644
--- a/apps/routers/appcenter.py
+++ b/apps/routers/appcenter.py
@@ -259,7 +259,6 @@ async def get_application(appId: Annotated[uuid.UUID, Path()]) -> JSONResponse:
published=app_data.published,
name=app_data.name,
description=app_data.description,
- icon=app_data.icon,
links=app_data.links,
recommendedQuestions=app_data.first_questions,
dialogRounds=app_data.history_len,
@@ -289,7 +288,6 @@ async def get_application(appId: Annotated[uuid.UUID, Path()]) -> JSONResponse:
published=app_data.published,
name=app_data.name,
description=app_data.description,
- icon=app_data.icon,
links=[],
recommendedQuestions=[],
dialogRounds=app_data.history_len,
diff --git a/apps/scheduler/call/cmd/__init__.py b/apps/scheduler/call/cmd/__init__.py
deleted file mode 100644
index ae0a99e47f7d13c02f056b035ebcb3e174750614..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""命令生成工具"""
diff --git a/apps/scheduler/call/cmd/assembler.py b/apps/scheduler/call/cmd/assembler.py
deleted file mode 100644
index 6ac19edb0f3751b36c028f0d4f52682c6ba1248b..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/assembler.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""命令行组装器"""
-
-import string
-from typing import Any, Literal, Optional
-
-
-class CommandlineAssembler:
- """命令行组装器"""
-
- @staticmethod
- def convert_dict_to_cmdline(args_dict: dict[str, Any], usage: str) -> str:
- """将字典转换为命令行"""
- opts_result = ""
- for key, val in args_dict["opts"].items():
- if isinstance(val, bool) and val:
- opts_result += f" {key}"
- continue
-
- opts_result += f" {key} {val}"
- # opts_result = opts_result.lstrip(" ") + " ${OPTS}"
- opts_result = opts_result.lstrip(" ")
-
- result = string.Template(usage)
- return result.safe_substitute(OPTS=opts_result, **args_dict["args"])
-
- @staticmethod
- def get_command(instruction: str, collection_name: str) -> str:
- """获取命令行"""
- collection = VectorDB.get_collection(collection_name)
- return VectorDB.get_docs(collection, instruction, {"type": "binary"}, 1)[0].metadata["name"]
-
- @staticmethod
- def _documents_to_choices(docs: list[DocumentWrapper]) -> list[dict[str, Any]]:
- return [{
- "name": doc.metadata["name"],
- "description": doc.data,
- } for doc in docs]
-
- @staticmethod
- def get_data(
- query_type: Literal["subcommand", "global_option", "option", "argument"],
- instruction: str, collection_name: str, binary_name: str, subcmd_name: Optional[str] = None, num: int = 5,
- ) -> list[dict[str, Any]]:
- collection = VectorDB.get_collection(collection_name)
- if collection is None:
- err = f"Collection {collection_name} not found"
- raise ValueError(err)
-
- # Query certain type
- requirements = {
- "$and": [
- {"type": query_type},
- {"binary": binary_name},
- ],
- }
- if subcmd_name is not None:
- requirements["$and"].append({"subcmd": subcmd_name})
-
- result_list = VectorDB.get_docs(collection, instruction, requirements, num)
-
- return CommandlineAssembler._documents_to_choices(result_list)
-
- @staticmethod
- async def select_option(instruction: str, choices: list[dict[str, Any]]) -> tuple[str, str]:
- """选择当前最合适的命令行选项"""
- top_option = await Select().generate(choices, instruction=instruction)
- top_option_description = [choice["description"] for choice in choices if choice["name"] == top_option]
- return top_option, top_option_description[0]
diff --git a/apps/scheduler/call/cmd/cmd.py b/apps/scheduler/call/cmd/cmd.py
deleted file mode 100644
index c4585a5509ea74e6384af9642ad7743c70abcf76..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/cmd.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""自然语言生成命令"""
-
-from collections.abc import AsyncGenerator
-from typing import Any
-
-from pydantic import Field
-
-from apps.models import LanguageType
-from apps.scheduler.call.core import CoreCall
-from apps.schemas.scheduler import CallInfo, CallOutputChunk
-
-
-class Cmd(CoreCall):
- """Cmd工具。用于根据BTDL描述文件,生成命令。"""
-
- exec_name: str | None = Field(default=None, description="命令中可执行文件的名称,可选")
- args: list[str] = Field(default=[], description="命令中可执行文件的参数(例如 `--help`),可选")
-
- @classmethod
- def info(cls, language: LanguageType = LanguageType.CHINESE) -> CallInfo:
- """返回Call的名称和描述"""
- i18n_info = {
- LanguageType.CHINESE: CallInfo(name="Cmd", description="根据BTDL描述文件,生成命令。"),
- LanguageType.ENGLISH: CallInfo(
- name="Cmd", description="Generate commands based on BTDL description files.",
- ),
- }
- return i18n_info[language]
-
- async def _exec(self, _slot_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]:
- """调用Cmd工具"""
-
diff --git a/apps/scheduler/call/cmd/prompt.py b/apps/scheduler/call/cmd/prompt.py
deleted file mode 100644
index 6172fbd40b71ab306c0dcdc762f1527b1493283c..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/prompt.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""命令行生成器相关提示词"""
-
-from textwrap import dedent
-
-from apps.models import LanguageType
-
-CREATE: dict[LanguageType, str] = {
- LanguageType.CHINESE: dedent(r"""
-
-
- 你是一个计划生成器。对于给定的目标,**制定一个简单的计划**,该计划可以逐步生成合适的命令行参数和标志。
-
- 你会收到一个"命令前缀",这是已经确定和生成的命令部分。你需要基于这个前缀使用标志和参数来完成命令。
-
- 在每一步中,指明使用哪个外部工具以及工具输入来获取证据。
-
- 工具可以是以下之一:
- (1) Option["指令"]:查询最相似的命令行标志。只接受一个输入参数,"指令"必须是搜索字符串。\
-搜索字符串应该详细且包含必要的数据。
- (2) Argument[名称]<值>:将任务中的数据放置到命令行的特定位置。接受两个输入参数。
-
- 所有步骤必须以"Plan: "开头,且少于150个单词。
- 不要添加任何多余的步骤。
- 确保每个步骤都包含所需的所有信息 - 不要跳过步骤。
- 不要在证据后面添加任何额外数据。
-
-
-
-
- 开始示例
-
- 任务:在后台运行一个新的alpine:latest容器,将主机/root文件夹挂载至/data,并执行top命令。
- 前缀:`docker run`
- 用法:`docker run ${OPTS} ${image} ${command}`。这是一个Python模板字符串。OPTS是所有标志的\
-占位符。参数必须是 ["image", "command"] 其中之一。
- 前缀描述:二进制程序`docker`的描述为"Docker容器平台",`run`子命令的描述为"从镜像创建并运行一个新的容器"。
-
- Plan: 我需要一个标志使容器在后台运行。 #E1 = Option[在后台运行单个容器]
- Plan: 我需要一个标志,将主机/root目录挂载至容器内/data目录。 #E2 = \
-Option[挂载主机/root目录至/data目录]
- Plan: 我需要从任务中解析出镜像名称。 #E3 = Argument[image]
- Plan: 我需要指定容器中运行的命令。 #E4 = Argument[command]
- Final: 组装上述线索,生成最终命令。 #F
-
-
-
-
- 任务:{{instruction}}
- 前缀:`{{binary_name}} {{subcmd_name}}`
- 用法:`{{subcmd_usage}}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 {{argument_list}} \
-其中之一。
- 前缀描述:二进制程序`{{binary_name}}`的描述为"{{binary_description}}",`{{subcmd_name}}`子命令的描述为\
- "{{subcmd_description}}"。
-
- 现在生成相应的计划:
- """),
- LanguageType.ENGLISH: dedent(r"""
-
-
- You are a plan generator. For a given goal, **draft a simple plan** that can step-by-step \
-generate the appropriate command line arguments and flags.
-
- You will receive a "command prefix", which is the already determined and generated command \
-part. You need to use the flags and arguments based on this prefix to complete the command.
-
- In each step, specify which external tool to use and the tool input to get the evidence.
-
- The tool can be one of the following:
- (1) Option["instruction"]: Query the most similar command line flag. Only accepts one input \
-parameter, "instruction" must be a search string. The search string should be detailed and contain necessary data.
- (2) Argument[name]<value>: Place the data from the task into a specific position in the \
-command line. Accepts two input parameters.
-
- All steps must start with "Plan: " and be less than 150 words.
- Do not add any extra steps.
- Ensure each step contains all the required information - do not skip steps.
- Do not add any extra data after the evidence.
-
-
-
- Task: Run a new alpine:latest container in the background, mount the host /root folder to \
-/data, and execute the top command.
- Prefix: `docker run`
- Usage: `docker run ${OPTS} ${image} ${command}`. This is a Python template string. OPTS is \
-a placeholder for all flags. The arguments must be one of ["image", "command"].
- Prefix description: The description of binary program `docker` is "Docker container platform"\
-, and the description of `run` subcommand is "Create and run a new container from an image".
-
- Plan: I need a flag to make the container run in the background. #E1 = Option[Run a single \
-container in the background]
- Plan: I need a flag to mount the host /root directory to /data directory in the \
-container. #E2 = Option[Mount host /root directory to /data directory]
- Plan: I need to parse the image name from the task. #E3 = Argument[image]
- Plan: I need to specify the command to be run in the container. #E4 = Argument[command]
- Final: Assemble the above clues to generate the final command. #F
-
-
-
- Task: {{instruction}}
- Prefix: `{{binary_name}} {{subcmd_name}}`
- Usage: `{{subcmd_usage}}`. This is a Python template string. OPTS is a placeholder for all flags. \
-The arguments must be one of {{argument_list}}.
- Prefix description: The description of binary program `{{binary_name}}` is "{{binary_description}}", \
-and the description of `{{subcmd_name}}` subcommand is "{{subcmd_description}}".
-
- Generate the corresponding plan now:
- """),
-}
-
-EVALUATE: dict[LanguageType, str] = {
- LanguageType.CHINESE: dedent(r"""
-
-
- 你是一个计划评估器。你的任务是评估给定的计划是否合理和完整。
-
- 一个好的计划应该:
- 1. 涵盖原始任务的所有要求
- 2. 使用适当的工具收集必要的信息
- 3. 具有清晰和逻辑的步骤
- 4. 没有冗余或不必要的步骤
-
- 对于计划中的每个步骤,评估:
- 1. 工具选择是否适当
- 2. 输入参数是否清晰和充分
- 3. 该步骤是否有助于实现最终目标
-
- 请回复:
- "VALID" - 如果计划良好且完整
- "INVALID: <原因>" - 如果计划有问题,请解释原因
-
-
-
- 任务:{{instruction}}
- 计划:{{plan}}
-
- 现在评估计划,并回复"VALID"或"INVALID: <原因>":
- """),
- LanguageType.ENGLISH: dedent(r"""
-
-
- You are a plan evaluator. Your task is to assess whether the given plan is reasonable and complete.
-
- A good plan should:
- 1. Cover all requirements of the original task
- 2. Use appropriate tools to gather the necessary information
- 3. Have clear and logical steps
- 4. Contain no redundant or unnecessary steps
-
- For each step in the plan, evaluate:
- 1. Whether the tool choice is appropriate
- 2. Whether the input parameters are clear and sufficient
- 3. Whether the step helps achieve the final goal
-
- Please reply with:
- "VALID" - if the plan is sound and complete
- "INVALID: <reason>" - if the plan has problems, explain why
-
-
- Task: {{instruction}}
- Plan: {{plan}}
-
- Now evaluate the plan and reply "VALID" or "INVALID: <reason>":
- """),
-}
-
-REPLAN: dict[LanguageType, str] = {
- LanguageType.CHINESE: dedent(r"""
-
-
- 你是一个计划重新规划器。当计划被评估为无效时,你需要生成一个新的、改进的计划。
-
- 新计划应该:
- 1. 解决评估中提到的所有问题
- 2. 保持与原始计划相同的格式
- 3. 更加精确和完整
- 4. 为每个步骤使用适当的工具
-
- 遵循与原始计划相同的格式:
- - 每个步骤应以"Plan: "开头
- - 包含带有适当参数的工具使用
- - 保持步骤简洁和重点突出
- - 以"Final"步骤结束
-
-
-
- 任务:{{instruction}}
- 原始计划:{{plan}}
- 评估:{{evaluation}}
-
- 生成一个新的、改进的计划,解决评估中提到的所有问题:
- """),
- LanguageType.ENGLISH: dedent(r"""
-
-
- You are a plan replanner. When the plan is evaluated as invalid, you need to generate a new, \
-improved plan.
-
- The new plan should:
- 1. Solve all problems mentioned in the evaluation
- 2. Keep the same format as the original plan
- 3. Be more precise and complete
- 4. Use appropriate tools for each step
-
- Follow the same format as the original plan:
- - Each step should start with "Plan: "
- - Include tool usage with appropriate parameters
- - Keep steps concise and focused
- - End with the "Final" step
-
-
-
- Task: {{instruction}}
- Original Plan: {{plan}}
- Evaluation: {{evaluation}}
-
- Now, generate a new, improved plan that solves all problems mentioned in the evaluation:
- """),
-}
diff --git a/apps/scheduler/call/cmd/schema.py b/apps/scheduler/call/cmd/schema.py
deleted file mode 100644
index 107032df956569e32f5529b3d21309da8452637a..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/schema.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""命令行生成工具 数据结构"""
diff --git a/apps/scheduler/call/cmd/solver.py b/apps/scheduler/call/cmd/solver.py
deleted file mode 100644
index 49cdd1714a6a3ee4814dc076b7b8f1f2ad32652a..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/cmd/solver.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""命令行解析器"""
-
-import copy
-import re
-from typing import Any
-
-from apps.scheduler.call.cmd.assembler import CommandlineAssembler
-
-
-class Solver:
- """解析命令行生成器"""
-
- @staticmethod
- async def _get_option(agent_input: str, collection_name: str, binary_name: str, subcmd_name: str, spec: dict[str, Any]) -> tuple[str, dict[str, Any], str]:
- """选择最匹配的命令行参数"""
- # 选择最匹配的Global Options
- global_options = CommandlineAssembler.get_data("global_option", agent_input, collection_name, binary_name, num=2)
- # 选择最匹配的Options
- options = CommandlineAssembler.get_data("option", agent_input, collection_name, binary_name, subcmd_name, 3)
- # 判断哪个更符合标准
- choices = options + global_options
- result, result_desc = await CommandlineAssembler.select_option(agent_input, choices)
-
- option_type = ""
- # 从BTDL里面拿出JSON Schema
- if not option_type:
- for opt in global_options:
- if result == opt["name"]:
- option_type = "global_option"
- break
- if not option_type:
- for opt in options:
- if result == opt["name"]:
- option_type = "option"
- break
-
- if option_type == "global_option":
- spec = spec[binary_name][1][result]
- elif option_type == "option":
- spec = spec[binary_name][2][subcmd_name][2][result]
- else:
- err = "No option found."
- raise ValueError(err)
-
- # 返回参数名字、JSON Schema、描述
- return result, spec, result_desc
-
- @staticmethod
- async def _get_value(question: str, description: str, spec: dict[str, Any]) -> dict[str, Any]:
- """根据用户目标和JSON Schema,生成命令行参数"""
- gen_input = f"""
- 用户的目标为: [[{question}]]
-
- 依照JSON Schema,生成下列参数:
- {description}
-
- 严格按照JSON Schema格式输出,不要添加或编造字段。"""
- return await Json().generate(question=gen_input, background="Empty.", spec=spec)
-
-
- @staticmethod
- async def process_output(output: str, question: str, collection_name: str, binary_name: str, subcmd_name: str, spec: dict[str, Any]) -> tuple[str, str]: # noqa: PLR0913
- """对规划器输出的evidence进行解析,生成命令行参数"""
- spec_template = {
- "type": "object",
- "properties": {},
- }
- opt_spec = copy.deepcopy(spec_template)
- full_opt_desc = ""
- arg_spec = copy.deepcopy(spec_template)
- full_arg_desc = ""
-
- lines = output.split("\n")
- for line in lines:
- if not line.startswith("Plan:"):
- continue
-
- evidence = re.search(r"#E.*", line)
- if not evidence:
- continue
- evidence = evidence.group(0)
-
- if "Option" in evidence:
- action_input = re.search(r"\[.*\]", evidence)
- if not action_input:
- continue
- action_input = action_input.group(0)
- action_input = action_input.rstrip("]").lstrip("[")
- opt_name, single_opt_spec, opt_desc = await Solver._get_option(action_input, collection_name, binary_name, subcmd_name, spec)
-
- opt_spec["properties"].update({opt_name: single_opt_spec})
- full_opt_desc += f"- {opt_name}: {opt_desc}\n"
-
- elif "Argument" in evidence:
- name = re.search(r"\[.*\]", evidence)
- if not name:
- continue
- name = name.group(0)
- name = name.rstrip("]").lstrip("[")
- name = name.lower()
-
- if name not in spec[binary_name][2][subcmd_name][3]:
- continue
-
- value = re.search(r"<.*>", evidence)
- if not value:
- continue
- value = value.group(0)
- value = value.rstrip(">").lstrip("<")
-
- arg_spec["properties"].update({name: spec[binary_name][2][subcmd_name][3][name]})
- arg_desc = spec[binary_name][2][subcmd_name][3][name]["description"]
- full_arg_desc += f"- {name}: {arg_desc}. 可能的值: {value}.\n"
-
- result_dict = {
- "opts": {},
- "args": {},
- }
- result_dict["opts"].update(await Solver._get_value(question, full_opt_desc, opt_spec))
- result_dict["args"].update(await Solver._get_value(question, full_arg_desc, arg_spec))
-
- result_cmd = CommandlineAssembler.convert_dict_to_cmdline(result_dict, spec[binary_name][2][subcmd_name][1])
- full_description = "各命令行标志的描述为:\n" + full_opt_desc + "\n\n各参数的描述为:\n" + full_arg_desc
- return result_cmd, full_description
diff --git a/apps/scheduler/call/core.py b/apps/scheduler/call/core.py
index 8628af9fd05ba7ed451a7eed3f07cb78cbee5c43..b350b9bf635296d4996d331c9ab25fc3922b0140 100644
--- a/apps/scheduler/call/core.py
+++ b/apps/scheduler/call/core.py
@@ -128,6 +128,7 @@ class CoreCall(BaseModel):
step_order=history_order,
background=executor.background,
thinking=executor.task.runtime.reasoning,
+ app_metadata=executor.app_metadata,
)
diff --git a/apps/scheduler/call/llm/llm.py b/apps/scheduler/call/llm/llm.py
index a3dc92b890bdb04520b565eb40b090be55e559f6..8ec8ad56cddbf1ceb9f9a1155ee41de11a5bddd7 100644
--- a/apps/scheduler/call/llm/llm.py
+++ b/apps/scheduler/call/llm/llm.py
@@ -21,7 +21,6 @@ from apps.schemas.scheduler import (
CallVars,
)
-from .prompt import LLM_CONTEXT_PROMPT, LLM_DEFAULT_PROMPT
from .schema import LLMInput, LLMOutput
if TYPE_CHECKING:
@@ -38,9 +37,9 @@ class LLM(CoreCall, input_model=LLMInput, output_model=LLMOutput):
# 大模型参数
temperature: float = Field(description="大模型温度(随机化程度)", default=0.7)
step_history_size: int = Field(description="上下文信息中包含的步骤历史数量", default=3, ge=0, le=10)
- history_length: int = Field(description="历史对话记录数量", default=0, ge=0)
+ history_length: int | None = Field(description="历史对话记录数量", default=None, ge=0)
system_prompt: str = Field(description="大模型系统提示词", default="You are a helpful assistant.")
- user_prompt: str = Field(description="大模型用户提示词", default=LLM_DEFAULT_PROMPT)
+ user_prompt: str | None = Field(description="大模型用户提示词", default=None)
@classmethod
@@ -84,20 +83,15 @@ class LLM(CoreCall, input_model=LLMInput, output_model=LLMOutput):
else:
context_prompt = "无背景信息。"
- # 历史对话记录
history_messages = []
- if self.history_length > 0:
- # 从 conversation 中提取历史记录
+ history_len = self.history_length
+ if history_len is None and call_vars.app_metadata is not None:
+ history_len = call_vars.app_metadata.history_len
+
+ if history_len is not None and history_len > 0:
conversation = self._sys_vars.background.conversation
- # 取最后 history_length 条记录
- recent_conversation = conversation[-self.history_length:]
- # 将历史记录转换为消息格式
- for item in recent_conversation:
- if "question" in item and "answer" in item:
- history_messages.extend([
- {"role": "user", "content": item["question"]},
- {"role": "assistant", "content": item["answer"]},
- ])
+ # 每对消息包含 2 条记录(user + assistant)
+ history_messages = conversation[-history_len * 2:]
# 参数
time = datetime.now(tz=pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
@@ -114,18 +108,16 @@ class LLM(CoreCall, input_model=LLMInput, output_model=LLMOutput):
system_input = system_tmpl.render(**formatter)
# 准备用户提示词
- user_tmpl = env.from_string(self.user_prompt)
+ user_prompt = self.user_prompt if self.user_prompt is not None else self._load_prompt("llm")
+ user_tmpl = env.from_string(user_prompt)
user_input = user_tmpl.render(**formatter)
except Exception as e:
raise CallError(message=f"用户提示词渲染失败:{e!s}", data={}) from e
# 构建消息列表,将历史消息放在前面
- messages = []
+ messages = [{"role": "system", "content": system_input}]
messages.extend(history_messages)
- messages.extend([
- {"role": "system", "content": system_input},
- {"role": "user", "content": user_input},
- ])
+ messages.append({"role": "user", "content": user_input})
return messages
diff --git a/apps/scheduler/call/llm/prompt.py b/apps/scheduler/call/llm/prompt.py
deleted file mode 100644
index 0d4ccaa95775d049d5ef20383e335c1298c77481..0000000000000000000000000000000000000000
--- a/apps/scheduler/call/llm/prompt.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
-"""大模型工具的提示词"""
-
-from textwrap import dedent
-
-from apps.models import LanguageType
-
-LLM_CONTEXT_PROMPT: dict[LanguageType, str] = {
- LanguageType.CHINESE: dedent(
- r"""
- 以下是AI处理用户指令时所做的思考,在中给出:
-
- {{ reasoning }}
-
-
- 你作为AI,在完成用户指令前,需要获取必要的信息。为此,你调用了一些工具,并获得了它们的输出:
- 工具的输出数据将在中给出, 其中为工具的名称,