From 4a0ee9cae16719c879f3cfbfa4e6ce89c3aec369 Mon Sep 17 00:00:00 2001
From: Ethan-Zhang
Date: Mon, 10 Nov 2025 21:41:00 +0800
Subject: [PATCH] Fix: final configmap correction
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../configs/framework/config-authelia.toml |  6 +++---
 .../configs/framework/config.toml          |  6 +++---
 deploy/chart/euler_copilot/values.yaml     | 27 +++++++++++++++----
 3 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/deploy/chart/euler_copilot/configs/framework/config-authelia.toml b/deploy/chart/euler_copilot/configs/framework/config-authelia.toml
index 47bd3040b..9eb751976 100644
--- a/deploy/chart/euler_copilot/configs/framework/config-authelia.toml
+++ b/deploy/chart/euler_copilot/configs/framework/config-authelia.toml
@@ -112,9 +112,9 @@ provider = '{{ default .Values.models.answer.provider .Values.models.functionCal
 endpoint = '{{ default .Values.models.answer.endpoint .Values.models.functionCall.endpoint }}'
 model = '{{ default .Values.models.answer.name .Values.models.functionCall.name }}'
 api_key = '{{ default .Values.models.answer.key .Values.models.functionCall.key }}'
-ctx_length = {{ default .Values.models.answer.ctxLength .Values.models.functionCall.ctxLength }}
-max_tokens = {{ default .Values.models.answer.maxTokens .Values.models.functionCall.maxTokens }}
-temperature = {{ default .Values.models.answer.temperature .Values.models.functionCall.temperature }}
+ctx_length = {{ default 8192 (default .Values.models.answer.ctxLength .Values.models.functionCall.ctxLength) }}
+max_tokens = {{ default 2048 (default .Values.models.answer.maxTokens .Values.models.functionCall.maxTokens) }}
+temperature = {{ default 0.7 (default .Values.models.answer.temperature .Values.models.functionCall.temperature) }}
 
 [check]
 enable = false
diff --git a/deploy/chart/euler_copilot/configs/framework/config.toml b/deploy/chart/euler_copilot/configs/framework/config.toml
index 98a876382..e079db2fe 100644
--- a/deploy/chart/euler_copilot/configs/framework/config.toml
+++ b/deploy/chart/euler_copilot/configs/framework/config.toml
@@ -104,9 +104,9 @@ provider = '{{ default .Values.models.answer.provider .Values.models.functionCal
 endpoint = '{{ default .Values.models.answer.endpoint .Values.models.functionCall.endpoint }}'
 model = '{{ default .Values.models.answer.name .Values.models.functionCall.name }}'
 api_key = '{{ default .Values.models.answer.key .Values.models.functionCall.key }}'
-ctx_length = {{ default .Values.models.answer.ctxLength .Values.models.functionCall.ctxLength }}
-max_tokens = {{ default .Values.models.answer.maxTokens .Values.models.functionCall.maxTokens }}
-temperature = {{ default .Values.models.answer.temperature .Values.models.functionCall.temperature }}
+ctx_length = {{ default 8192 (default .Values.models.answer.ctxLength .Values.models.functionCall.ctxLength) }}
+max_tokens = {{ default 2048 (default .Values.models.answer.maxTokens .Values.models.functionCall.maxTokens) }}
+temperature = {{ default 0.7 (default .Values.models.answer.temperature .Values.models.functionCall.temperature) }}
 
 [check]
 enable = false
diff --git a/deploy/chart/euler_copilot/values.yaml b/deploy/chart/euler_copilot/values.yaml
index b74f7fb5b..9b7c7f9c2 100644
--- a/deploy/chart/euler_copilot/values.yaml
+++ b/deploy/chart/euler_copilot/values.yaml
@@ -12,7 +12,10 @@ globals:
 models:
   # LLM used for Q&A; must expose an OpenAI-compatible API
   answer:
-    # Available framework types: ["vllm", "sglang", "ollama", "openai"]
+    # Online LLM platforms supported: ["bailian", "wenxin", "siliconflow", "spark", "baichuan"], etc.
+    # Offline LLM inference services supported: ["mindie", "vllm", "ollama", "modelscope"], etc.
+    # For additional providers, see deploy/chart/euler_copilot/configs/framework/provider_config_summary.md and adjust the related configuration
+    # [Required] Model provider
     provider:
     # [Required] API URL (check the API provider's documentation on whether the "v1" suffix is needed)
     endpoint:
@@ -22,10 +25,17 @@ models:
     name:
     # [Required] Maximum context length of the model; >=8192 recommended
     ctxLength: 8192
-    # Maximum output length of the model; >=2048 recommended
+    # [Required] Maximum output length of the model; >=2048 recommended
     maxTokens: 2048
+    # [Required] Model temperature, controlling the randomness and creativity of generated text; 0.7 recommended
+    temperature: 0.7
   # Model used for Function Call; a dedicated inference framework is recommended
   functionCall:
+    # Online LLM platforms supported: ["bailian", "wenxin", "siliconflow", "spark", "baichuan"], etc.
+    # Offline LLM inference services supported: ["mindie", "vllm", "ollama", "modelscope"], etc.
+    # For additional providers, see deploy/chart/euler_copilot/configs/framework/provider_config_summary.md and adjust the related configuration
+    # Model provider; falls back to the Q&A model if unset
+    provider:
     # [Required] Model URL; check the API provider's documentation on whether the "v1" suffix is needed
     # Falls back to the Q&A model if unset
     endpoint:
@@ -37,10 +47,14 @@ models:
     ctxLength:
     # Maximum output length of the model; falls back to the Q&A model if unset
     maxTokens:
+    # Model temperature, controlling the randomness and creativity of generated text; 0.7 recommended; falls back to the Q&A model if unset
+    temperature:
   # Model used for data vectorization (Embedding)
   embedding:
-    # Inference framework type, defaults to openai
-    # [Required] Embedding API type: ["openai", "mindie"]
+    # Online LLM platforms supported: ["bailian", "wenxin", "siliconflow", "spark", "baichuan"], etc.
+    # Offline LLM inference services supported: ["mindie", "vllm", "ollama", "modelscope"], etc.
+    # For additional providers, see deploy/chart/euler_copilot/configs/framework/provider_config_summary.md and adjust the related configuration
+    # [Required] Model provider
     provider:
     # [Required] Embedding URL (the "v1" suffix is required)
     endpoint:
@@ -50,7 +64,10 @@ models:
     name:
   # Model used to rerank RAG retrieval results; supports APIs from providers such as SiliconFlow, Bailian, vLLM, and Ascend
   reranker:
-    # [Required] reranker API type: ["guijiliudong", "bailian", "v1lm", "assecend"]
+    # Online LLM platforms supported: ["bailian", "wenxin", "siliconflow", "spark", "baichuan"], etc.
+    # Offline LLM inference services supported: ["mindie", "vllm", "ollama", "modelscope"], etc.
+    # For additional providers, see deploy/chart/euler_copilot/configs/framework/provider_config_summary.md and adjust the related configuration
+    # [Required] Model provider
     provider:
     # [Required] reranker URL (the "/v1/rerank" suffix is required)
     endpoint:
-- 
Gitee
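
A note on the nested default calls above: Sprig's default helper returns its second argument unless that argument is empty, so each rendered value prefers the functionCall setting, then the answer setting, then the hard-coded literal. Below is a minimal sketch of how ctx_length renders under a hypothetical values.yaml override; the override values are illustrative only and are not part of the chart.

  # Hypothetical override in values.yaml (illustrative):
  #   models.answer.ctxLength: 16384
  #   models.functionCall.ctxLength:    # deliberately left empty
  #
  # Template line from config.toml:
  ctx_length = {{ default 8192 (default .Values.models.answer.ctxLength .Values.models.functionCall.ctxLength) }}
  #
  # Rendered result:
  #   ctx_length = 16384    # functionCall is empty, so answer.ctxLength is used;
  #                         # the literal 8192 applies only when both are empty

The same resolution order applies to max_tokens and temperature. One general Sprig caveat: default treats zero values as empty, so an explicit temperature: 0 in values.yaml would still render the 0.7 fallback.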