diff --git a/apps/routers/llm.py b/apps/routers/llm.py
index 3ab65eb157ed780193e4b7415e87befe3e5677c8..7990f0aca00e05d0c6de1cd30b629685a10e5cd9 100644
--- a/apps/routers/llm.py
+++ b/apps/routers/llm.py
@@ -3,13 +3,14 @@
 from typing import cast
 
-from fastapi import APIRouter, Depends, status
+from fastapi import APIRouter, Depends, Request, status
 from fastapi.encoders import jsonable_encoder
 from fastapi.responses import JSONResponse
 
 from apps.dependency import verify_admin, verify_personal_token
 from apps.schemas.request_data import (
     UpdateLLMReq,
+    UpdateSpecialLlmReq,
 )
 from apps.schemas.response_data import (
     ListLLMAdminRsp,
@@ -19,6 +20,7 @@ from apps.schemas.response_data import (
     ResponseData,
 )
 from apps.services.llm import LLMManager
+from apps.services.settings import SettingsManager
 
 router = APIRouter(
     prefix="/api/llm",
@@ -71,27 +73,38 @@ async def list_llm(llmId: str | None = None) -> JSONResponse:  # noqa: N803
     )
 
 
-@admin_router.get("/config", response_model=ListLLMAdminRsp,
+@admin_router.get("/config", response_model=ResponseData,
     responses={status.HTTP_404_NOT_FOUND: {"model": ResponseData}},
 )
-async def admin_list_llm(llmId: str | None = None) -> JSONResponse:  # noqa: N803
-    """GET /llm/config: 获取大模型配置列表(管理员视图)"""
-    llm_list_raw = await LLMManager.list_llm(llmId, admin_view=True)
+async def get_llm_config(llmId: str) -> JSONResponse:  # noqa: N803
+    """GET /llm/config: 获取单个大模型的详细配置信息(管理员视图)"""
+    llm = await LLMManager.get_llm(llmId)
 
-    # 检查返回类型是否符合预期
-    if llm_list_raw and not all(isinstance(item, LLMAdminInfo) for item in llm_list_raw):
+    if not llm:
         return JSONResponse(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            status_code=status.HTTP_404_NOT_FOUND,
             content=jsonable_encoder(
                 ResponseData(
-                    code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                    message="大模型配置列表数据类型不符合预期",
+                    code=status.HTTP_404_NOT_FOUND,
+                    message=f"大模型 {llmId} 不存在",
                     result=None,
                 ).model_dump(exclude_none=True, by_alias=True),
             ),
         )
-    llm_list = cast("list[LLMAdminInfo]", llm_list_raw)
+    llm_config = LLMAdminInfo(
+        llmId=llm.id,
+        llmDescription=llm.llmDescription,
+        llmType=llm.llmType,
+        baseUrl=llm.baseUrl,
+        apiKey=llm.apiKey,
+        modelName=llm.modelName,
+        maxTokens=llm.maxToken,
+        ctxLength=llm.ctxLength,
+        temperature=llm.temperature,
+        provider=llm.provider.value if llm.provider else None,
+        extraConfig=llm.extraConfig,
+    )
 
     return JSONResponse(
         status_code=status.HTTP_200_OK,
@@ -99,22 +112,54 @@ async def admin_list_llm(llmId: str | None = None) -> JSONResponse:  # noqa: N803
             ListLLMAdminRsp(
                 code=status.HTTP_200_OK,
                 message="success",
-                result=llm_list,
+                result=llm_config,
             ).model_dump(exclude_none=True, by_alias=True),
         ),
     )
 
 
+@admin_router.put("/setting", response_model=ResponseData,
+    responses={status.HTTP_404_NOT_FOUND: {"model": ResponseData}},
+)
+async def change_global_llm(request: Request, req: UpdateSpecialLlmReq) -> JSONResponse:
+    """PUT /llm/setting: 更改全局Function和Embedding模型设置(管理员)"""
+    try:
+        user_id = request.state.user_id
+
+        await SettingsManager.update_global_llm_settings(user_id, req)
+
+        return JSONResponse(
+            status_code=status.HTTP_200_OK,
+            content=jsonable_encoder(
+                ResponseData(
+                    code=status.HTTP_200_OK,
+                    message="success",
+                    result={
+                        "functionLLM": req.functionLLM,
+                        "embeddingLLM": req.embeddingLLM,
+                    },
+                ).model_dump(exclude_none=True, by_alias=True),
+            ),
+        )
+    except ValueError as e:
+        return JSONResponse(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            content=jsonable_encoder(
+                ResponseData(
+                    code=status.HTTP_400_BAD_REQUEST,
+                    message=str(e),
+                    result=None,
+                ).model_dump(exclude_none=True, by_alias=True),
+            ),
+        )
+
+
 @admin_router.put("",
     responses={status.HTTP_404_NOT_FOUND: {"model": ResponseData}},
 )
-async def create_llm(
-    req: UpdateLLMReq,
-    llmId: str,  # noqa: N803
-) -> JSONResponse:
+async def create_llm(req: UpdateLLMReq) -> JSONResponse:
     """PUT /llm: 创建或更新大模型配置"""
     try:
-        await LLMManager.update_llm(llmId, req)
+        await LLMManager.update_llm(req.llm_id, req)
     except ValueError as e:
         return JSONResponse(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
@@ -132,7 +177,7 @@ async def create_llm(
             ResponseData(
                 code=status.HTTP_200_OK,
                 message="success",
-                result=llmId,
+                result=req.llm_id,
             ).model_dump(exclude_none=True, by_alias=True),
         ),
     )
diff --git a/apps/schemas/request_data.py b/apps/schemas/request_data.py
index 8f966f3f5b701a1c56119e1482ede18843cd56d2..28df1ab204e4db64243cb93af788d3b3074789ff 100644
--- a/apps/schemas/request_data.py
+++ b/apps/schemas/request_data.py
@@ -6,7 +6,7 @@
 from typing import Any
 
 from pydantic import BaseModel, Field
 
-from apps.models import LanguageType, LLMProvider
+from apps.models import LanguageType, LLMProvider, LLMType
 
 from .flow_topology import FlowItem
@@ -58,6 +58,7 @@ class UpdateLLMReq(BaseModel):
     provider: LLMProvider = Field(description="大模型提供商", alias="provider")
     ctx_length: int = Field(description="上下文长度", alias="ctxLength")
     llm_description: str = Field(default="", description="大模型描述", alias="llmDescription")
+    llm_type: list[LLMType] | None = Field(default=None, description="大模型类型列表", alias="llmType")
     extra_data: dict[str, Any] | None = Field(default=None, description="额外数据", alias="extraData")
diff --git a/apps/schemas/response_data.py b/apps/schemas/response_data.py
index 93541887cdecaa9a21d6d9443c643c5a16b7104e..f59a656c8320862102483f205a032cee3d2574f1 100644
--- a/apps/schemas/response_data.py
+++ b/apps/schemas/response_data.py
@@ -93,7 +93,7 @@ class LLMAdminInfo(BaseModel):
 class ListLLMAdminRsp(ResponseData):
     """GET /api/llm/config 返回数据结构"""
 
-    result: list[LLMAdminInfo] = Field(default=[], title="Result")
+    result: LLMAdminInfo | None = Field(default=None, title="Result")
 
 
 class ParamsNode(BaseModel):
diff --git a/apps/services/llm.py b/apps/services/llm.py
index 8f99df31fc445297358cd6277562d1d07fcff671..a3774f0022e3d00fffc2a95df0fd81d1f3b738df 100644
--- a/apps/services/llm.py
+++ b/apps/services/llm.py
@@ -99,7 +99,7 @@
 
     @staticmethod
-    async def update_llm(llm_id: str, req: UpdateLLMReq) -> str:
+    async def update_llm(llm_id: str | None, req: UpdateLLMReq) -> str:
         """
         创建大模型
 
@@ -107,7 +107,7 @@
         :return: 大模型对象
         """
         async with postgres.session() as session:
-            if llm_id:
+            if llm_id is not None:
                 llm = (await session.scalars(
                     select(LLMData).where(
                         LLMData.id == llm_id,
@@ -123,20 +123,24 @@
                 llm.provider = req.provider
                 llm.ctxLength = req.ctx_length
                 llm.llmDescription = req.llm_description
+                if req.llm_type is not None:
+                    llm.llmType = req.llm_type
                 llm.extraConfig = req.extra_data or {}
                 await session.commit()
             else:
-                llm = LLMData(
-                    id=llm_id,
-                    baseUrl=req.base_url,
-                    apiKey=req.api_key,
-                    modelName=req.model_name,
-                    maxToken=req.max_tokens,
-                    provider=req.provider,
-                    ctxLength=req.ctx_length,
-                    llmDescription=req.llm_description,
-                    extraConfig=req.extra_data or {},
-                )
+                llm_data = {
+                    "id": llm_id,
+                    "baseUrl": req.base_url,
+                    "apiKey": req.api_key,
+                    "modelName": req.model_name,
+                    "maxToken": req.max_tokens,
+                    "provider": req.provider,
+                    "ctxLength": req.ctx_length,
+                    "llmDescription": req.llm_description,
+                    "llmType": req.llm_type or [],
+                    "extraConfig": req.extra_data or {},
+                }
+                llm = LLMData(**llm_data)
                 session.add(llm)
                 await session.commit()
         return llm.id