From 5d9db38ad10e947c02d0581ecadf752196e4be8a Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 00:57:16 +0800 Subject: [PATCH 01/10] feat(shell): add custom Base Image support for Bot Shell - Add baseImage and baseShellRef fields to ShellSpec schema - Update public shells init data with default baseImage - Create Shell unified API endpoints (/shells/unified, CRUD operations) - Add image validation API to check compatibility with shell types - Modify task dispatch to pass baseImage to Executor Manager - Add executor binary extraction on Executor Manager startup - Support Base Image + Executor mount mode in Docker executor - Create frontend Shell API client and management UI components - Add Shells tab to Settings page with i18n support The feature enables users to create custom shells with their own base images while using the latest executor binary via Named Volume mount. --- backend/app/api/api.py | 2 + backend/app/api/endpoints/adapter/shells.py | 597 ++++++++++++++++++ backend/app/schemas/kind.py | 2 + .../app/services/adapters/executor_kinds.py | 28 +- backend/init_data/02-public-shells.yaml | 2 + .../executors/docker/binary_extractor.py | 188 ++++++ executor_manager/executors/docker/executor.py | 79 ++- executor_manager/main.py | 19 +- frontend/src/apis/shells.ts | 129 ++++ frontend/src/app/settings/page.tsx | 49 +- .../settings/components/ShellEdit.tsx | 383 +++++++++++ .../settings/components/ShellList.tsx | 244 +++++++ frontend/src/i18n/locales/en/common.json | 45 ++ frontend/src/i18n/locales/zh-CN/common.json | 45 ++ 14 files changed, 1780 insertions(+), 32 deletions(-) create mode 100644 backend/app/api/endpoints/adapter/shells.py create mode 100644 executor_manager/executors/docker/binary_extractor.py create mode 100644 frontend/src/apis/shells.ts create mode 100644 frontend/src/features/settings/components/ShellEdit.tsx create mode 100644 frontend/src/features/settings/components/ShellList.tsx diff --git a/backend/app/api/api.py b/backend/app/api/api.py index 9b3e99e9..a5cbcc24 100644 --- a/backend/app/api/api.py +++ b/backend/app/api/api.py @@ -9,6 +9,7 @@ dify, executors, models, + shells, tasks, teams, ) @@ -21,6 +22,7 @@ api_router.include_router(admin.router, prefix="/admin", tags=["admin"]) api_router.include_router(bots.router, prefix="/bots", tags=["bots"]) api_router.include_router(models.router, prefix="/models", tags=["public-models"]) +api_router.include_router(shells.router, prefix="/shells", tags=["shells"]) api_router.include_router(agents.router, prefix="/agents", tags=["public-shell"]) api_router.include_router(teams.router, prefix="/teams", tags=["teams"]) api_router.include_router(tasks.router, prefix="/tasks", tags=["tasks"]) diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py new file mode 100644 index 00000000..2fee5d32 --- /dev/null +++ b/backend/app/api/endpoints/adapter/shells.py @@ -0,0 +1,597 @@ +# SPDX-FileCopyrightText: 2025 Weibo, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import logging +import re +from typing import List, Optional + +from fastapi import APIRouter, Depends, HTTPException, Query, status +from pydantic import BaseModel +from sqlalchemy.orm import Session + +from app.api.dependencies import get_db +from app.core import security +from app.models.kind import Kind +from app.models.public_shell import PublicShell +from app.models.user import User +from app.schemas.kind import Shell as ShellCRD + +router = APIRouter() +logger = logging.getLogger(__name__) + + +# Request/Response Models +class UnifiedShell(BaseModel): + """Unified shell representation for API responses""" + + name: str + type: str # 'public' or 'user' + displayName: Optional[str] = None + runtime: str + baseImage: Optional[str] = None + baseShellRef: Optional[str] = None + supportModel: Optional[List[str]] = None + shellType: Optional[str] = None # 'local_engine' or 'external_api' + + +class ShellCreateRequest(BaseModel): + """Request body for creating a user shell""" + + name: str + displayName: Optional[str] = None + baseShellRef: str # Required: base public shell name (e.g., "ClaudeCode") + baseImage: str # Required: custom base image address + + +class ShellUpdateRequest(BaseModel): + """Request body for updating a user shell""" + + displayName: Optional[str] = None + baseImage: Optional[str] = None + + +class ImageValidationRequest(BaseModel): + """Request body for validating base image compatibility""" + + image: str + shellType: str # e.g., "ClaudeCode", "Agno" + + +class ImageCheckResult(BaseModel): + """Individual check result""" + + name: str + version: Optional[str] = None + status: str # 'pass' or 'fail' + message: Optional[str] = None + + +class ImageValidationResponse(BaseModel): + """Response for image validation""" + + valid: bool + checks: List[ImageCheckResult] + errors: List[str] + + +def _public_shell_to_unified(shell: PublicShell) -> UnifiedShell: + """Convert PublicShell to UnifiedShell""" + shell_crd = ShellCRD.model_validate(shell.json) + labels = shell_crd.metadata.labels or {} + return UnifiedShell( + name=shell.name, + type="public", + displayName=shell_crd.metadata.displayName or shell.name, + runtime=shell_crd.spec.runtime, + baseImage=shell_crd.spec.baseImage, + baseShellRef=shell_crd.spec.baseShellRef, + supportModel=shell_crd.spec.supportModel, + shellType=labels.get("type"), + ) + + +def _user_shell_to_unified(kind: Kind) -> UnifiedShell: + """Convert Kind (user shell) to UnifiedShell""" + shell_crd = ShellCRD.model_validate(kind.json) + labels = shell_crd.metadata.labels or {} + return UnifiedShell( + name=kind.name, + type="user", + displayName=shell_crd.metadata.displayName or kind.name, + runtime=shell_crd.spec.runtime, + baseImage=shell_crd.spec.baseImage, + baseShellRef=shell_crd.spec.baseShellRef, + supportModel=shell_crd.spec.supportModel, + shellType=labels.get("type"), + ) + + +@router.get("/unified", response_model=dict) +def list_unified_shells( + db: Session = Depends(get_db), + current_user: User = Depends(security.get_current_user), +): + """ + Get unified list of all available shells (both public and user-defined). + + Each shell includes a 'type' field ('public' or 'user') to identify its source. 
+ + Response: + { + "data": [ + { + "name": "shell-name", + "type": "public" | "user", + "displayName": "Human Readable Name", + "runtime": "ClaudeCode", + "baseImage": "ghcr.io/...", + "shellType": "local_engine" | "external_api" + } + ] + } + """ + result = [] + + # Get public shells + public_shells = ( + db.query(PublicShell) + .filter(PublicShell.is_active == True) # noqa: E712 + .order_by(PublicShell.name.asc()) + .all() + ) + for shell in public_shells: + try: + result.append(_public_shell_to_unified(shell)) + except Exception as e: + logger.warning(f"Failed to parse public shell {shell.name}: {e}") + + # Get user-defined shells + user_shells = ( + db.query(Kind) + .filter( + Kind.user_id == current_user.id, + Kind.kind == "Shell", + Kind.namespace == "default", + Kind.is_active == True, # noqa: E712 + ) + .order_by(Kind.name.asc()) + .all() + ) + for shell in user_shells: + try: + result.append(_user_shell_to_unified(shell)) + except Exception as e: + logger.warning(f"Failed to parse user shell {shell.name}: {e}") + + return {"data": [s.model_dump() for s in result]} + + +@router.get("/unified/{shell_name}", response_model=dict) +def get_unified_shell( + shell_name: str, + shell_type: Optional[str] = Query( + None, description="Shell type ('public' or 'user')" + ), + db: Session = Depends(get_db), + current_user: User = Depends(security.get_current_user), +): + """ + Get a specific shell by name, optionally with type hint. + + If shell_type is not provided, it will try to find the shell + in the following order: + 1. User's own shells (type='user') + 2. Public shells (type='public') + """ + # Try user shells first if no type specified or type is 'user' + if shell_type in (None, "user"): + user_shell = ( + db.query(Kind) + .filter( + Kind.user_id == current_user.id, + Kind.kind == "Shell", + Kind.name == shell_name, + Kind.namespace == "default", + Kind.is_active == True, # noqa: E712 + ) + .first() + ) + if user_shell: + return _user_shell_to_unified(user_shell).model_dump() + if shell_type == "user": + raise HTTPException(status_code=404, detail="User shell not found") + + # Try public shells + public_shell = ( + db.query(PublicShell) + .filter( + PublicShell.name == shell_name, + PublicShell.is_active == True, # noqa: E712 + ) + .first() + ) + if public_shell: + return _public_shell_to_unified(public_shell).model_dump() + + raise HTTPException(status_code=404, detail="Shell not found") + + +@router.post("", response_model=dict, status_code=status.HTTP_201_CREATED) +def create_shell( + request: ShellCreateRequest, + db: Session = Depends(get_db), + current_user: User = Depends(security.get_current_user), +): + """ + Create a user-defined shell. + + The shell must be based on an existing public shell (baseShellRef). 
+ """ + # Validate name format + name_regex = r"^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$" + if not re.match(name_regex, request.name): + raise HTTPException( + status_code=400, + detail="Shell name must contain only lowercase letters, numbers, and hyphens", + ) + + # Check if name already exists for this user + existing = ( + db.query(Kind) + .filter( + Kind.user_id == current_user.id, + Kind.kind == "Shell", + Kind.name == request.name, + Kind.namespace == "default", + Kind.is_active == True, # noqa: E712 + ) + .first() + ) + if existing: + raise HTTPException(status_code=400, detail="Shell name already exists") + + # Validate baseShellRef - must be a public shell with local_engine type + base_shell = ( + db.query(PublicShell) + .filter( + PublicShell.name == request.baseShellRef, + PublicShell.is_active == True, # noqa: E712 + ) + .first() + ) + if not base_shell: + raise HTTPException( + status_code=400, detail=f"Base shell '{request.baseShellRef}' not found" + ) + + base_shell_crd = ShellCRD.model_validate(base_shell.json) + base_labels = base_shell_crd.metadata.labels or {} + if base_labels.get("type") != "local_engine": + raise HTTPException( + status_code=400, + detail="Base shell must be a local_engine type (not external_api)", + ) + + # Validate baseImage format (basic URL validation) + if not request.baseImage or not re.match( + r"^[a-z0-9.-]+(/[a-z0-9._-]+)+:[a-z0-9._-]+$", request.baseImage, re.IGNORECASE + ): + raise HTTPException( + status_code=400, + detail="Invalid base image format. Expected format: registry/image:tag", + ) + + # Create Shell CRD + shell_crd = { + "apiVersion": "agent.wecode.io/v1", + "kind": "Shell", + "metadata": { + "name": request.name, + "namespace": "default", + "displayName": request.displayName, + "labels": {"type": "local_engine"}, # User shells inherit local_engine type + }, + "spec": { + "runtime": base_shell_crd.spec.runtime, # Inherit runtime from base shell + "supportModel": base_shell_crd.spec.supportModel or [], + "baseImage": request.baseImage, + "baseShellRef": request.baseShellRef, + }, + "status": {"state": "Available"}, + } + + db_obj = Kind( + user_id=current_user.id, + kind="Shell", + name=request.name, + namespace="default", + json=shell_crd, + is_active=True, + ) + db.add(db_obj) + db.commit() + db.refresh(db_obj) + + return _user_shell_to_unified(db_obj).model_dump() + + +@router.put("/{shell_name}", response_model=dict) +def update_shell( + shell_name: str, + request: ShellUpdateRequest, + db: Session = Depends(get_db), + current_user: User = Depends(security.get_current_user), +): + """ + Update a user-defined shell. + + Only user-defined shells can be updated. Public shells are read-only. + """ + # Get user shell + shell = ( + db.query(Kind) + .filter( + Kind.user_id == current_user.id, + Kind.kind == "Shell", + Kind.name == shell_name, + Kind.namespace == "default", + Kind.is_active == True, # noqa: E712 + ) + .first() + ) + if not shell: + raise HTTPException(status_code=404, detail="User shell not found") + + # Parse existing CRD + shell_crd = ShellCRD.model_validate(shell.json) + + # Update fields + if request.displayName is not None: + shell_crd.metadata.displayName = request.displayName + + if request.baseImage is not None: + # Validate baseImage format + if not re.match( + r"^[a-z0-9.-]+(/[a-z0-9._-]+)+:[a-z0-9._-]+$", + request.baseImage, + re.IGNORECASE, + ): + raise HTTPException( + status_code=400, + detail="Invalid base image format. 
Expected format: registry/image:tag", + ) + shell_crd.spec.baseImage = request.baseImage + + # Save changes + shell.json = shell_crd.model_dump(mode="json") + db.add(shell) + db.commit() + db.refresh(shell) + + return _user_shell_to_unified(shell).model_dump() + + +@router.delete("/{shell_name}") +def delete_shell( + shell_name: str, + db: Session = Depends(get_db), + current_user: User = Depends(security.get_current_user), +): + """ + Delete a user-defined shell. + + Only user-defined shells can be deleted. Public shells cannot be deleted. + """ + # Get user shell + shell = ( + db.query(Kind) + .filter( + Kind.user_id == current_user.id, + Kind.kind == "Shell", + Kind.name == shell_name, + Kind.namespace == "default", + Kind.is_active == True, # noqa: E712 + ) + .first() + ) + if not shell: + raise HTTPException(status_code=404, detail="User shell not found") + + # Soft delete + shell.is_active = False + db.add(shell) + db.commit() + + return {"message": "Shell deleted successfully"} + + +@router.post("/validate-image", response_model=ImageValidationResponse) +def validate_image( + request: ImageValidationRequest, + current_user: User = Depends(security.get_current_user), +): + """ + Validate if a base image is compatible with a specific shell type. + + This endpoint pulls the image and checks for required dependencies: + - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 + - Agno: Python 3.12 + - Dify: No check needed (external_api type) + + Note: Only supports public image registries. + """ + import subprocess + + shell_type = request.shellType + image = request.image + + # Dify doesn't need validation + if shell_type == "Dify": + return ImageValidationResponse( + valid=True, + checks=[], + errors=["Dify is an external_api type and doesn't require image validation"], + ) + + # Define checks based on shell type + checks_config = { + "ClaudeCode": [ + { + "name": "node", + "command": "node --version", + "version_regex": r"v(\d+\.\d+\.\d+)", + "min_version": "20.0.0", + }, + { + "name": "claude-code", + "command": "claude --version 2>/dev/null || echo 'not found'", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": None, + }, + { + "name": "sqlite", + "command": "sqlite3 --version", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": "3.50.0", + }, + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + "Agno": [ + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + } + + if shell_type not in checks_config: + return ImageValidationResponse( + valid=False, checks=[], errors=[f"Unknown shell type: {shell_type}"] + ) + + checks_to_run = checks_config[shell_type] + results = [] + errors = [] + all_passed = True + + try: + # Pull the image first (with timeout) + logger.info(f"Pulling image {image} for validation...") + pull_result = subprocess.run( + ["docker", "pull", image], + capture_output=True, + text=True, + timeout=300, # 5 minutes timeout for pull + ) + if pull_result.returncode != 0: + return ImageValidationResponse( + valid=False, + checks=[], + errors=[f"Failed to pull image: {pull_result.stderr}"], + ) + + # Run checks + for check in checks_to_run: + try: + result = subprocess.run( + [ + "docker", + "run", + "--rm", + image, + "sh", + "-c", + check["command"], + ], + capture_output=True, + text=True, + timeout=30, + ) + + output = result.stdout.strip() + if result.returncode != 0 
or "not found" in output.lower(): + results.append( + ImageCheckResult( + name=check["name"], + status="fail", + message=f"Command failed or not found", + ) + ) + all_passed = False + continue + + # Extract version + import re as re_module + + version_match = re_module.search(check["version_regex"], output) + if version_match: + version = version_match.group(1) + # Check minimum version if specified + if check["min_version"]: + from packaging import version as pkg_version + + try: + if pkg_version.parse(version) < pkg_version.parse( + check["min_version"] + ): + results.append( + ImageCheckResult( + name=check["name"], + version=version, + status="fail", + message=f"Version {version} < required {check['min_version']}", + ) + ) + all_passed = False + continue + except Exception: + pass # Skip version comparison on error + + results.append( + ImageCheckResult( + name=check["name"], version=version, status="pass" + ) + ) + else: + results.append( + ImageCheckResult( + name=check["name"], + status="pass", + message="Detected but version not parsed", + ) + ) + + except subprocess.TimeoutExpired: + results.append( + ImageCheckResult( + name=check["name"], status="fail", message="Check timed out" + ) + ) + all_passed = False + except Exception as e: + results.append( + ImageCheckResult( + name=check["name"], status="fail", message=str(e) + ) + ) + all_passed = False + + except subprocess.TimeoutExpired: + return ImageValidationResponse( + valid=False, checks=results, errors=["Image pull timed out"] + ) + except Exception as e: + logger.error(f"Image validation error: {e}") + return ImageValidationResponse( + valid=False, checks=results, errors=[f"Validation error: {str(e)}"] + ) + + return ImageValidationResponse(valid=all_passed, checks=results, errors=errors) diff --git a/backend/app/schemas/kind.py b/backend/app/schemas/kind.py index 028a5fb5..27fe161e 100644 --- a/backend/app/schemas/kind.py +++ b/backend/app/schemas/kind.py @@ -112,6 +112,8 @@ class ShellSpec(BaseModel): runtime: str supportModel: Optional[List[str]] = None + baseImage: Optional[str] = None # Custom base image address for user-defined shells + baseShellRef: Optional[str] = None # Reference to base public shell (e.g., "ClaudeCode") class ShellStatus(Status): diff --git a/backend/app/services/adapters/executor_kinds.py b/backend/app/services/adapters/executor_kinds.py index b58c4d87..8eb99f5b 100644 --- a/backend/app/services/adapters/executor_kinds.py +++ b/backend/app/services/adapters/executor_kinds.py @@ -395,7 +395,7 @@ def _format_subtasks_response( .first() ) - # Get shell for agent name + # Get shell for agent name - first check user's custom shells, then public shells shell = ( db.query(Kind) .filter( @@ -408,6 +408,28 @@ def _format_subtasks_response( .first() ) + # If user shell not found, try public shells + shell_base_image = None + if not shell: + from app.models.public_shell import PublicShell + + public_shell = ( + db.query(PublicShell) + .filter( + PublicShell.name == bot_crd.spec.shellRef.name, + PublicShell.is_active == True, + ) + .first() + ) + if public_shell and public_shell.json: + shell_crd_temp = Shell.model_validate(public_shell.json) + shell_base_image = shell_crd_temp.spec.baseImage + # Create a mock shell object for compatibility + class MockShell: + def __init__(self, json_data): + self.json = json_data + shell = MockShell(public_shell.json) + # Get model for agent config (modelRef is optional) model = None if bot_crd.spec.modelRef: @@ -442,6 +464,9 @@ def _format_subtasks_response( if shell and 
shell.json: shell_crd = Shell.model_validate(shell.json) agent_name = shell_crd.spec.runtime + # Extract baseImage from shell (user-defined shell overrides public shell) + if shell_crd.spec.baseImage: + shell_base_image = shell_crd.spec.baseImage if model and model.json: model_crd = Model.model_validate(model.json) @@ -593,6 +618,7 @@ def _format_subtasks_response( "mcp_servers": mcp_servers, "skills": skills, "role": team_member_info.role if team_member_info else "", + "base_image": shell_base_image, # Custom base image for executor } ) diff --git a/backend/init_data/02-public-shells.yaml b/backend/init_data/02-public-shells.yaml index d48ecf7c..03b50403 100644 --- a/backend/init_data/02-public-shells.yaml +++ b/backend/init_data/02-public-shells.yaml @@ -14,6 +14,7 @@ metadata: spec: runtime: ClaudeCode supportModel: [] + baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 status: state: Available --- @@ -27,6 +28,7 @@ metadata: spec: runtime: Agno supportModel: [] + baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 status: state: Available --- diff --git a/executor_manager/executors/docker/binary_extractor.py b/executor_manager/executors/docker/binary_extractor.py new file mode 100644 index 00000000..7ef7b8bb --- /dev/null +++ b/executor_manager/executors/docker/binary_extractor.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python + +# SPDX-FileCopyrightText: 2025 Weibo, Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# -*- coding: utf-8 -*- + +""" +Binary extractor module for extracting executor binary from official image to Named Volume. +This enables the Init Container pattern where custom base images can run the latest executor. +""" + +import os +import subprocess +from typing import Optional, Tuple + +from shared.logger import setup_logger + +logger = setup_logger(__name__) + +# Constants +EXECUTOR_BINARY_VOLUME = "wegent-executor-binary" +EXECUTOR_BINARY_PATH = "/app/executor" +VERSION_FILE_PATH = "/target/.version" + + +def get_executor_image() -> str: + """Get the executor image from environment variable""" + return os.getenv("EXECUTOR_IMAGE", "") + + +def extract_executor_binary() -> bool: + """ + Extract executor binary from official image to Named Volume. + + This function: + 1. Checks if the Named Volume exists with the current version + 2. If not, creates/updates the volume with executor binary from official image + 3. 
Records the version for future comparison + + Returns: + bool: True if extraction was successful or already up-to-date, False otherwise + """ + executor_image = get_executor_image() + if not executor_image: + logger.warning("EXECUTOR_IMAGE environment variable not set, skipping binary extraction") + return True # Not an error, just not configured + + logger.info(f"Checking executor binary extraction for image: {executor_image}") + + try: + # Check if volume exists and has matching version + should_extract, current_version = _should_extract_binary(executor_image) + + if not should_extract: + logger.info(f"Executor binary already up-to-date (version: {current_version})") + return True + + logger.info(f"Extracting executor binary from {executor_image}...") + + # Extract binary from official image to Named Volume + success = _extract_binary_to_volume(executor_image) + + if success: + logger.info(f"Successfully extracted executor binary to volume {EXECUTOR_BINARY_VOLUME}") + return True + else: + logger.error("Failed to extract executor binary") + return False + + except Exception as e: + logger.error(f"Error during executor binary extraction: {e}") + return False + + +def _should_extract_binary(target_image: str) -> Tuple[bool, Optional[str]]: + """ + Check if binary extraction is needed by comparing versions. + + Args: + target_image: The target executor image to compare against + + Returns: + Tuple of (should_extract, current_version) + """ + try: + # Try to read version from existing volume + result = subprocess.run( + [ + "docker", "run", "--rm", + "-v", f"{EXECUTOR_BINARY_VOLUME}:/target:ro", + "alpine:latest", + "cat", VERSION_FILE_PATH + ], + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode == 0: + current_version = result.stdout.strip() + if current_version == target_image: + return False, current_version + else: + logger.info(f"Version mismatch: current={current_version}, target={target_image}") + return True, current_version + else: + # Volume doesn't exist or version file not found + logger.info("No existing version found, extraction needed") + return True, None + + except subprocess.TimeoutExpired: + logger.warning("Timeout checking version, will extract") + return True, None + except Exception as e: + logger.warning(f"Error checking version: {e}, will extract") + return True, None + + +def _extract_binary_to_volume(executor_image: str) -> bool: + """ + Extract executor binary from image to Named Volume. 
+ + Args: + executor_image: The source executor image + + Returns: + bool: True if successful, False otherwise + """ + try: + # Step 1: Create/ensure the Named Volume exists + subprocess.run( + ["docker", "volume", "create", EXECUTOR_BINARY_VOLUME], + capture_output=True, + text=True, + timeout=30 + ) + logger.info(f"Created/verified volume: {EXECUTOR_BINARY_VOLUME}") + + # Step 2: Extract executor binary and write version file + # Using a single container to copy files and write version + extract_cmd = f""" + cp -r /app/* /target/ 2>/dev/null || cp /app/executor /target/executor; + echo '{executor_image}' > {VERSION_FILE_PATH}; + chmod +x /target/executor 2>/dev/null || true + """ + + result = subprocess.run( + [ + "docker", "run", "--rm", + "-v", f"{EXECUTOR_BINARY_VOLUME}:/target", + executor_image, + "sh", "-c", extract_cmd + ], + capture_output=True, + text=True, + timeout=120 # 2 minutes for extraction + ) + + if result.returncode != 0: + logger.error(f"Failed to extract binary: {result.stderr}") + return False + + logger.info("Binary extraction completed successfully") + return True + + except subprocess.TimeoutExpired: + logger.error("Binary extraction timed out") + return False + except Exception as e: + logger.error(f"Error extracting binary: {e}") + return False + + +def get_volume_mount_config() -> dict: + """ + Get the volume mount configuration for containers using custom base image. + + Returns: + dict: Configuration for volume mount + """ + return { + "volume_name": EXECUTOR_BINARY_VOLUME, + "mount_path": "/app", + "readonly": True, + "entrypoint": "/app/executor" + } diff --git a/executor_manager/executors/docker/executor.py b/executor_manager/executors/docker/executor.py index 4debf6cb..0f54c0bf 100644 --- a/executor_manager/executors/docker/executor.py +++ b/executor_manager/executors/docker/executor.py @@ -176,20 +176,33 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], """Create new Docker container""" executor_name = status["executor_name"] task_id = task_info["task_id"] - + + # Check for custom base_image from bot configuration + base_image = self._get_base_image_from_task(task) + # Get executor image executor_image = self._get_executor_image(task) - - # Prepare Docker command - cmd = self._prepare_docker_command(task, task_info, executor_name, executor_image) - + + # Prepare Docker command with optional base_image support + cmd = self._prepare_docker_command(task, task_info, executor_name, executor_image, base_image) + # Execute Docker command - logger.info(f"Starting Docker container for task {task_id}: {executor_name}") + logger.info(f"Starting Docker container for task {task_id}: {executor_name} (base_image={base_image or 'default'})") result = self.subprocess.run(cmd, check=True, capture_output=True, text=True) - + # Record container ID container_id = result.stdout.strip() logger.info(f"Started Docker container {executor_name} with ID {container_id}") + + def _get_base_image_from_task(self, task: Dict[str, Any]) -> Optional[str]: + """Extract custom base_image from task's bot configuration""" + bots = task.get("bot", []) + if bots and isinstance(bots, list) and len(bots) > 0: + # Use the first bot's base_image if available + first_bot = bots[0] + if isinstance(first_bot, dict): + return first_bot.get("base_image") + return None def _get_executor_image(self, task: Dict[str, Any]) -> str: """Get executor image name""" @@ -203,16 +216,33 @@ def _prepare_docker_command( task: Dict[str, Any], task_info: Dict[str, Any], executor_name: 
str, - executor_image: str + executor_image: str, + base_image: Optional[str] = None ) -> List[str]: - """Prepare Docker run command""" + """ + Prepare Docker run command. + + If base_image is provided, uses the Init Container pattern: + - Uses the custom base_image as container image + - Mounts executor binary from Named Volume + - Overrides entrypoint to /app/executor + + Args: + task: Task information + task_info: Extracted task info + executor_name: Container name + executor_image: Default executor image + base_image: Optional custom base image + """ + from executors.docker.binary_extractor import EXECUTOR_BINARY_VOLUME + task_id = task_info["task_id"] subtask_id = task_info["subtask_id"] user_name = task_info["user_name"] - + # Convert task to JSON string task_str = json.dumps(task) - + # Basic command cmd = [ "docker", @@ -236,27 +266,36 @@ def _prepare_docker_command( # Mount "-v", f"{DOCKER_SOCKET_PATH}:{DOCKER_SOCKET_PATH}" ] - + + # If using custom base_image, mount executor binary from Named Volume + if base_image: + cmd.extend([ + "-v", f"{EXECUTOR_BINARY_VOLUME}:/app:ro", # Mount executor binary as read-only + "--entrypoint", "/app/executor" # Override entrypoint + ]) + logger.info(f"Using custom base image mode: {base_image} with executor from {EXECUTOR_BINARY_VOLUME}") + # Add TASK_API_DOMAIN environment variable for executor to access backend API self._add_task_api_domain(cmd) - + # Add workspace mount self._add_workspace_mount(cmd) - + # Add network configuration self._add_network_config(cmd) - + # Add port mapping port = find_available_port() logger.info(f"Assigned port {port} for container {executor_name}") cmd.extend(["-p", f"{port}:{port}", "-e", f"PORT={port}"]) - + # Add callback URL self._add_callback_url(cmd, task) - - # Add executor image - cmd.append(executor_image) - + + # Add executor image (use base_image if provided, otherwise use default executor_image) + final_image = base_image if base_image else executor_image + cmd.append(final_image) + return cmd def _add_task_api_domain(self, cmd: List[str]) -> None: diff --git a/executor_manager/main.py b/executor_manager/main.py index 8c2b8ce2..daa674e7 100644 --- a/executor_manager/main.py +++ b/executor_manager/main.py @@ -33,19 +33,30 @@ async def lifespan(app): FastAPI application lifecycle manager Starts the task scheduler when the application starts, and performs cleanup operations when the application shuts down """ + # Extract executor binary to Named Volume on startup + logger.info("Extracting executor binary to Named Volume...") + try: + from executors.docker.binary_extractor import extract_executor_binary + if extract_executor_binary(): + logger.info("Executor binary extraction completed") + else: + logger.warning("Executor binary extraction failed, custom base images may not work") + except Exception as e: + logger.warning(f"Executor binary extraction error: {e}, custom base images may not work") + # Start the task scheduler logger.info("Initializing task scheduler...") scheduler_instance = TaskScheduler() - + # Start the scheduler in a separate thread scheduler_thread = threading.Thread(target=start_scheduler, args=(scheduler_instance,)) scheduler_thread.daemon = True scheduler_thread.start() - + logger.info("Task scheduler started successfully") - + yield # During FastAPI application runtime - + # Cleanup operations when the application shuts down (if needed) logger.info("Shutting down task scheduler...") # If TaskScheduler has a stop method, you can call it here diff --git a/frontend/src/apis/shells.ts 
b/frontend/src/apis/shells.ts new file mode 100644 index 00000000..1f8d7bc5 --- /dev/null +++ b/frontend/src/apis/shells.ts @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2025 Weibo, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +import { apiClient } from './client' + +// Shell Types +export type ShellTypeEnum = 'public' | 'user' + +export interface UnifiedShell { + name: string + type: ShellTypeEnum // 'public' or 'user' - identifies shell source + displayName?: string | null + runtime: string + baseImage?: string | null + baseShellRef?: string | null + supportModel?: string[] | null + shellType?: 'local_engine' | 'external_api' | null // Shell execution type +} + +export interface UnifiedShellListResponse { + data: UnifiedShell[] +} + +export interface ShellCreateRequest { + name: string + displayName?: string + baseShellRef: string // Required: base public shell name (e.g., "ClaudeCode") + baseImage: string // Required: custom base image address +} + +export interface ShellUpdateRequest { + displayName?: string + baseImage?: string +} + +// Image Validation Types +export interface ImageValidationRequest { + image: string + shellType: string // e.g., "ClaudeCode", "Agno" +} + +export interface ImageCheckResult { + name: string + version?: string | null + status: 'pass' | 'fail' + message?: string | null +} + +export interface ImageValidationResponse { + valid: boolean + checks: ImageCheckResult[] + errors: string[] +} + +// Shell Services +export const shellApis = { + /** + * Get unified list of all available shells (both public and user-defined) + * + * Each shell includes a 'type' field ('public' or 'user') to identify its source. + */ + async getUnifiedShells(): Promise { + return apiClient.get('/shells/unified') + }, + + /** + * Get a specific shell by name and optional type + * + * @param shellName - Shell name + * @param shellType - Optional shell type ('public' or 'user') + */ + async getUnifiedShell(shellName: string, shellType?: ShellTypeEnum): Promise { + const params = new URLSearchParams() + if (shellType) { + params.append('shell_type', shellType) + } + const queryString = params.toString() + return apiClient.get( + `/shells/unified/${encodeURIComponent(shellName)}${queryString ? 
`?${queryString}` : ''}` + ) + }, + + /** + * Create a new user-defined shell + */ + async createShell(request: ShellCreateRequest): Promise { + return apiClient.post('/shells', request) + }, + + /** + * Update an existing user-defined shell + */ + async updateShell(name: string, request: ShellUpdateRequest): Promise { + return apiClient.put(`/shells/${encodeURIComponent(name)}`, request) + }, + + /** + * Delete a user-defined shell + */ + async deleteShell(name: string): Promise { + return apiClient.delete(`/shells/${encodeURIComponent(name)}`) + }, + + /** + * Validate base image compatibility with a shell type + */ + async validateImage(request: ImageValidationRequest): Promise { + return apiClient.post('/shells/validate-image', request) + }, + + /** + * Get public shells only (filter from unified list) + */ + async getPublicShells(): Promise { + const response = await this.getUnifiedShells() + return (response.data || []).filter(shell => shell.type === 'public') + }, + + /** + * Get local_engine type shells only (for base shell selection) + */ + async getLocalEngineShells(): Promise { + const response = await this.getUnifiedShells() + return (response.data || []).filter( + shell => shell.type === 'public' && shell.shellType === 'local_engine' + ) + }, +} diff --git a/frontend/src/app/settings/page.tsx b/frontend/src/app/settings/page.tsx index 313acd14..8bf9ab45 100644 --- a/frontend/src/app/settings/page.tsx +++ b/frontend/src/app/settings/page.tsx @@ -9,11 +9,12 @@ import { useRouter, useSearchParams } from 'next/navigation'; import TopNavigation from '@/features/layout/TopNavigation'; import UserMenu from '@/features/layout/UserMenu'; import { Tab } from '@headlessui/react'; -import { PuzzlePieceIcon, UsersIcon, BellIcon, CpuChipIcon } from '@heroicons/react/24/outline'; +import { PuzzlePieceIcon, UsersIcon, BellIcon, CpuChipIcon, CommandLineIcon } from '@heroicons/react/24/outline'; import GitHubIntegration from '@/features/settings/components/GitHubIntegration'; import TeamList from '@/features/settings/components/TeamList'; import NotificationSettings from '@/features/settings/components/NotificationSettings'; import ModelList from '@/features/settings/components/ModelList'; +import ShellList from '@/features/settings/components/ShellList'; import { UserProvider } from '@/features/common/UserContext'; import { useTranslation } from '@/hooks/useTranslation'; import { GithubStarButton } from '@/features/layout/GithubStarButton'; @@ -27,9 +28,10 @@ function DashboardContent() { const tabIndexToName = useMemo( (): Record => ({ 0: 'models', - 1: 'team', - 2: 'integrations', - 3: 'notifications', + 1: 'shells', + 2: 'team', + 3: 'integrations', + 4: 'notifications', }), [] ); @@ -38,9 +40,10 @@ function DashboardContent() { const tabNameToIndex = useMemo( (): Record => ({ models: 0, - team: 1, - integrations: 2, - notifications: 3, + shells: 1, + team: 2, + integrations: 3, + notifications: 4, }), [] ); @@ -114,6 +117,19 @@ function DashboardContent() { {t('settings.models')} + + `w-full flex items-center space-x-3 px-3 py-2 text-sm rounded-md transition-colors duration-200 focus:outline-none ${ + selected + ? 
'bg-muted text-text-primary' + : 'text-text-muted hover:text-text-primary hover:bg-muted' + }` + } + > + + {t('settings.shells')} + + `w-full flex items-center space-x-3 px-3 py-2 text-sm rounded-md transition-colors duration-200 focus:outline-none ${ @@ -159,6 +175,9 @@ function DashboardContent() { + + + @@ -189,6 +208,19 @@ function DashboardContent() { {t('settings.models')} + + `flex-1 flex items-center justify-center space-x-1 px-2 py-2 text-xs rounded-md transition-colors duration-200 focus:outline-none ${ + selected + ? 'bg-muted text-text-primary' + : 'text-text-muted hover:text-text-primary hover:bg-muted' + }` + } + > + + {t('settings.shells')} + + `flex-1 flex items-center justify-center space-x-1 px-2 py-2 text-xs rounded-md transition-colors duration-200 focus:outline-none ${ @@ -235,6 +267,9 @@ function DashboardContent() { + + + diff --git a/frontend/src/features/settings/components/ShellEdit.tsx b/frontend/src/features/settings/components/ShellEdit.tsx new file mode 100644 index 00000000..29275761 --- /dev/null +++ b/frontend/src/features/settings/components/ShellEdit.tsx @@ -0,0 +1,383 @@ +// SPDX-FileCopyrightText: 2025 Weibo, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +'use client' + +import React, { useCallback, useState, useEffect } from 'react' +import { Button } from '@/components/ui/button' +import { Input } from '@/components/ui/input' +import { Label } from '@/components/ui/label' +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/components/ui/select' +import { Loader2 } from 'lucide-react' +import { BeakerIcon, CheckCircleIcon, XCircleIcon } from '@heroicons/react/24/outline' +import { useTranslation } from '@/hooks/useTranslation' +import { shellApis, UnifiedShell, ImageCheckResult } from '@/apis/shells' + +interface ShellEditProps { + shell: UnifiedShell | null + onClose: () => void + toast: ReturnType['toast'] +} + +const ShellEdit: React.FC = ({ shell, onClose, toast }) => { + const { t } = useTranslation('common') + const isEditing = !!shell + + // Form state + const [name, setName] = useState(shell?.name || '') + const [displayName, setDisplayName] = useState(shell?.displayName || '') + const [baseShellRef, setBaseShellRef] = useState(shell?.baseShellRef || '') + const [baseImage, setBaseImage] = useState(shell?.baseImage || '') + const [saving, setSaving] = useState(false) + const [validating, setValidating] = useState(false) + const [validationResult, setValidationResult] = useState<{ + valid: boolean + checks: ImageCheckResult[] + errors: string[] + } | null>(null) + + // Available base shells (public local_engine shells) + const [baseShells, setBaseShells] = useState([]) + const [loadingBaseShells, setLoadingBaseShells] = useState(true) + + useEffect(() => { + const fetchBaseShells = async () => { + try { + const shells = await shellApis.getLocalEngineShells() + setBaseShells(shells) + } catch (error) { + console.error('Failed to fetch base shells:', error) + } finally { + setLoadingBaseShells(false) + } + } + fetchBaseShells() + }, []) + + const handleValidateImage = async () => { + if (!baseImage || !baseShellRef) { + toast({ + variant: 'destructive', + title: t('shells.errors.base_image_and_shell_required'), + }) + return + } + + // Find the runtime for selected base shell + const selectedBaseShell = baseShells.find(s => s.name === baseShellRef) + if (!selectedBaseShell) { + toast({ + variant: 'destructive', + title: t('shells.errors.base_shell_not_found'), + }) + return + } + + setValidating(true) 
+ setValidationResult(null) + + try { + const result = await shellApis.validateImage({ + image: baseImage, + shellType: selectedBaseShell.runtime, + }) + setValidationResult(result) + + if (result.valid) { + toast({ + title: t('shells.validation_success'), + }) + } else { + toast({ + variant: 'destructive', + title: t('shells.validation_failed'), + description: result.errors.join(', ') || t('shells.errors.image_not_compatible'), + }) + } + } catch (error) { + toast({ + variant: 'destructive', + title: t('shells.validation_failed'), + description: (error as Error).message, + }) + } finally { + setValidating(false) + } + } + + const handleSave = async () => { + // Validation + if (!name.trim()) { + toast({ + variant: 'destructive', + title: t('shells.errors.name_required'), + }) + return + } + + // Validate name format (lowercase letters, numbers, and hyphens only) + const nameRegex = /^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$/ + if (!nameRegex.test(name)) { + toast({ + variant: 'destructive', + title: t('shells.errors.name_invalid'), + }) + return + } + + if (!isEditing) { + if (!baseShellRef) { + toast({ + variant: 'destructive', + title: t('shells.errors.base_shell_required'), + }) + return + } + + if (!baseImage.trim()) { + toast({ + variant: 'destructive', + title: t('shells.errors.base_image_required'), + }) + return + } + } + + setSaving(true) + try { + if (isEditing) { + await shellApis.updateShell(shell.name, { + displayName: displayName.trim() || undefined, + baseImage: baseImage.trim() || undefined, + }) + toast({ + title: t('shells.update_success'), + }) + } else { + await shellApis.createShell({ + name: name.trim(), + displayName: displayName.trim() || undefined, + baseShellRef, + baseImage: baseImage.trim(), + }) + toast({ + title: t('shells.create_success'), + }) + } + + onClose() + } catch (error) { + toast({ + variant: 'destructive', + title: isEditing ? t('shells.errors.update_failed') : t('shells.errors.create_failed'), + description: (error as Error).message, + }) + } finally { + setSaving(false) + } + } + + const handleBack = useCallback(() => { + onClose() + }, [onClose]) + + useEffect(() => { + const handleEsc = (event: KeyboardEvent) => { + if (event.key !== 'Escape') return + handleBack() + } + + window.addEventListener('keydown', handleEsc) + return () => window.removeEventListener('keydown', handleEsc) + }, [handleBack]) + + return ( +
+ {/* Top Navigation */} +
+ +
+ +
+
+ + {/* Form */} +
+ {/* Shell Name */} +
+ + setName(e.target.value)} + placeholder="my-custom-shell" + disabled={isEditing} + className="bg-base" + /> +

+ {isEditing ? t('shells.name_readonly_hint') : t('shells.name_hint')} +

+
+ + {/* Display Name */} +
+ + setDisplayName(e.target.value)} + placeholder={t('shells.display_name_placeholder')} + className="bg-base" + /> +

{t('shells.display_name_hint')}

+
+ + {/* Base Shell Reference */} +
+ + +

{t('shells.base_shell_hint')}

+
+ + {/* Base Image */} +
+ +
+ { + setBaseImage(e.target.value) + setValidationResult(null) // Clear validation result on change + }} + placeholder="ghcr.io/your-org/your-image:latest" + className="bg-base flex-1" + /> + +
+

{t('shells.base_image_hint')}

+ + {/* Validation Results */} + {validationResult && ( +
+
+ {validationResult.valid ? ( + + ) : ( + + )} + + {validationResult.valid + ? t('shells.validation_passed') + : t('shells.validation_not_passed')} + +
+ {validationResult.checks.length > 0 && ( +
+ {validationResult.checks.map((check, index) => ( +
+ {check.status === 'pass' ? ( + + ) : ( + + )} + + {check.name} + {check.version && ` (${check.version})`} + {check.message && `: ${check.message}`} + +
+ ))} +
+ )} + {validationResult.errors.length > 0 && ( +
+ {validationResult.errors.map((error, index) => ( +
{error}
+ ))} +
+ )} +
+ )} +
+
+
+ ) +} + +export default ShellEdit diff --git a/frontend/src/features/settings/components/ShellList.tsx b/frontend/src/features/settings/components/ShellList.tsx new file mode 100644 index 00000000..86adec0d --- /dev/null +++ b/frontend/src/features/settings/components/ShellList.tsx @@ -0,0 +1,244 @@ +// SPDX-FileCopyrightText: 2025 Weibo, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +'use client' +import '@/features/common/scrollbar.css' + +import React, { useEffect, useState, useCallback } from 'react' +import { Button } from '@/components/ui/button' +import { Card } from '@/components/ui/card' +import { Tag } from '@/components/ui/tag' +import { + CommandLineIcon, + PencilIcon, + TrashIcon, + GlobeAltIcon, +} from '@heroicons/react/24/outline' +import { Loader2 } from 'lucide-react' +import { useToast } from '@/hooks/use-toast' +import { useTranslation } from '@/hooks/useTranslation' +import ShellEdit from './ShellEdit' +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from '@/components/ui/alert-dialog' +import { shellApis, UnifiedShell } from '@/apis/shells' +import UnifiedAddButton from '@/components/common/UnifiedAddButton' + +const ShellList: React.FC = () => { + const { t } = useTranslation('common') + const { toast } = useToast() + const [shells, setShells] = useState([]) + const [loading, setLoading] = useState(true) + const [editingShell, setEditingShell] = useState(null) + const [isCreating, setIsCreating] = useState(false) + const [deleteConfirmShell, setDeleteConfirmShell] = useState(null) + + const fetchShells = useCallback(async () => { + setLoading(true) + try { + const response = await shellApis.getUnifiedShells() + setShells(response.data || []) + } catch (error) { + console.error('Failed to fetch shells:', error) + toast({ + variant: 'destructive', + title: t('shells.errors.load_shells_failed'), + }) + } finally { + setLoading(false) + } + }, [toast, t]) + + useEffect(() => { + fetchShells() + }, [fetchShells]) + + const handleDelete = async () => { + if (!deleteConfirmShell) return + + try { + await shellApis.deleteShell(deleteConfirmShell.name) + toast({ + title: t('shells.delete_success'), + }) + setDeleteConfirmShell(null) + fetchShells() + } catch (error) { + toast({ + variant: 'destructive', + title: t('shells.errors.delete_failed'), + description: (error as Error).message, + }) + } + } + + const handleEdit = (shell: UnifiedShell) => { + if (shell.type === 'public') return + setEditingShell(shell) + } + + const handleEditClose = () => { + setEditingShell(null) + setIsCreating(false) + fetchShells() + } + + const getShellTypeLabel = (shellType?: string | null) => { + if (shellType === 'local_engine') return 'Local Engine' + if (shellType === 'external_api') return 'External API' + return shellType || 'Unknown' + } + + if (editingShell || isCreating) { + return + } + + return ( +
+ {/* Header */} +
+

{t('shells.title')}

+

{t('shells.description')}

+
+ + {/* Content Container */} +
+ {/* Loading State */} + {loading && ( +
+ +
+ )} + + {/* Empty State */} + {!loading && shells.length === 0 && ( +
+ +

{t('shells.no_shells')}

+

{t('shells.no_shells_hint')}

+
+ )} + + {/* Shell List */} + {!loading && shells.length > 0 && ( + <> +
+ {shells.map(shell => { + const isPublic = shell.type === 'public' + return ( + +
+
+ {isPublic ? ( + + ) : ( + + )} +
+
+

+ {shell.displayName || shell.name} +

+ {isPublic && ( + + {t('shells.public')} + + )} +
+ {/* Show ID if different from display name */} + {!isPublic && shell.displayName && shell.displayName !== shell.name && ( +

ID: {shell.name}

+ )} +
+ + {shell.runtime} + + + {getShellTypeLabel(shell.shellType)} + + {shell.baseImage && ( + + {shell.baseImage} + + )} +
+
+
+
+ {/* Only show action buttons for user's own shells */} + {!isPublic && ( + <> + + + + )} +
+
+
+ ) + })} +
+ + )} + + {/* Add Button */} + {!loading && ( +
+
+ setIsCreating(true)}> + {t('shells.create')} + +
+
+ )} +
+ + {/* Delete Confirmation Dialog */} + setDeleteConfirmShell(null)}> + + + {t('shells.delete_confirm_title')} + + {t('shells.delete_confirm_message', { name: deleteConfirmShell?.name })} + + + + {t('actions.cancel')} + + {t('actions.delete')} + + + + +
+ ) +} + +export default ShellList diff --git a/frontend/src/i18n/locales/en/common.json b/frontend/src/i18n/locales/en/common.json index 4ddfdd14..4c1ff6e6 100644 --- a/frontend/src/i18n/locales/en/common.json +++ b/frontend/src/i18n/locales/en/common.json @@ -312,6 +312,7 @@ "bot": "Bot", "team": "Team", "models": "Models", + "shells": "Shells", "sections": { "general": "General" } @@ -536,5 +537,49 @@ "mixed_team_warning": "This team contains multiple executor types, cannot specify a unified model", "default_model": "Default", "use_bot_model": "Use Bot's predefined model" + }, + "shells": { + "title": "Shell Management", + "description": "Manage custom shells with your own base images", + "public": "Public", + "create": "Create Shell", + "edit": "Edit Shell", + "delete": "Delete Shell", + "validate": "Validate", + "shell_name": "Shell Name", + "name_hint": "Unique identifier, can only contain lowercase letters, numbers, and hyphens", + "name_readonly_hint": "Shell name cannot be changed after creation", + "display_name": "Display Name", + "display_name_placeholder": "My Custom Shell", + "display_name_hint": "Human-readable name for display (optional)", + "base_shell": "Base Shell", + "select_base_shell": "Select a base shell type", + "base_shell_hint": "The shell type this custom shell is based on (ClaudeCode, Agno)", + "base_image": "Base Image", + "base_image_hint": "Docker image address (e.g., ghcr.io/your-org/your-image:latest)", + "no_shells": "No shells available", + "no_shells_hint": "Click the button above to create your first custom shell", + "validation_success": "Image validation passed", + "validation_failed": "Image validation failed", + "validation_passed": "All checks passed", + "validation_not_passed": "Some checks failed", + "create_success": "Shell created successfully", + "update_success": "Shell updated successfully", + "delete_success": "Shell deleted successfully", + "delete_confirm_title": "Confirm Delete", + "delete_confirm_message": "Are you sure you want to delete the shell \"{{name}}\"? 
This action cannot be undone.", + "errors": { + "load_shells_failed": "Failed to load shells", + "create_failed": "Failed to create shell", + "update_failed": "Failed to update shell", + "delete_failed": "Failed to delete shell", + "name_required": "Shell name is required", + "name_invalid": "Shell name can only contain lowercase letters, numbers, and hyphens", + "base_shell_required": "Base shell is required", + "base_image_required": "Base image is required", + "base_image_and_shell_required": "Both base image and base shell are required for validation", + "base_shell_not_found": "Selected base shell not found", + "image_not_compatible": "Image is not compatible with the selected shell type" + } } } diff --git a/frontend/src/i18n/locales/zh-CN/common.json b/frontend/src/i18n/locales/zh-CN/common.json index 6d9bebaa..f73339ed 100644 --- a/frontend/src/i18n/locales/zh-CN/common.json +++ b/frontend/src/i18n/locales/zh-CN/common.json @@ -313,6 +313,7 @@ "bot": "机器人", "team": "机器人", "models": "模型", + "shells": "Shell", "sections": { "general": "通用" } @@ -537,5 +538,49 @@ "mixed_team_warning": "当前团队包含多种执行器类型,无法统一指定模型", "default_model": "默认绑定模型", "use_bot_model": "使用 Bot 预设模型" + }, + "shells": { + "title": "Shell 管理", + "description": "使用自定义 Base Image 管理 Shell", + "public": "公共", + "create": "创建 Shell", + "edit": "编辑 Shell", + "delete": "删除 Shell", + "validate": "验证", + "shell_name": "Shell 名称", + "name_hint": "唯一标识符,只能包含小写字母、数字和连字符", + "name_readonly_hint": "Shell 名称创建后无法更改", + "display_name": "显示名称", + "display_name_placeholder": "我的自定义 Shell", + "display_name_hint": "用于显示的友好名称(可选)", + "base_shell": "基础 Shell", + "select_base_shell": "选择基础 Shell 类型", + "base_shell_hint": "此自定义 Shell 基于的 Shell 类型(ClaudeCode、Agno)", + "base_image": "Base Image", + "base_image_hint": "Docker 镜像地址(例如:ghcr.io/your-org/your-image:latest)", + "no_shells": "暂无 Shell", + "no_shells_hint": "点击上方按钮创建您的第一个自定义 Shell", + "validation_success": "镜像验证通过", + "validation_failed": "镜像验证失败", + "validation_passed": "所有检查通过", + "validation_not_passed": "部分检查未通过", + "create_success": "Shell 创建成功", + "update_success": "Shell 更新成功", + "delete_success": "Shell 删除成功", + "delete_confirm_title": "确认删除", + "delete_confirm_message": "确定要删除 Shell \"{{name}}\" 吗?此操作无法撤销。", + "errors": { + "load_shells_failed": "加载 Shell 列表失败", + "create_failed": "创建 Shell 失败", + "update_failed": "更新 Shell 失败", + "delete_failed": "删除 Shell 失败", + "name_required": "Shell 名称不能为空", + "name_invalid": "Shell 名称只能包含小写字母、数字和连字符", + "base_shell_required": "基础 Shell 不能为空", + "base_image_required": "Base Image 不能为空", + "base_image_and_shell_required": "验证需要同时填写 Base Image 和基础 Shell", + "base_shell_not_found": "未找到所选基础 Shell", + "image_not_compatible": "镜像与所选 Shell 类型不兼容" + } } } From f6d85b01d562af8ef3bb6e4ee6fc70059284d165 Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 01:15:40 +0800 Subject: [PATCH 02/10] fix: fix Tag variant and move validate_image to executor manager - Fix ShellList Tag variant error (use 'default' instead of 'outline') - Move image validation logic to Executor Manager API (/executor-manager/images/validate) - Backend shells API now proxies validation requests to Executor Manager - Support various deployment modes (Docker, K8s) where backend may not have direct Docker access --- backend/app/api/endpoints/adapter/shells.py | 208 +++++------------ executor_manager/routers/routers.py | 214 ++++++++++++++++++ .../settings/components/ShellList.tsx | 2 +- 3 files changed, 273 insertions(+), 151 
deletions(-) diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py index 2fee5d32..ca55269d 100644 --- a/backend/app/api/endpoints/adapter/shells.py +++ b/backend/app/api/endpoints/adapter/shells.py @@ -414,14 +414,18 @@ def validate_image( """ Validate if a base image is compatible with a specific shell type. - This endpoint pulls the image and checks for required dependencies: + This endpoint proxies the validation request to Executor Manager, which: + - Pulls the image and runs a temporary container to check dependencies - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 - Agno: Python 3.12 - Dify: No check needed (external_api type) - Note: Only supports public image registries. + Note: Validation is performed by Executor Manager to support various deployment + modes (Docker, Kubernetes) where the backend may not have direct Docker access. """ - import subprocess + import os + + import httpx shell_type = request.shellType image = request.image @@ -434,164 +438,68 @@ def validate_image( errors=["Dify is an external_api type and doesn't require image validation"], ) - # Define checks based on shell type - checks_config = { - "ClaudeCode": [ - { - "name": "node", - "command": "node --version", - "version_regex": r"v(\d+\.\d+\.\d+)", - "min_version": "20.0.0", - }, - { - "name": "claude-code", - "command": "claude --version 2>/dev/null || echo 'not found'", - "version_regex": r"(\d+\.\d+\.\d+)", - "min_version": None, - }, - { - "name": "sqlite", - "command": "sqlite3 --version", - "version_regex": r"(\d+\.\d+\.\d+)", - "min_version": "3.50.0", - }, - { - "name": "python", - "command": "python3 --version", - "version_regex": r"Python (\d+\.\d+\.\d+)", - "min_version": "3.12.0", - }, - ], - "Agno": [ - { - "name": "python", - "command": "python3 --version", - "version_regex": r"Python (\d+\.\d+\.\d+)", - "min_version": "3.12.0", - }, - ], - } + # Get executor manager URL from environment + executor_manager_url = os.getenv("EXECUTOR_MANAGER_URL", "http://localhost:8001") + validate_url = f"{executor_manager_url}/executor-manager/images/validate" - if shell_type not in checks_config: - return ImageValidationResponse( - valid=False, checks=[], errors=[f"Unknown shell type: {shell_type}"] - ) + try: + logger.info(f"Forwarding image validation to executor manager: {image}") - checks_to_run = checks_config[shell_type] - results = [] - errors = [] - all_passed = True + # Call executor manager's validate-image API + with httpx.Client(timeout=360.0) as client: # 6 minutes timeout + response = client.post( + validate_url, + json={"image": image, "shell_type": shell_type}, + ) - try: - # Pull the image first (with timeout) - logger.info(f"Pulling image {image} for validation...") - pull_result = subprocess.run( - ["docker", "pull", image], - capture_output=True, - text=True, - timeout=300, # 5 minutes timeout for pull - ) - if pull_result.returncode != 0: + if response.status_code != 200: + logger.error( + f"Executor manager validation failed: {response.status_code} {response.text}" + ) return ImageValidationResponse( valid=False, checks=[], - errors=[f"Failed to pull image: {pull_result.stderr}"], + errors=[f"Executor manager error: {response.text}"], ) - # Run checks - for check in checks_to_run: - try: - result = subprocess.run( - [ - "docker", - "run", - "--rm", - image, - "sh", - "-c", - check["command"], - ], - capture_output=True, - text=True, - timeout=30, - ) - - output = result.stdout.strip() - if result.returncode != 0 or 
"not found" in output.lower(): - results.append( - ImageCheckResult( - name=check["name"], - status="fail", - message=f"Command failed or not found", - ) - ) - all_passed = False - continue - - # Extract version - import re as re_module - - version_match = re_module.search(check["version_regex"], output) - if version_match: - version = version_match.group(1) - # Check minimum version if specified - if check["min_version"]: - from packaging import version as pkg_version - - try: - if pkg_version.parse(version) < pkg_version.parse( - check["min_version"] - ): - results.append( - ImageCheckResult( - name=check["name"], - version=version, - status="fail", - message=f"Version {version} < required {check['min_version']}", - ) - ) - all_passed = False - continue - except Exception: - pass # Skip version comparison on error - - results.append( - ImageCheckResult( - name=check["name"], version=version, status="pass" - ) - ) - else: - results.append( - ImageCheckResult( - name=check["name"], - status="pass", - message="Detected but version not parsed", - ) - ) - - except subprocess.TimeoutExpired: - results.append( - ImageCheckResult( - name=check["name"], status="fail", message="Check timed out" - ) - ) - all_passed = False - except Exception as e: - results.append( - ImageCheckResult( - name=check["name"], status="fail", message=str(e) - ) - ) - all_passed = False - - except subprocess.TimeoutExpired: + result = response.json() + logger.info(f"Image validation result from executor manager: valid={result.get('valid')}") + + # Convert result to response model + checks = [ + ImageCheckResult( + name=c.get("name", ""), + version=c.get("version"), + status=c.get("status", "fail"), + message=c.get("message"), + ) + for c in result.get("checks", []) + ] + + return ImageValidationResponse( + valid=result.get("valid", False), + checks=checks, + errors=result.get("errors", []), + ) + + except httpx.TimeoutException: + logger.error(f"Timeout calling executor manager for image validation: {image}") + return ImageValidationResponse( + valid=False, + checks=[], + errors=["Validation request timed out. 
The image may be large or slow to pull."], + ) + except httpx.RequestError as e: + logger.error(f"Error calling executor manager: {e}") return ImageValidationResponse( - valid=False, checks=results, errors=["Image pull timed out"] + valid=False, + checks=[], + errors=[f"Failed to connect to executor manager: {str(e)}"], ) except Exception as e: logger.error(f"Image validation error: {e}") return ImageValidationResponse( - valid=False, checks=results, errors=[f"Validation error: {str(e)}"] + valid=False, + checks=[], + errors=[f"Validation error: {str(e)}"], ) - - return ImageValidationResponse(valid=all_passed, checks=results, errors=errors) diff --git a/executor_manager/routers/routers.py b/executor_manager/routers/routers.py index 89f9e8cb..0062dda0 100644 --- a/executor_manager/routers/routers.py +++ b/executor_manager/routers/routers.py @@ -172,6 +172,220 @@ class CancelTaskRequest(BaseModel): task_id: int +class ValidateImageRequest(BaseModel): + """Request body for validating base image compatibility""" + image: str + shell_type: str # e.g., "ClaudeCode", "Agno" + + +class ImageCheckResult(BaseModel): + """Individual check result""" + name: str + version: Optional[str] = None + status: str # 'pass' or 'fail' + message: Optional[str] = None + + +class ValidateImageResponse(BaseModel): + """Response for image validation""" + valid: bool + checks: list + errors: list + + +@app.post("/executor-manager/images/validate") +async def validate_image(request: ValidateImageRequest, http_request: Request): + """ + Validate if a base image is compatible with a specific shell type. + + This endpoint pulls the image and runs a temporary container to check for required dependencies: + - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 + - Agno: Python 3.12 + - Dify: No check needed (external_api type) + + Note: Only supports public image registries. 
+ """ + import subprocess + import re as re_module + + client_ip = http_request.client.host if http_request.client else "unknown" + logger.info(f"Received image validation request: image={request.image}, shell_type={request.shell_type} from {client_ip}") + + shell_type = request.shell_type + image = request.image + + # Dify doesn't need validation + if shell_type == "Dify": + return ValidateImageResponse( + valid=True, + checks=[], + errors=["Dify is an external_api type and doesn't require image validation"], + ) + + # Define checks based on shell type + checks_config = { + "ClaudeCode": [ + { + "name": "node", + "command": "node --version", + "version_regex": r"v(\d+\.\d+\.\d+)", + "min_version": "20.0.0", + }, + { + "name": "claude-code", + "command": "claude --version 2>/dev/null || echo 'not found'", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": None, + }, + { + "name": "sqlite", + "command": "sqlite3 --version", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": "3.50.0", + }, + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + "Agno": [ + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + } + + if shell_type not in checks_config: + return ValidateImageResponse( + valid=False, checks=[], errors=[f"Unknown shell type: {shell_type}"] + ) + + checks_to_run = checks_config[shell_type] + results = [] + errors = [] + all_passed = True + + try: + # Pull the image first (with timeout) + logger.info(f"Pulling image {image} for validation...") + pull_result = subprocess.run( + ["docker", "pull", image], + capture_output=True, + text=True, + timeout=300, # 5 minutes timeout for pull + ) + if pull_result.returncode != 0: + logger.error(f"Failed to pull image {image}: {pull_result.stderr}") + return ValidateImageResponse( + valid=False, + checks=[], + errors=[f"Failed to pull image: {pull_result.stderr}"], + ) + + # Run checks in a single container for efficiency + for check in checks_to_run: + try: + result = subprocess.run( + [ + "docker", + "run", + "--rm", + image, + "sh", + "-c", + check["command"], + ], + capture_output=True, + text=True, + timeout=30, + ) + + output = result.stdout.strip() + if result.returncode != 0 or "not found" in output.lower(): + results.append( + ImageCheckResult( + name=check["name"], + status="fail", + message="Command failed or not found", + ).model_dump() + ) + all_passed = False + continue + + # Extract version + version_match = re_module.search(check["version_regex"], output) + if version_match: + version = version_match.group(1) + # Check minimum version if specified + if check["min_version"]: + from packaging import version as pkg_version + + try: + if pkg_version.parse(version) < pkg_version.parse( + check["min_version"] + ): + results.append( + ImageCheckResult( + name=check["name"], + version=version, + status="fail", + message=f"Version {version} < required {check['min_version']}", + ).model_dump() + ) + all_passed = False + continue + except Exception: + pass # Skip version comparison on error + + results.append( + ImageCheckResult( + name=check["name"], version=version, status="pass" + ).model_dump() + ) + else: + results.append( + ImageCheckResult( + name=check["name"], + status="pass", + message="Detected but version not parsed", + ).model_dump() + ) + + except subprocess.TimeoutExpired: + results.append( + ImageCheckResult( + name=check["name"], 
status="fail", message="Check timed out" + ).model_dump() + ) + all_passed = False + except Exception as e: + results.append( + ImageCheckResult( + name=check["name"], status="fail", message=str(e) + ).model_dump() + ) + all_passed = False + + except subprocess.TimeoutExpired: + logger.error(f"Image pull timed out for {image}") + return ValidateImageResponse( + valid=False, checks=results, errors=["Image pull timed out"] + ) + except Exception as e: + logger.error(f"Image validation error for {image}: {e}") + return ValidateImageResponse( + valid=False, checks=results, errors=[f"Validation error: {str(e)}"] + ) + + logger.info(f"Image validation completed for {image}: valid={all_passed}") + return ValidateImageResponse(valid=all_passed, checks=results, errors=errors) + + @app.post("/executor-manager/tasks/cancel") async def cancel_task(request: CancelTaskRequest, http_request: Request): """ diff --git a/frontend/src/features/settings/components/ShellList.tsx b/frontend/src/features/settings/components/ShellList.tsx index 86adec0d..2b2d78d0 100644 --- a/frontend/src/features/settings/components/ShellList.tsx +++ b/frontend/src/features/settings/components/ShellList.tsx @@ -168,7 +168,7 @@ const ShellList: React.FC = () => { {getShellTypeLabel(shell.shellType)} {shell.baseImage && ( - + {shell.baseImage} )} From cb6944ae40589a673384cec3d57d834e8f0e6eb4 Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 01:37:27 +0800 Subject: [PATCH 03/10] refactor(shell): use executor flow for async image validation - Create ImageValidatorAgent for running validation inside container - Modify executor_manager to dispatch validation tasks via task processor - Update backend shells API for async validation response - Update frontend to handle async validation status - Add i18n translations for validation status messages This approach uses the actual executor run flow instead of direct docker subprocess calls, making it extensible for both Docker and K8s modes. Validation runs inside the target container and reports results via callback. 
--- backend/app/api/endpoints/adapter/shells.py | 88 ++++--- executor/agents/factory.py | 11 +- executor/agents/image_validator/__init__.py | 7 + .../image_validator/image_validator_agent.py | 212 +++++++++++++++ executor/callback/callback_handler.py | 5 +- executor_manager/routers/routers.py | 245 +++++------------- frontend/src/apis/shells.ts | 11 +- .../settings/components/ShellEdit.tsx | 95 +++++-- frontend/src/i18n/locales/en/common.json | 4 + frontend/src/i18n/locales/zh-CN/common.json | 4 + 10 files changed, 439 insertions(+), 243 deletions(-) create mode 100644 executor/agents/image_validator/__init__.py create mode 100644 executor/agents/image_validator/image_validator_agent.py diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py index ca55269d..ed3d39af 100644 --- a/backend/app/api/endpoints/adapter/shells.py +++ b/backend/app/api/endpoints/adapter/shells.py @@ -56,6 +56,7 @@ class ImageValidationRequest(BaseModel): image: str shellType: str # e.g., "ClaudeCode", "Agno" + shellName: Optional[str] = None # Optional shell name for tracking class ImageCheckResult(BaseModel): @@ -68,11 +69,15 @@ class ImageCheckResult(BaseModel): class ImageValidationResponse(BaseModel): - """Response for image validation""" + """Response for image validation - async mode returns task submission status""" - valid: bool - checks: List[ImageCheckResult] - errors: List[str] + status: str # 'submitted', 'skipped', 'error' + message: str + validationTaskId: Optional[int] = None + # For immediate results (e.g., Dify skip) + valid: Optional[bool] = None + checks: Optional[List[ImageCheckResult]] = None + errors: Optional[List[str]] = None def _public_shell_to_unified(shell: PublicShell) -> UnifiedShell: @@ -414,14 +419,18 @@ def validate_image( """ Validate if a base image is compatible with a specific shell type. - This endpoint proxies the validation request to Executor Manager, which: - - Pulls the image and runs a temporary container to check dependencies + This endpoint submits an async validation task to Executor Manager: + - The validation runs inside the target image container + - Results are returned via callback mechanism + - Frontend should poll or use WebSocket to get final results + + Validation checks: - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 - Agno: Python 3.12 - - Dify: No check needed (external_api type) + - Dify: No check needed (external_api type, returns immediately) - Note: Validation is performed by Executor Manager to support various deployment - modes (Docker, Kubernetes) where the backend may not have direct Docker access. + Note: Validation is asynchronous to support various deployment modes + (Docker, Kubernetes) and to perform validation inside the actual container. 
""" import os @@ -430,12 +439,14 @@ def validate_image( shell_type = request.shellType image = request.image - # Dify doesn't need validation + # Dify doesn't need validation - return immediately if shell_type == "Dify": return ImageValidationResponse( + status="skipped", + message="Dify is an external_api type and doesn't require image validation", valid=True, checks=[], - errors=["Dify is an external_api type and doesn't require image validation"], + errors=[], ) # Get executor manager URL from environment @@ -443,63 +454,64 @@ def validate_image( validate_url = f"{executor_manager_url}/executor-manager/images/validate" try: - logger.info(f"Forwarding image validation to executor manager: {image}") + logger.info(f"Submitting image validation task to executor manager: {image}") # Call executor manager's validate-image API - with httpx.Client(timeout=360.0) as client: # 6 minutes timeout + with httpx.Client(timeout=30.0) as client: # Short timeout since this just submits a task response = client.post( validate_url, - json={"image": image, "shell_type": shell_type}, + json={ + "image": image, + "shell_type": shell_type, + "shell_name": request.shellName or "", + }, ) if response.status_code != 200: logger.error( - f"Executor manager validation failed: {response.status_code} {response.text}" + f"Executor manager validation request failed: {response.status_code} {response.text}" ) return ImageValidationResponse( + status="error", + message=f"Failed to submit validation task: {response.text}", valid=False, - checks=[], errors=[f"Executor manager error: {response.text}"], ) result = response.json() - logger.info(f"Image validation result from executor manager: valid={result.get('valid')}") - - # Convert result to response model - checks = [ - ImageCheckResult( - name=c.get("name", ""), - version=c.get("version"), - status=c.get("status", "fail"), - message=c.get("message"), - ) - for c in result.get("checks", []) - ] + logger.info(f"Validation task submission result: status={result.get('status')}") + # Return the submission status return ImageValidationResponse( - valid=result.get("valid", False), - checks=checks, - errors=result.get("errors", []), + status=result.get("status", "error"), + message=result.get("message", ""), + validationTaskId=result.get("validation_task_id"), + valid=result.get("valid"), + checks=None, + errors=result.get("errors"), ) except httpx.TimeoutException: - logger.error(f"Timeout calling executor manager for image validation: {image}") + logger.error(f"Timeout submitting validation task for image: {image}") return ImageValidationResponse( + status="error", + message="Request timed out while submitting validation task", valid=False, - checks=[], - errors=["Validation request timed out. 
The image may be large or slow to pull."], + errors=["Validation request timed out"], ) except httpx.RequestError as e: logger.error(f"Error calling executor manager: {e}") return ImageValidationResponse( + status="error", + message=f"Failed to connect to executor manager: {str(e)}", valid=False, - checks=[], - errors=[f"Failed to connect to executor manager: {str(e)}"], + errors=[f"Connection error: {str(e)}"], ) except Exception as e: logger.error(f"Image validation error: {e}") return ImageValidationResponse( + status="error", + message=f"Validation error: {str(e)}", valid=False, - checks=[], - errors=[f"Validation error: {str(e)}"], + errors=[str(e)], ) diff --git a/executor/agents/factory.py b/executor/agents/factory.py index e301480b..d6cbb65a 100644 --- a/executor/agents/factory.py +++ b/executor/agents/factory.py @@ -13,6 +13,7 @@ from executor.agents.claude_code.claude_code_agent import ClaudeCodeAgent from executor.agents.agno.agno_agent import AgnoAgent from executor.agents.dify.dify_agent import DifyAgent +from executor.agents.image_validator.image_validator_agent import ImageValidatorAgent logger = setup_logger("agent_factory") @@ -21,12 +22,18 @@ class AgentFactory: """ Factory class for creating agent instances based on agent_type - Agents are classified into two types: + Agents are classified into types: - local_engine: Agents that execute code locally (ClaudeCode, Agno) - external_api: Agents that delegate execution to external services (Dify) + - validator: Agents that perform validation tasks (ImageValidator) """ - _agents = {"claudecode": ClaudeCodeAgent, "agno": AgnoAgent, "dify": DifyAgent} + _agents = { + "claudecode": ClaudeCodeAgent, + "agno": AgnoAgent, + "dify": DifyAgent, + "imagevalidator": ImageValidatorAgent, + } @classmethod def get_agent(cls, agent_type: str, task_data: Dict[str, Any]) -> Optional[Agent]: diff --git a/executor/agents/image_validator/__init__.py b/executor/agents/image_validator/__init__.py new file mode 100644 index 00000000..70f92bfc --- /dev/null +++ b/executor/agents/image_validator/__init__.py @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Weibo, Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +from executor.agents.image_validator.image_validator_agent import ImageValidatorAgent + +__all__ = ["ImageValidatorAgent"] diff --git a/executor/agents/image_validator/image_validator_agent.py b/executor/agents/image_validator/image_validator_agent.py new file mode 100644 index 00000000..6cd85812 --- /dev/null +++ b/executor/agents/image_validator/image_validator_agent.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python + +# SPDX-FileCopyrightText: 2025 Weibo, Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +# -*- coding: utf-8 -*- + +""" +Image Validator Agent for validating custom base images. +This agent runs validation checks inside the container to verify +compatibility with specific shell types. +""" + +import re +import subprocess +from typing import Dict, Any, Tuple, Optional, List + +from shared.logger import setup_logger +from shared.status import TaskStatus +from executor.agents.base import Agent + +logger = setup_logger("image_validator") + + +class ImageValidatorAgent(Agent): + """ + Agent for validating custom base images. + + This agent executes validation commands inside the container + and reports results back via callback. 
+ """ + + AGENT_TYPE = "validator" + + # Shell type to validation checks mapping + VALIDATION_CHECKS = { + "ClaudeCode": [ + { + "name": "node", + "command": "node --version", + "version_regex": r"v(\d+\.\d+\.\d+)", + "min_version": "20.0.0", + }, + { + "name": "claude-code", + "command": "claude --version 2>/dev/null || echo 'not found'", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": None, + }, + { + "name": "sqlite", + "command": "sqlite3 --version", + "version_regex": r"(\d+\.\d+\.\d+)", + "min_version": "3.50.0", + }, + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + "Agno": [ + { + "name": "python", + "command": "python3 --version", + "version_regex": r"Python (\d+\.\d+\.\d+)", + "min_version": "3.12.0", + }, + ], + } + + def __init__(self, task_data: Dict[str, Any]): + super().__init__(task_data) + self.task_data = task_data + + # Get validation parameters from task data + validation_params = task_data.get("validation_params", {}) + self.shell_type = validation_params.get("shell_type", "") + self.image = validation_params.get("image", "") + self.shell_name = validation_params.get("shell_name", "") + + def get_name(self) -> str: + return "ImageValidator" + + def initialize(self) -> TaskStatus: + """Initialize the validator agent""" + if not self.shell_type: + logger.error("shell_type is required for validation") + return TaskStatus.FAILED + + if self.shell_type not in self.VALIDATION_CHECKS: + logger.error(f"Unknown shell type: {self.shell_type}") + return TaskStatus.FAILED + + logger.info(f"ImageValidator initialized for shell_type={self.shell_type}") + return TaskStatus.SUCCESS + + def execute(self) -> TaskStatus: + """Execute validation checks and return results""" + logger.info(f"Starting image validation for shell_type={self.shell_type}") + + checks = self.VALIDATION_CHECKS.get(self.shell_type, []) + results = [] + all_passed = True + + for check in checks: + check_result = self._run_check(check) + results.append(check_result) + if check_result["status"] == "fail": + all_passed = False + + # Build result data to be returned via callback + validation_result = { + "valid": all_passed, + "checks": results, + "errors": [], + "shell_name": self.shell_name, + "shell_type": self.shell_type, + "image": self.image, + } + + logger.info(f"Validation completed: valid={all_passed}, checks={len(results)}") + + # Send result via callback with result data + self.report_progress( + progress=100, + status=TaskStatus.COMPLETED.value, + message="Image validation completed", + result=validation_result, + ) + + return TaskStatus.COMPLETED + + def _run_check(self, check: Dict[str, Any]) -> Dict[str, Any]: + """Run a single validation check""" + name = check["name"] + command = check["command"] + version_regex = check["version_regex"] + min_version = check.get("min_version") + + try: + result = subprocess.run( + ["sh", "-c", command], + capture_output=True, + text=True, + timeout=30, + ) + + output = result.stdout.strip() + if result.returncode != 0 or "not found" in output.lower(): + logger.warning(f"Check '{name}' failed: command returned error or not found") + return { + "name": name, + "status": "fail", + "message": "Command failed or not found", + } + + # Extract version + version_match = re.search(version_regex, output) + if version_match: + version = version_match.group(1) + + # Check minimum version if specified + if min_version: + try: + from packaging import version as pkg_version + + if 
pkg_version.parse(version) < pkg_version.parse(min_version): + logger.warning(f"Check '{name}': version {version} < required {min_version}") + return { + "name": name, + "version": version, + "status": "fail", + "message": f"Version {version} < required {min_version}", + } + except Exception as e: + logger.warning(f"Version comparison error for '{name}': {e}") + + logger.info(f"Check '{name}' passed: version={version}") + return { + "name": name, + "version": version, + "status": "pass", + } + else: + logger.info(f"Check '{name}' passed but version not parsed") + return { + "name": name, + "status": "pass", + "message": "Detected but version not parsed", + } + + except subprocess.TimeoutExpired: + logger.error(f"Check '{name}' timed out") + return { + "name": name, + "status": "fail", + "message": "Check timed out", + } + except Exception as e: + logger.error(f"Check '{name}' error: {e}") + return { + "name": name, + "status": "fail", + "message": str(e), + } + + def cancel_run(self) -> bool: + """Cancel is not applicable for validation tasks""" + return True diff --git a/executor/callback/callback_handler.py b/executor/callback/callback_handler.py index e7690d46..4d4ecefd 100644 --- a/executor/callback/callback_handler.py +++ b/executor/callback/callback_handler.py @@ -118,6 +118,7 @@ def send_task_completed_callback( message: str = "Task executed successfully", executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, + result: Optional[Dict[str, Any]] = None, ) -> Dict[str, Any]: """ Send task completed callback @@ -130,11 +131,12 @@ def send_task_completed_callback( message (str, optional): Message. Defaults to "Task executed successfully". executor_name (str, optional): Executor name executor_namespace (str, optional): Executor namespace + result (dict, optional): Result data to include in callback Returns: Dict[str, Any]: Callback response """ - return send_status_callback( + return callback_client.send_callback( task_id=task_id, subtask_id=subtask_id, task_title=task_title, @@ -144,6 +146,7 @@ def send_task_completed_callback( progress=100, executor_name=executor_name, executor_namespace=executor_namespace, + result=result, ) diff --git a/executor_manager/routers/routers.py b/executor_manager/routers/routers.py index 0062dda0..f26a4111 100644 --- a/executor_manager/routers/routers.py +++ b/executor_manager/routers/routers.py @@ -176,6 +176,7 @@ class ValidateImageRequest(BaseModel): """Request body for validating base image compatibility""" image: str shell_type: str # e.g., "ClaudeCode", "Agno" + shell_name: Optional[str] = None # Optional shell name for tracking class ImageCheckResult(BaseModel): @@ -188,9 +189,9 @@ class ImageCheckResult(BaseModel): class ValidateImageResponse(BaseModel): """Response for image validation""" - valid: bool - checks: list - errors: list + status: str # 'submitted' for async validation + message: str + validation_task_id: Optional[int] = None @app.post("/executor-manager/images/validate") @@ -198,192 +199,88 @@ async def validate_image(request: ValidateImageRequest, http_request: Request): """ Validate if a base image is compatible with a specific shell type. - This endpoint pulls the image and runs a temporary container to check for required dependencies: - - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 - - Agno: Python 3.12 - - Dify: No check needed (external_api type) + This endpoint creates a validation task that runs inside the target image container. 
+ The validation is asynchronous - results are returned via callback mechanism. - Note: Only supports public image registries. - """ - import subprocess - import re as re_module + For ClaudeCode: checks Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 + For Agno: checks Python 3.12 + For Dify: No check needed (external_api type) + The validation task will: + 1. Start a container with the specified base_image + 2. Run ImageValidatorAgent to execute validation checks + 3. Report results back via callback with validation_result in result field + """ client_ip = http_request.client.host if http_request.client else "unknown" logger.info(f"Received image validation request: image={request.image}, shell_type={request.shell_type} from {client_ip}") shell_type = request.shell_type image = request.image - # Dify doesn't need validation + # Dify doesn't need validation (external_api type) if shell_type == "Dify": - return ValidateImageResponse( - valid=True, - checks=[], - errors=["Dify is an external_api type and doesn't require image validation"], - ) + return { + "status": "skipped", + "message": "Dify is an external_api type and doesn't require image validation", + "valid": True, + "checks": [], + "errors": [], + } - # Define checks based on shell type - checks_config = { - "ClaudeCode": [ - { - "name": "node", - "command": "node --version", - "version_regex": r"v(\d+\.\d+\.\d+)", - "min_version": "20.0.0", - }, - { - "name": "claude-code", - "command": "claude --version 2>/dev/null || echo 'not found'", - "version_regex": r"(\d+\.\d+\.\d+)", - "min_version": None, - }, - { - "name": "sqlite", - "command": "sqlite3 --version", - "version_regex": r"(\d+\.\d+\.\d+)", - "min_version": "3.50.0", - }, - { - "name": "python", - "command": "python3 --version", - "version_regex": r"Python (\d+\.\d+\.\d+)", - "min_version": "3.12.0", - }, - ], - "Agno": [ - { - "name": "python", - "command": "python3 --version", - "version_regex": r"Python (\d+\.\d+\.\d+)", - "min_version": "3.12.0", - }, - ], + # Validate shell_type + if shell_type not in ["ClaudeCode", "Agno"]: + return { + "status": "error", + "message": f"Unknown shell type: {shell_type}", + "valid": False, + "checks": [], + "errors": [f"Unknown shell type: {shell_type}"], + } + + # Build validation task data + # Use a unique negative task_id to distinguish validation tasks from regular tasks + import time + validation_task_id = -int(time.time() * 1000) % 1000000 # Negative ID for validation tasks + + validation_task = { + "task_id": validation_task_id, + "subtask_id": 1, + "task_title": f"Image Validation: {request.shell_name or image}", + "subtask_title": f"Validating {shell_type} dependencies", + "type": "validation", + "bot": [{ + "agent_name": "ImageValidator", + "base_image": image, # Use the target image for validation + }], + "validation_params": { + "shell_type": shell_type, + "image": image, + "shell_name": request.shell_name or "", + }, + "executor_image": os.getenv("EXECUTOR_IMAGE", ""), } - if shell_type not in checks_config: - return ValidateImageResponse( - valid=False, checks=[], errors=[f"Unknown shell type: {shell_type}"] - ) + try: + # Submit validation task using the task processor + task_processor.process_tasks([validation_task]) - checks_to_run = checks_config[shell_type] - results = [] - errors = [] - all_passed = True + logger.info(f"Validation task submitted: task_id={validation_task_id}, image={image}") - try: - # Pull the image first (with timeout) - logger.info(f"Pulling image {image} for validation...") - 
pull_result = subprocess.run( - ["docker", "pull", image], - capture_output=True, - text=True, - timeout=300, # 5 minutes timeout for pull - ) - if pull_result.returncode != 0: - logger.error(f"Failed to pull image {image}: {pull_result.stderr}") - return ValidateImageResponse( - valid=False, - checks=[], - errors=[f"Failed to pull image: {pull_result.stderr}"], - ) - - # Run checks in a single container for efficiency - for check in checks_to_run: - try: - result = subprocess.run( - [ - "docker", - "run", - "--rm", - image, - "sh", - "-c", - check["command"], - ], - capture_output=True, - text=True, - timeout=30, - ) - - output = result.stdout.strip() - if result.returncode != 0 or "not found" in output.lower(): - results.append( - ImageCheckResult( - name=check["name"], - status="fail", - message="Command failed or not found", - ).model_dump() - ) - all_passed = False - continue - - # Extract version - version_match = re_module.search(check["version_regex"], output) - if version_match: - version = version_match.group(1) - # Check minimum version if specified - if check["min_version"]: - from packaging import version as pkg_version - - try: - if pkg_version.parse(version) < pkg_version.parse( - check["min_version"] - ): - results.append( - ImageCheckResult( - name=check["name"], - version=version, - status="fail", - message=f"Version {version} < required {check['min_version']}", - ).model_dump() - ) - all_passed = False - continue - except Exception: - pass # Skip version comparison on error - - results.append( - ImageCheckResult( - name=check["name"], version=version, status="pass" - ).model_dump() - ) - else: - results.append( - ImageCheckResult( - name=check["name"], - status="pass", - message="Detected but version not parsed", - ).model_dump() - ) - - except subprocess.TimeoutExpired: - results.append( - ImageCheckResult( - name=check["name"], status="fail", message="Check timed out" - ).model_dump() - ) - all_passed = False - except Exception as e: - results.append( - ImageCheckResult( - name=check["name"], status="fail", message=str(e) - ).model_dump() - ) - all_passed = False - - except subprocess.TimeoutExpired: - logger.error(f"Image pull timed out for {image}") - return ValidateImageResponse( - valid=False, checks=results, errors=["Image pull timed out"] - ) - except Exception as e: - logger.error(f"Image validation error for {image}: {e}") - return ValidateImageResponse( - valid=False, checks=results, errors=[f"Validation error: {str(e)}"] - ) + return { + "status": "submitted", + "message": f"Validation task submitted. 
Results will be returned via callback.", + "validation_task_id": validation_task_id, + } - logger.info(f"Image validation completed for {image}: valid={all_passed}") - return ValidateImageResponse(valid=all_passed, checks=results, errors=errors) + except Exception as e: + logger.error(f"Failed to submit validation task for {image}: {e}") + return { + "status": "error", + "message": f"Failed to submit validation task: {str(e)}", + "valid": False, + "checks": [], + "errors": [str(e)], + } @app.post("/executor-manager/tasks/cancel") diff --git a/frontend/src/apis/shells.ts b/frontend/src/apis/shells.ts index 1f8d7bc5..df5bf04b 100644 --- a/frontend/src/apis/shells.ts +++ b/frontend/src/apis/shells.ts @@ -38,6 +38,7 @@ export interface ShellUpdateRequest { export interface ImageValidationRequest { image: string shellType: string // e.g., "ClaudeCode", "Agno" + shellName?: string // Optional shell name for tracking } export interface ImageCheckResult { @@ -48,9 +49,13 @@ export interface ImageCheckResult { } export interface ImageValidationResponse { - valid: boolean - checks: ImageCheckResult[] - errors: string[] + status: 'submitted' | 'skipped' | 'error' + message: string + validationTaskId?: number | null + // For immediate results (e.g., Dify skip) + valid?: boolean | null + checks?: ImageCheckResult[] | null + errors?: string[] | null } // Shell Services diff --git a/frontend/src/features/settings/components/ShellEdit.tsx b/frontend/src/features/settings/components/ShellEdit.tsx index 29275761..e8467aaa 100644 --- a/frontend/src/features/settings/components/ShellEdit.tsx +++ b/frontend/src/features/settings/components/ShellEdit.tsx @@ -37,10 +37,12 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { const [baseImage, setBaseImage] = useState(shell?.baseImage || '') const [saving, setSaving] = useState(false) const [validating, setValidating] = useState(false) - const [validationResult, setValidationResult] = useState<{ - valid: boolean - checks: ImageCheckResult[] - errors: string[] + const [validationStatus, setValidationStatus] = useState<{ + status: 'submitted' | 'skipped' | 'error' | 'success' | 'failed' + message: string + valid?: boolean + checks?: ImageCheckResult[] + errors?: string[] } | null>(null) // Available base shells (public local_engine shells) @@ -81,27 +83,61 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { } setValidating(true) - setValidationResult(null) + setValidationStatus(null) try { const result = await shellApis.validateImage({ image: baseImage, shellType: selectedBaseShell.runtime, + shellName: name || undefined, }) - setValidationResult(result) - if (result.valid) { + // Handle different response statuses + if (result.status === 'skipped') { + // Dify type - validation not needed + setValidationStatus({ + status: 'success', + message: result.message, + valid: true, + checks: [], + errors: [], + }) toast({ - title: t('shells.validation_success'), + title: t('shells.validation_skipped'), + description: result.message, + }) + } else if (result.status === 'submitted') { + // Async validation task submitted + setValidationStatus({ + status: 'submitted', + message: result.message, + valid: undefined, + }) + toast({ + title: t('shells.validation_submitted'), + description: t('shells.validation_async_hint'), + }) + } else if (result.status === 'error') { + // Error submitting validation + setValidationStatus({ + status: 'error', + message: result.message, + valid: false, + errors: result.errors || [], }) - } else { toast({ variant: 
'destructive', title: t('shells.validation_failed'), - description: result.errors.join(', ') || t('shells.errors.image_not_compatible'), + description: result.message, }) } } catch (error) { + setValidationStatus({ + status: 'error', + message: (error as Error).message, + valid: false, + errors: [(error as Error).message], + }) toast({ variant: 'destructive', title: t('shells.validation_failed'), @@ -300,7 +336,7 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { value={baseImage} onChange={e => { setBaseImage(e.target.value) - setValidationResult(null) // Clear validation result on change + setValidationStatus(null) // Clear validation status on change }} placeholder="ghcr.io/your-org/your-image:latest" className="bg-base flex-1" @@ -320,36 +356,45 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => {

{t('shells.base_image_hint')}

- {/* Validation Results */} - {validationResult && ( + {/* Validation Status */} + {validationStatus && (
- {validationResult.valid ? ( + {validationStatus.status === 'success' || validationStatus.valid === true ? ( + ) : validationStatus.status === 'submitted' ? ( + ) : ( )} - {validationResult.valid + {validationStatus.status === 'success' ? t('shells.validation_passed') - : t('shells.validation_not_passed')} + : validationStatus.status === 'submitted' + ? t('shells.validation_in_progress') + : t('shells.validation_not_passed')}
- {validationResult.checks.length > 0 && ( -
    - {validationResult.checks.map((check, index) => ( +

    {validationStatus.message}

    + {validationStatus.checks && validationStatus.checks.length > 0 && ( +
      + {validationStatus.checks.map((check, index) => (
    • {check.status === 'pass' ? ( @@ -365,9 +410,9 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { ))}
    )} - {validationResult.errors.length > 0 && ( + {validationStatus.errors && validationStatus.errors.length > 0 && (
      - {validationResult.errors.map((error, index) => ( + {validationStatus.errors.map((error, index) => (
    • {error}
    • ))}
    diff --git a/frontend/src/i18n/locales/en/common.json b/frontend/src/i18n/locales/en/common.json index 4c1ff6e6..d2a6bcf4 100644 --- a/frontend/src/i18n/locales/en/common.json +++ b/frontend/src/i18n/locales/en/common.json @@ -563,6 +563,10 @@ "validation_failed": "Image validation failed", "validation_passed": "All checks passed", "validation_not_passed": "Some checks failed", + "validation_submitted": "Validation task submitted", + "validation_skipped": "Validation skipped", + "validation_in_progress": "Validation in progress", + "validation_async_hint": "Validation is running in the background. Results will be available shortly.", "create_success": "Shell created successfully", "update_success": "Shell updated successfully", "delete_success": "Shell deleted successfully", diff --git a/frontend/src/i18n/locales/zh-CN/common.json b/frontend/src/i18n/locales/zh-CN/common.json index f73339ed..a43414d8 100644 --- a/frontend/src/i18n/locales/zh-CN/common.json +++ b/frontend/src/i18n/locales/zh-CN/common.json @@ -564,6 +564,10 @@ "validation_failed": "镜像验证失败", "validation_passed": "所有检查通过", "validation_not_passed": "部分检查未通过", + "validation_submitted": "验证任务已提交", + "validation_skipped": "验证已跳过", + "validation_in_progress": "验证进行中", + "validation_async_hint": "验证任务正在后台运行,结果稍后可用。", "create_success": "Shell 创建成功", "update_success": "Shell 更新成功", "delete_success": "Shell 删除成功", From ffbaf5b90dce67387b93a4e5e73b39df3b3acf6c Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 15:20:28 +0800 Subject: [PATCH 04/10] feat(shells): add forced image validation with real-time progress polling - Add UUID-based validation tracking with Redis storage (5min TTL) - Add validation status query endpoint GET /api/shells/validation-status/{id} - Add validation status update endpoint for internal callback forwarding - Enhance Executor Manager to pass validation_id and forward callbacks to Backend - Enhance Docker executor to report container start/pull failures - Add stage progress reporting in ImageValidatorAgent (submitted/pulling/starting/running_checks/completed) - Implement frontend polling mechanism with 2s interval, 120s timeout - Disable save button until validation passes (forced validation for new/changed baseImage) - Add progress bar UI showing real-time validation stages - Add i18n messages for validation stages in both en/zh-CN --- backend/app/api/endpoints/adapter/shells.py | 236 +++++++++++++++- .../image_validator/image_validator_agent.py | 39 ++- executor_manager/executors/docker/executor.py | 89 +++++- executor_manager/routers/routers.py | 59 +++- frontend/src/apis/shells.ts | 31 ++- .../settings/components/ShellEdit.tsx | 261 ++++++++++++++++-- frontend/src/i18n/locales/en/common.json | 8 + frontend/src/i18n/locales/zh-CN/common.json | 8 + 8 files changed, 689 insertions(+), 42 deletions(-) diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py index ed3d39af..0ffb431c 100644 --- a/backend/app/api/endpoints/adapter/shells.py +++ b/backend/app/api/endpoints/adapter/shells.py @@ -4,6 +4,8 @@ import logging import re +import uuid +from datetime import datetime from typing import List, Optional from fastapi import APIRouter, Depends, HTTPException, Query, status @@ -12,6 +14,7 @@ from app.api.dependencies import get_db from app.core import security +from app.core.cache import cache_manager from app.models.kind import Kind from app.models.public_shell import PublicShell from app.models.user import User 
@@ -20,6 +23,10 @@ router = APIRouter() logger = logging.getLogger(__name__) +# Redis key prefix and TTL for validation status +VALIDATION_STATUS_KEY_PREFIX = "shell_validation:" +VALIDATION_STATUS_TTL = 300 # 5 minutes + # Request/Response Models class UnifiedShell(BaseModel): @@ -73,13 +80,39 @@ class ImageValidationResponse(BaseModel): status: str # 'submitted', 'skipped', 'error' message: str - validationTaskId: Optional[int] = None + validationId: Optional[str] = None # UUID for polling validation status + validationTaskId: Optional[int] = None # Legacy field for backward compatibility # For immediate results (e.g., Dify skip) valid: Optional[bool] = None checks: Optional[List[ImageCheckResult]] = None errors: Optional[List[str]] = None +class ValidationStatusResponse(BaseModel): + """Response for validation status query""" + + validationId: str + status: str # 'submitted', 'pulling_image', 'starting_container', 'running_checks', 'completed' + stage: str # Human-readable stage description + progress: int # 0-100 + valid: Optional[bool] = None + checks: Optional[List[ImageCheckResult]] = None + errors: Optional[List[str]] = None + errorMessage: Optional[str] = None + + +class ValidationStatusUpdateRequest(BaseModel): + """Request body for updating validation status (internal API)""" + + status: str + stage: Optional[str] = None + progress: Optional[int] = None + valid: Optional[bool] = None + checks: Optional[List[ImageCheckResult]] = None + errors: Optional[List[str]] = None + errorMessage: Optional[str] = None + + def _public_shell_to_unified(shell: PublicShell) -> UnifiedShell: """Convert PublicShell to UnifiedShell""" shell_crd = ShellCRD.model_validate(shell.json) @@ -412,7 +445,7 @@ def delete_shell( @router.post("/validate-image", response_model=ImageValidationResponse) -def validate_image( +async def validate_image( request: ImageValidationRequest, current_user: User = Depends(security.get_current_user), ): @@ -422,7 +455,7 @@ def validate_image( This endpoint submits an async validation task to Executor Manager: - The validation runs inside the target image container - Results are returned via callback mechanism - - Frontend should poll or use WebSocket to get final results + - Frontend should poll GET /api/shells/validation-status/{validation_id} to get results Validation checks: - ClaudeCode: Node.js 20.x, claude-code CLI, SQLite 3.50+, Python 3.12 @@ -449,6 +482,34 @@ def validate_image( errors=[], ) + # Generate UUID for validation tracking + validation_id = str(uuid.uuid4()) + + # Initialize validation status in Redis + initial_status = { + "validation_id": validation_id, + "status": "submitted", + "stage": "Validation task submitted", + "progress": 10, + "valid": None, + "checks": None, + "errors": None, + "error_message": None, + "image": image, + "shell_type": shell_type, + "created_at": datetime.utcnow().isoformat(), + "updated_at": datetime.utcnow().isoformat(), + } + + try: + # Store initial status in Redis + cache_key = f"{VALIDATION_STATUS_KEY_PREFIX}{validation_id}" + await cache_manager.set(cache_key, initial_status, expire=VALIDATION_STATUS_TTL) + logger.info(f"Initialized validation status in Redis: {validation_id}") + except Exception as e: + logger.error(f"Failed to initialize validation status in Redis: {e}") + # Continue even if Redis fails - validation can still work + # Get executor manager URL from environment executor_manager_url = os.getenv("EXECUTOR_MANAGER_URL", "http://localhost:8001") validate_url = 
f"{executor_manager_url}/executor-manager/images/validate" @@ -456,14 +517,15 @@ def validate_image( try: logger.info(f"Submitting image validation task to executor manager: {image}") - # Call executor manager's validate-image API - with httpx.Client(timeout=30.0) as client: # Short timeout since this just submits a task + # Call executor manager's validate-image API with validation_id + with httpx.Client(timeout=30.0) as client: response = client.post( validate_url, json={ "image": image, "shell_type": shell_type, "shell_name": request.shellName or "", + "validation_id": validation_id, # Pass UUID to executor manager }, ) @@ -471,9 +533,19 @@ def validate_image( logger.error( f"Executor manager validation request failed: {response.status_code} {response.text}" ) + # Update Redis status to error + await _update_validation_status( + validation_id, + status="completed", + stage="Error", + progress=100, + valid=False, + error_message=f"Failed to submit validation task: {response.text}", + ) return ImageValidationResponse( status="error", message=f"Failed to submit validation task: {response.text}", + validationId=validation_id, valid=False, errors=[f"Executor manager error: {response.text}"], ) @@ -481,10 +553,11 @@ def validate_image( result = response.json() logger.info(f"Validation task submission result: status={result.get('status')}") - # Return the submission status + # Return the submission status with validation_id for polling return ImageValidationResponse( status=result.get("status", "error"), message=result.get("message", ""), + validationId=validation_id, validationTaskId=result.get("validation_task_id"), valid=result.get("valid"), checks=None, @@ -493,25 +566,176 @@ def validate_image( except httpx.TimeoutException: logger.error(f"Timeout submitting validation task for image: {image}") + await _update_validation_status( + validation_id, + status="completed", + stage="Error", + progress=100, + valid=False, + error_message="Request timed out while submitting validation task", + ) return ImageValidationResponse( status="error", message="Request timed out while submitting validation task", + validationId=validation_id, valid=False, errors=["Validation request timed out"], ) except httpx.RequestError as e: logger.error(f"Error calling executor manager: {e}") + await _update_validation_status( + validation_id, + status="completed", + stage="Error", + progress=100, + valid=False, + error_message=f"Failed to connect to executor manager: {str(e)}", + ) return ImageValidationResponse( status="error", message=f"Failed to connect to executor manager: {str(e)}", + validationId=validation_id, valid=False, errors=[f"Connection error: {str(e)}"], ) except Exception as e: logger.error(f"Image validation error: {e}") + await _update_validation_status( + validation_id, + status="completed", + stage="Error", + progress=100, + valid=False, + error_message=f"Validation error: {str(e)}", + ) return ImageValidationResponse( status="error", message=f"Validation error: {str(e)}", + validationId=validation_id, valid=False, errors=[str(e)], ) + + +async def _update_validation_status( + validation_id: str, + status: str, + stage: Optional[str] = None, + progress: Optional[int] = None, + valid: Optional[bool] = None, + checks: Optional[List[dict]] = None, + errors: Optional[List[str]] = None, + error_message: Optional[str] = None, +) -> bool: + """Helper function to update validation status in Redis""" + try: + cache_key = f"{VALIDATION_STATUS_KEY_PREFIX}{validation_id}" + existing = await 
cache_manager.get(cache_key) + + if existing is None: + # Create new status if not exists + existing = { + "validation_id": validation_id, + "status": "submitted", + "stage": "Validation task submitted", + "progress": 10, + "valid": None, + "checks": None, + "errors": None, + "error_message": None, + "created_at": datetime.utcnow().isoformat(), + } + + # Update fields + existing["status"] = status + if stage is not None: + existing["stage"] = stage + if progress is not None: + existing["progress"] = progress + if valid is not None: + existing["valid"] = valid + if checks is not None: + existing["checks"] = checks + if errors is not None: + existing["errors"] = errors + if error_message is not None: + existing["error_message"] = error_message + existing["updated_at"] = datetime.utcnow().isoformat() + + await cache_manager.set(cache_key, existing, expire=VALIDATION_STATUS_TTL) + logger.info(f"Updated validation status: {validation_id} -> {status}") + return True + except Exception as e: + logger.error(f"Failed to update validation status: {e}") + return False + + +@router.get("/validation-status/{validation_id}", response_model=ValidationStatusResponse) +async def get_validation_status( + validation_id: str, + current_user: User = Depends(security.get_current_user), +): + """ + Get the current status of a validation task. + + This endpoint is used by frontend to poll for validation results. + """ + try: + cache_key = f"{VALIDATION_STATUS_KEY_PREFIX}{validation_id}" + status_data = await cache_manager.get(cache_key) + + if status_data is None: + raise HTTPException( + status_code=404, + detail=f"Validation status not found for ID: {validation_id}", + ) + + return ValidationStatusResponse( + validationId=validation_id, + status=status_data.get("status", "unknown"), + stage=status_data.get("stage", "Unknown"), + progress=status_data.get("progress", 0), + valid=status_data.get("valid"), + checks=status_data.get("checks"), + errors=status_data.get("errors"), + errorMessage=status_data.get("error_message"), + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting validation status: {e}") + raise HTTPException(status_code=500, detail=f"Error getting validation status: {str(e)}") + + +@router.post("/validation-status/{validation_id}") +async def update_validation_status( + validation_id: str, + request: ValidationStatusUpdateRequest, +): + """ + Update the status of a validation task (internal API for Executor Manager callback). + + This endpoint is called by Executor Manager to update validation progress. + Note: This is an internal API and should not be exposed publicly. 
+ """ + try: + success = await _update_validation_status( + validation_id=validation_id, + status=request.status, + stage=request.stage, + progress=request.progress, + valid=request.valid, + checks=[c.model_dump() for c in request.checks] if request.checks else None, + errors=request.errors, + error_message=request.errorMessage, + ) + + if not success: + raise HTTPException(status_code=500, detail="Failed to update validation status") + + return {"status": "success", "message": "Validation status updated"} + except HTTPException: + raise + except Exception as e: + logger.error(f"Error updating validation status: {e}") + raise HTTPException(status_code=500, detail=f"Error updating validation status: {str(e)}") diff --git a/executor/agents/image_validator/image_validator_agent.py b/executor/agents/image_validator/image_validator_agent.py index 6cd85812..cdaac299 100644 --- a/executor/agents/image_validator/image_validator_agent.py +++ b/executor/agents/image_validator/image_validator_agent.py @@ -80,6 +80,7 @@ def __init__(self, task_data: Dict[str, Any]): self.shell_type = validation_params.get("shell_type", "") self.image = validation_params.get("image", "") self.shell_name = validation_params.get("shell_name", "") + self.validation_id = validation_params.get("validation_id", "") def get_name(self) -> str: return "ImageValidator" @@ -94,7 +95,7 @@ def initialize(self) -> TaskStatus: logger.error(f"Unknown shell type: {self.shell_type}") return TaskStatus.FAILED - logger.info(f"ImageValidator initialized for shell_type={self.shell_type}") + logger.info(f"ImageValidator initialized for shell_type={self.shell_type}, validation_id={self.validation_id}") return TaskStatus.SUCCESS def execute(self) -> TaskStatus: @@ -104,8 +105,34 @@ def execute(self) -> TaskStatus: checks = self.VALIDATION_CHECKS.get(self.shell_type, []) results = [] all_passed = True + total_checks = len(checks) + + # Report running_checks stage + self.report_progress( + progress=70, + status=TaskStatus.RUNNING.value, + message="Running dependency checks", + result={ + "stage": "running_checks", + "validation_id": self.validation_id, + "current_check": None, + }, + ) + + for index, check in enumerate(checks): + # Report current check progress + current_progress = 70 + int((index / total_checks) * 25) + self.report_progress( + progress=current_progress, + status=TaskStatus.RUNNING.value, + message=f"Checking {check['name']}", + result={ + "stage": "running_checks", + "validation_id": self.validation_id, + "current_check": check["name"], + }, + ) - for check in checks: check_result = self._run_check(check) results.append(check_result) if check_result["status"] == "fail": @@ -123,12 +150,16 @@ def execute(self) -> TaskStatus: logger.info(f"Validation completed: valid={all_passed}, checks={len(results)}") - # Send result via callback with result data + # Send result via callback with result data including validation_id self.report_progress( progress=100, status=TaskStatus.COMPLETED.value, message="Image validation completed", - result=validation_result, + result={ + "stage": "completed", + "validation_id": self.validation_id, + "validation_result": validation_result, + }, ) return TaskStatus.COMPLETED diff --git a/executor_manager/executors/docker/executor.py b/executor_manager/executors/docker/executor.py index 0f54c0bf..6fdbb34c 100644 --- a/executor_manager/executors/docker/executor.py +++ b/executor_manager/executors/docker/executor.py @@ -16,6 +16,7 @@ import subprocess from typing import Any, Dict, List, Optional, Tuple 
import requests +import httpx from executor_manager.config.config import EXECUTOR_ENV from executor_manager.utils.executor_name import generate_executor_name @@ -188,11 +189,39 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], # Execute Docker command logger.info(f"Starting Docker container for task {task_id}: {executor_name} (base_image={base_image or 'default'})") - result = self.subprocess.run(cmd, check=True, capture_output=True, text=True) - # Record container ID - container_id = result.stdout.strip() - logger.info(f"Started Docker container {executor_name} with ID {container_id}") + try: + result = self.subprocess.run(cmd, check=True, capture_output=True, text=True) + + # Record container ID + container_id = result.stdout.strip() + logger.info(f"Started Docker container {executor_name} with ID {container_id}") + + # For validation tasks, report starting_container stage + if task.get("type") == "validation": + self._report_validation_stage( + task, + stage="starting_container", + status="running", + progress=50, + message="Container started, running validation checks", + ) + + except subprocess.CalledProcessError as e: + # For validation tasks, report image pull or container start failure + if task.get("type") == "validation": + error_msg = e.stderr or str(e) + stage = "pulling_image" if "pull" in error_msg.lower() or "not found" in error_msg.lower() else "starting_container" + self._report_validation_stage( + task, + stage=stage, + status="failed", + progress=100, + message=f"Container start failed: {error_msg}", + error_message=error_msg, + valid=False, + ) + raise def _get_base_image_from_task(self, task: Dict[str, Any]) -> Optional[str]: """Extract custom base_image from task's bot configuration""" @@ -532,7 +561,7 @@ def _call_callback( """ if not callback: return - + try: callback( task_id=task_id, @@ -543,3 +572,53 @@ def _call_callback( ) except Exception as e: logger.error(f"Error in callback for task {task_id}: {e}") + + def _report_validation_stage( + self, + task: Dict[str, Any], + stage: str, + status: str, + progress: int, + message: str, + error_message: Optional[str] = None, + valid: Optional[bool] = None, + ) -> None: + """ + Report validation stage progress to Backend via HTTP call. + + Args: + task: Task data containing validation_params + stage: Current validation stage (pulling_image, starting_container, etc.) 
+ status: Status (running, failed, completed) + progress: Progress percentage (0-100) + message: Human-readable message + error_message: Optional error message + valid: Optional validation result (True/False/None) + """ + validation_params = task.get("validation_params", {}) + validation_id = validation_params.get("validation_id") + + if not validation_id: + logger.debug("No validation_id in task, skipping stage report") + return + + task_api_domain = os.getenv("TASK_API_DOMAIN", "http://localhost:8000") + update_url = f"{task_api_domain}/api/shells/validation-status/{validation_id}" + + update_payload = { + "status": "completed" if status == "failed" else stage, + "stage": message, + "progress": progress, + "valid": valid, + "errorMessage": error_message, + } + + try: + with httpx.Client(timeout=10.0) as client: + response = client.post(update_url, json=update_payload) + if response.status_code == 200: + logger.info(f"Reported validation stage: {validation_id} -> {stage} ({progress}%)") + else: + logger.warning(f"Failed to report validation stage: {response.status_code} {response.text}") + except Exception as e: + logger.error(f"Error reporting validation stage: {e}") diff --git a/executor_manager/routers/routers.py b/executor_manager/routers/routers.py index 3d70f502..04a506ae 100644 --- a/executor_manager/routers/routers.py +++ b/executor_manager/routers/routers.py @@ -85,6 +85,11 @@ async def callback_handler(request: CallbackRequest, http_request: Request): try: client_ip = http_request.client.host if http_request.client else "unknown" logger.info(f"Received callback: body={request} from {client_ip}") + + # Check if this is a validation task callback (has validation_id in result) + if request.result and request.result.get("validation_id"): + await _forward_validation_callback(request) + # Directly call the API client to update task status success, result = api_client.update_task_status_by_fields( task_id=request.task_id, @@ -109,6 +114,53 @@ async def callback_handler(request: CallbackRequest, http_request: Request): raise HTTPException(status_code=500, detail=str(e)) +async def _forward_validation_callback(request: CallbackRequest): + """Forward validation task callback to Backend for Redis status update""" + import httpx + + validation_id = request.result.get("validation_id") + if not validation_id: + return + + # Map callback status to validation status + status_mapping = { + "running": "running_checks", + "completed": "completed", + "failed": "completed", + } + validation_status = status_mapping.get(request.status, request.status) + + # Extract validation result from callback + validation_result = request.result.get("validation_result", {}) + stage = request.result.get("stage", "Running checks") + progress = request.progress + + # Build update payload + update_payload = { + "status": validation_status, + "stage": stage, + "progress": progress, + "valid": validation_result.get("valid"), + "checks": validation_result.get("checks"), + "errors": validation_result.get("errors"), + "errorMessage": request.error_message, + } + + # Get backend URL + task_api_domain = os.getenv("TASK_API_DOMAIN", "http://localhost:8000") + update_url = f"{task_api_domain}/api/shells/validation-status/{validation_id}" + + try: + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(update_url, json=update_payload) + if response.status_code == 200: + logger.info(f"Successfully forwarded validation callback: {validation_id} -> {validation_status}") + else: + 
logger.warning(f"Failed to forward validation callback: {response.status_code} {response.text}") + except Exception as e: + logger.error(f"Error forwarding validation callback: {e}") + + @app.post("/executor-manager/tasks/receive") async def receive_tasks(request: TasksRequest, http_request: Request): """ @@ -177,6 +229,7 @@ class ValidateImageRequest(BaseModel): image: str shell_type: str # e.g., "ClaudeCode", "Agno" shell_name: Optional[str] = None # Optional shell name for tracking + validation_id: Optional[str] = None # UUID for tracking validation status class ImageCheckResult(BaseModel): @@ -212,10 +265,11 @@ async def validate_image(request: ValidateImageRequest, http_request: Request): 3. Report results back via callback with validation_result in result field """ client_ip = http_request.client.host if http_request.client else "unknown" - logger.info(f"Received image validation request: image={request.image}, shell_type={request.shell_type} from {client_ip}") + logger.info(f"Received image validation request: image={request.image}, shell_type={request.shell_type}, validation_id={request.validation_id} from {client_ip}") shell_type = request.shell_type image = request.image + validation_id = request.validation_id # Dify doesn't need validation (external_api type) if shell_type == "Dify": @@ -256,6 +310,7 @@ async def validate_image(request: ValidateImageRequest, http_request: Request): "shell_type": shell_type, "image": image, "shell_name": request.shell_name or "", + "validation_id": validation_id, # Pass validation_id for callback forwarding }, "executor_image": os.getenv("EXECUTOR_IMAGE", ""), } @@ -264,7 +319,7 @@ async def validate_image(request: ValidateImageRequest, http_request: Request): # Submit validation task using the task processor task_processor.process_tasks([validation_task]) - logger.info(f"Validation task submitted: task_id={validation_task_id}, image={image}") + logger.info(f"Validation task submitted: task_id={validation_task_id}, validation_id={validation_id}, image={image}") return { "status": "submitted", diff --git a/frontend/src/apis/shells.ts b/frontend/src/apis/shells.ts index df5bf04b..83fbd4bb 100644 --- a/frontend/src/apis/shells.ts +++ b/frontend/src/apis/shells.ts @@ -51,13 +51,33 @@ export interface ImageCheckResult { export interface ImageValidationResponse { status: 'submitted' | 'skipped' | 'error' message: string - validationTaskId?: number | null + validationId?: string | null // UUID for polling validation status + validationTaskId?: number | null // Legacy field // For immediate results (e.g., Dify skip) valid?: boolean | null checks?: ImageCheckResult[] | null errors?: string[] | null } +// Validation Status Types +export type ValidationStage = + | 'submitted' + | 'pulling_image' + | 'starting_container' + | 'running_checks' + | 'completed' + +export interface ValidationStatusResponse { + validationId: string + status: ValidationStage + stage: string // Human-readable stage description + progress: number // 0-100 + valid?: boolean | null + checks?: ImageCheckResult[] | null + errors?: string[] | null + errorMessage?: string | null +} + // Shell Services export const shellApis = { /** @@ -114,6 +134,15 @@ export const shellApis = { return apiClient.post('/shells/validate-image', request) }, + /** + * Get validation status by validation ID (for polling) + * + * @param validationId - UUID of the validation task + */ + async getValidationStatus(validationId: string): Promise { + return 
apiClient.get(`/shells/validation-status/${encodeURIComponent(validationId)}`) + }, + /** * Get public shells only (filter from unified list) */ diff --git a/frontend/src/features/settings/components/ShellEdit.tsx b/frontend/src/features/settings/components/ShellEdit.tsx index e8467aaa..0d4bf112 100644 --- a/frontend/src/features/settings/components/ShellEdit.tsx +++ b/frontend/src/features/settings/components/ShellEdit.tsx @@ -4,7 +4,7 @@ 'use client' -import React, { useCallback, useState, useEffect } from 'react' +import React, { useCallback, useState, useEffect, useRef } from 'react' import { Button } from '@/components/ui/button' import { Input } from '@/components/ui/input' import { Label } from '@/components/ui/label' @@ -15,10 +15,30 @@ import { SelectTrigger, SelectValue, } from '@/components/ui/select' +import { Progress } from '@/components/ui/progress' import { Loader2 } from 'lucide-react' import { BeakerIcon, CheckCircleIcon, XCircleIcon } from '@heroicons/react/24/outline' import { useTranslation } from '@/hooks/useTranslation' -import { shellApis, UnifiedShell, ImageCheckResult } from '@/apis/shells' +import { + shellApis, + UnifiedShell, + ImageCheckResult, + ValidationStage, + ValidationStatusResponse, +} from '@/apis/shells' + +// Polling configuration +const POLLING_INTERVAL = 2000 // 2 seconds +const MAX_POLLING_COUNT = 60 // 60 * 2s = 120 seconds timeout + +// Stage progress mapping +const STAGE_PROGRESS: Record = { + submitted: 10, + pulling_image: 30, + starting_container: 50, + running_checks: 70, + completed: 100, +} interface ShellEditProps { shell: UnifiedShell | null @@ -35,11 +55,16 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { const [displayName, setDisplayName] = useState(shell?.displayName || '') const [baseShellRef, setBaseShellRef] = useState(shell?.baseShellRef || '') const [baseImage, setBaseImage] = useState(shell?.baseImage || '') + const [originalBaseImage] = useState(shell?.baseImage || '') // Track original value for edit mode const [saving, setSaving] = useState(false) const [validating, setValidating] = useState(false) + const [validationId, setValidationId] = useState(null) + const [pollingCount, setPollingCount] = useState(0) + const pollingRef = useRef(null) const [validationStatus, setValidationStatus] = useState<{ - status: 'submitted' | 'skipped' | 'error' | 'success' | 'failed' + status: ValidationStage | 'error' | 'success' | 'failed' message: string + progress: number valid?: boolean checks?: ImageCheckResult[] errors?: string[] @@ -49,6 +74,15 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { const [baseShells, setBaseShells] = useState([]) const [loadingBaseShells, setLoadingBaseShells] = useState(true) + // Cleanup polling on unmount + useEffect(() => { + return () => { + if (pollingRef.current) { + clearInterval(pollingRef.current) + } + } + }, []) + useEffect(() => { const fetchBaseShells = async () => { try { @@ -63,6 +97,96 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { fetchBaseShells() }, []) + // Start polling for validation status + const startPolling = useCallback( + (validationIdToCheck: string) => { + if (pollingRef.current) { + clearInterval(pollingRef.current) + } + + setPollingCount(0) + let count = 0 + + pollingRef.current = setInterval(async () => { + count++ + setPollingCount(count) + + if (count >= MAX_POLLING_COUNT) { + // Timeout + clearInterval(pollingRef.current!) 
+ pollingRef.current = null + setValidating(false) + setValidationStatus({ + status: 'error', + message: t('shells.validation_timeout'), + progress: 0, + valid: false, + errors: [t('shells.validation_timeout')], + }) + toast({ + variant: 'destructive', + title: t('shells.validation_failed'), + description: t('shells.validation_timeout'), + }) + return + } + + try { + const result: ValidationStatusResponse = + await shellApis.getValidationStatus(validationIdToCheck) + + // Update validation status display + setValidationStatus({ + status: result.status, + message: result.stage, + progress: result.progress, + valid: result.valid ?? undefined, + checks: result.checks ?? undefined, + errors: result.errors ?? undefined, + }) + + // Check if validation is completed + if (result.status === 'completed') { + clearInterval(pollingRef.current!) + pollingRef.current = null + setValidating(false) + + if (result.valid === true) { + setValidationStatus({ + status: 'success', + message: t('shells.validation_passed'), + progress: 100, + valid: true, + checks: result.checks ?? undefined, + }) + toast({ + title: t('shells.validation_success'), + }) + } else { + setValidationStatus({ + status: 'failed', + message: result.errorMessage || t('shells.validation_not_passed'), + progress: 100, + valid: false, + checks: result.checks ?? undefined, + errors: result.errors ?? undefined, + }) + toast({ + variant: 'destructive', + title: t('shells.validation_failed'), + description: result.errorMessage || t('shells.validation_not_passed'), + }) + } + } + } catch (error) { + console.error('Failed to poll validation status:', error) + // Don't stop polling on transient errors, just log it + } + }, POLLING_INTERVAL) + }, + [t, toast] + ) + const handleValidateImage = async () => { if (!baseImage || !baseShellRef) { toast({ @@ -83,7 +207,11 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { } setValidating(true) - setValidationStatus(null) + setValidationStatus({ + status: 'submitted', + message: t('shells.validation_stage_submitted'), + progress: STAGE_PROGRESS.submitted, + }) try { const result = await shellApis.validateImage({ @@ -95,9 +223,11 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { // Handle different response statuses if (result.status === 'skipped') { // Dify type - validation not needed + setValidating(false) setValidationStatus({ status: 'success', message: result.message, + progress: 100, valid: true, checks: [], errors: [], @@ -106,22 +236,21 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { title: t('shells.validation_skipped'), description: result.message, }) - } else if (result.status === 'submitted') { - // Async validation task submitted - setValidationStatus({ - status: 'submitted', - message: result.message, - valid: undefined, - }) + } else if (result.status === 'submitted' && result.validationId) { + // Async validation task submitted - start polling + setValidationId(result.validationId) + startPolling(result.validationId) toast({ title: t('shells.validation_submitted'), description: t('shells.validation_async_hint'), }) } else if (result.status === 'error') { // Error submitting validation + setValidating(false) setValidationStatus({ status: 'error', message: result.message, + progress: 0, valid: false, errors: result.errors || [], }) @@ -132,9 +261,11 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { }) } } catch (error) { + setValidating(false) setValidationStatus({ status: 'error', message: (error as Error).message, + progress: 0, 
valid: false, errors: [(error as Error).message], }) @@ -143,11 +274,31 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { title: t('shells.validation_failed'), description: (error as Error).message, }) - } finally { - setValidating(false) } } + // Check if save button should be disabled + const isSaveDisabled = useCallback(() => { + // If there's no baseImage, no validation needed + if (!baseImage) return false + + // In edit mode, if baseImage hasn't changed, no re-validation needed + if (isEditing && baseImage === originalBaseImage) return false + + // If there's a baseImage, validation must pass + if (!validationStatus) return true + if (validationStatus.status !== 'success' || validationStatus.valid !== true) return true + + return false + }, [baseImage, isEditing, originalBaseImage, validationStatus]) + + const getSaveButtonTooltip = useCallback(() => { + if (isSaveDisabled()) { + return t('shells.validation_required') + } + return undefined + }, [isSaveDisabled, t]) + const handleSave = async () => { // Validation if (!name.trim()) { @@ -221,6 +372,10 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { } const handleBack = useCallback(() => { + // Clean up polling when going back + if (pollingRef.current) { + clearInterval(pollingRef.current) + } onClose() }, [onClose]) @@ -234,6 +389,28 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { return () => window.removeEventListener('keydown', handleEsc) }, [handleBack]) + // Get stage display text + const getStageDisplayText = (status: ValidationStage | 'error' | 'success' | 'failed') => { + switch (status) { + case 'submitted': + return t('shells.validation_stage_submitted') + case 'pulling_image': + return t('shells.validation_stage_pulling') + case 'starting_container': + return t('shells.validation_stage_starting') + case 'running_checks': + return t('shells.validation_stage_checking') + case 'completed': + case 'success': + return t('shells.validation_passed') + case 'failed': + case 'error': + return t('shells.validation_not_passed') + default: + return status + } + } + return (
    {/* Top Navigation */} @@ -256,7 +433,11 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { {t('common.back')}
    - @@ -336,7 +517,13 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { value={baseImage} onChange={e => { setBaseImage(e.target.value) - setValidationStatus(null) // Clear validation status on change + // Reset validation status on change + setValidationStatus(null) + setValidationId(null) + if (pollingRef.current) { + clearInterval(pollingRef.current) + pollingRef.current = null + } }} placeholder="ghcr.io/your-org/your-image:latest" className="bg-base flex-1" @@ -362,7 +549,10 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { className={`mt-3 p-3 rounded-md border ${ validationStatus.status === 'success' || validationStatus.valid === true ? 'bg-green-50 border-green-200 dark:bg-green-900/20 dark:border-green-800' - : validationStatus.status === 'submitted' + : validationStatus.status === 'submitted' || + validationStatus.status === 'pulling_image' || + validationStatus.status === 'starting_container' || + validationStatus.status === 'running_checks' ? 'bg-blue-50 border-blue-200 dark:bg-blue-900/20 dark:border-blue-800' : 'bg-red-50 border-red-200 dark:bg-red-900/20 dark:border-red-800' }`} @@ -370,7 +560,10 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => {
    {validationStatus.status === 'success' || validationStatus.valid === true ? ( - ) : validationStatus.status === 'submitted' ? ( + ) : validationStatus.status === 'submitted' || + validationStatus.status === 'pulling_image' || + validationStatus.status === 'starting_container' || + validationStatus.status === 'running_checks' ? ( ) : ( @@ -379,19 +572,32 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { className={`font-medium ${ validationStatus.status === 'success' || validationStatus.valid === true ? 'text-green-700 dark:text-green-300' - : validationStatus.status === 'submitted' + : validationStatus.status === 'submitted' || + validationStatus.status === 'pulling_image' || + validationStatus.status === 'starting_container' || + validationStatus.status === 'running_checks' ? 'text-blue-700 dark:text-blue-300' : 'text-red-700 dark:text-red-300' }`} > - {validationStatus.status === 'success' - ? t('shells.validation_passed') - : validationStatus.status === 'submitted' - ? t('shells.validation_in_progress') - : t('shells.validation_not_passed')} + {getStageDisplayText(validationStatus.status)}
    -

    {validationStatus.message}

    + + {/* Progress bar for in-progress validation */} + {validating && validationStatus.progress > 0 && ( +
    + +

    + {validationStatus.message} ({validationStatus.progress}%) +

    +
    + )} + + {!validating && ( +

    {validationStatus.message}

    + )} + {validationStatus.checks && validationStatus.checks.length > 0 && (
      {validationStatus.checks.map((check, index) => ( @@ -419,6 +625,13 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { )}
    )} + + {/* Validation required hint when save is disabled */} + {isSaveDisabled() && !validating && ( +

    + {t('shells.validation_required')} +

    + )}
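For reference, the polling flow wired into ShellEdit.tsx above reduces to a small standalone helper. The sketch below is illustrative only and is not part of this patch; it assumes the getValidationStatus client added in frontend/src/apis/shells.ts resolves to a ValidationStatusResponse, reuses the same 2-second interval and 60-attempt limit, and waitForValidation is a hypothetical name.

// Hypothetical helper mirroring the polling loop in ShellEdit.tsx:
// poll /shells/validation-status/{id} every 2s, stop on 'completed' or after 60 attempts.
import { shellApis, ValidationStatusResponse } from '@/apis/shells'

const POLLING_INTERVAL = 2000 // 2 seconds, same as ShellEdit.tsx
const MAX_POLLING_COUNT = 60  // 60 * 2s = 120 seconds timeout

async function waitForValidation(validationId: string): Promise<ValidationStatusResponse> {
  for (let attempt = 0; attempt < MAX_POLLING_COUNT; attempt++) {
    try {
      const status = await shellApis.getValidationStatus(validationId)
      if (status.status === 'completed') {
        // status.valid tells whether the image passed the dependency checks
        return status
      }
    } catch (err) {
      // Transient errors are logged and retried, matching the component's behaviour
      console.warn('validation status poll failed, retrying', err)
    }
    await new Promise(resolve => setTimeout(resolve, POLLING_INTERVAL))
  }
  throw new Error('Validation timed out after 120 seconds')
}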
diff --git a/frontend/src/i18n/locales/en/common.json b/frontend/src/i18n/locales/en/common.json index d2a6bcf4..fea61f35 100644 --- a/frontend/src/i18n/locales/en/common.json +++ b/frontend/src/i18n/locales/en/common.json @@ -567,6 +567,14 @@ "validation_skipped": "Validation skipped", "validation_in_progress": "Validation in progress", "validation_async_hint": "Validation is running in the background. Results will be available shortly.", + "validation_required": "Please complete image validation first", + "validation_timeout": "Validation timed out, please check network or image size", + "validation_stage_submitted": "Validation task submitted...", + "validation_stage_pulling": "Pulling image...", + "validation_stage_starting": "Starting container...", + "validation_stage_checking": "Running dependency checks...", + "error_image_pull": "Image pull failed", + "error_container_start": "Container start failed", "create_success": "Shell created successfully", "update_success": "Shell updated successfully", "delete_success": "Shell deleted successfully", diff --git a/frontend/src/i18n/locales/zh-CN/common.json b/frontend/src/i18n/locales/zh-CN/common.json index a43414d8..a1ad62e0 100644 --- a/frontend/src/i18n/locales/zh-CN/common.json +++ b/frontend/src/i18n/locales/zh-CN/common.json @@ -568,6 +568,14 @@ "validation_skipped": "验证已跳过", "validation_in_progress": "验证进行中", "validation_async_hint": "验证任务正在后台运行,结果稍后可用。", + "validation_required": "请先完成镜像校验", + "validation_timeout": "校验超时,请检查网络或镜像大小", + "validation_stage_submitted": "校验任务已提交...", + "validation_stage_pulling": "正在拉取镜像...", + "validation_stage_starting": "正在启动容器...", + "validation_stage_checking": "正在执行依赖检查...", + "error_image_pull": "镜像拉取失败", + "error_container_start": "容器启动失败", "create_success": "Shell 创建成功", "update_success": "Shell 更新成功", "delete_success": "Shell 删除成功", From 073d867a1f0a9e5c034966d525e3773bb4525514 Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 16:29:58 +0800 Subject: [PATCH 05/10] fix(executor_manager): add httpx dependency for validation status reporting Add httpx>=0.24.0 to requirements.txt to fix CI test failure. The httpx library is required by docker executor to report validation stage progress to the backend API. 
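For reference, the call this dependency enables looks roughly like the sketch below. It mirrors _report_validation_stage in executor_manager/executors/docker/executor.py; the endpoint and payload fields are taken from that code, while the helper name and argument values are chosen here for illustration only.

# Illustrative sketch, not part of the patch: report one validation stage to the backend.
import os
import httpx

def report_stage(validation_id: str, stage: str, progress: int, message: str) -> None:
    base = os.getenv("TASK_API_DOMAIN", "http://localhost:8000")
    url = f"{base}/api/shells/validation-status/{validation_id}"
    payload = {
        "status": stage,
        "stage": message,
        "progress": progress,
        "valid": None,
        "errorMessage": None,
    }
    # 10s timeout, same as the executor code; failures there are only logged, not raised
    with httpx.Client(timeout=10.0) as client:
        client.post(url, json=payload)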
--- executor_manager/requirements.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/executor_manager/requirements.txt b/executor_manager/requirements.txt index 0b7df4dc..d4963e08 100644 --- a/executor_manager/requirements.txt +++ b/executor_manager/requirements.txt @@ -498,6 +498,14 @@ uvicorn==0.38.0 \ --hash=sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02 \ --hash=sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d # via executor-manager +httpcore==1.0.9 \ + --hash=sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55 \ + --hash=sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8 + # via httpx +httpx==0.28.1 \ + --hash=sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad \ + --hash=sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc + # via executor-manager (for validation status reporting) websocket-client==1.9.0 \ --hash=sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98 \ --hash=sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef From de165200ba18a1f005597f0d16d21a2db5236810 Mon Sep 17 00:00:00 2001 From: qdaxb <4157870+qdaxb@users.noreply.github.com> Date: Sun, 30 Nov 2025 16:37:59 +0800 Subject: [PATCH 06/10] fix(frontend): remove unused variables in ShellEdit component Fix ESLint errors for unused variables: - Rename validationId to _validationId (stored for future use) - Remove pollingCount state (internal count only needed in closure) --- frontend/src/features/settings/components/ShellEdit.tsx | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/frontend/src/features/settings/components/ShellEdit.tsx b/frontend/src/features/settings/components/ShellEdit.tsx index 0d4bf112..a4cd7d8e 100644 --- a/frontend/src/features/settings/components/ShellEdit.tsx +++ b/frontend/src/features/settings/components/ShellEdit.tsx @@ -58,8 +58,7 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { const [originalBaseImage] = useState(shell?.baseImage || '') // Track original value for edit mode const [saving, setSaving] = useState(false) const [validating, setValidating] = useState(false) - const [validationId, setValidationId] = useState(null) - const [pollingCount, setPollingCount] = useState(0) + const [_validationId, setValidationId] = useState(null) const pollingRef = useRef(null) const [validationStatus, setValidationStatus] = useState<{ status: ValidationStage | 'error' | 'success' | 'failed' @@ -104,12 +103,10 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { clearInterval(pollingRef.current) } - setPollingCount(0) let count = 0 pollingRef.current = setInterval(async () => { count++ - setPollingCount(count) if (count >= MAX_POLLING_COUNT) { // Timeout From 4404c8ae9da16bbbd48d7b8b2353a49902d372fa Mon Sep 17 00:00:00 2001 From: axb Date: Mon, 1 Dec 2025 01:01:11 +0800 Subject: [PATCH 07/10] feat: implement custom shell and shell types --- backend/app/api/endpoints/adapter/models.py | 22 +- backend/app/api/endpoints/adapter/shells.py | 70 +++-- backend/app/schemas/bot.py | 31 +- backend/app/schemas/kind.py | 6 +- backend/app/services/adapters/bot_kinds.py | 197 ++++++------ .../app/services/adapters/executor_kinds.py | 6 +- backend/app/services/adapters/public_model.py | 16 +- backend/app/services/adapters/public_shell.py | 6 +- backend/app/services/adapters/shell_utils.py | 267 ++++++++++++++-- backend/app/services/adapters/task_kinds.py | 6 +- 
backend/app/services/adapters/team_kinds.py | 42 +-- .../app/services/model_aggregation_service.py | 46 ++- backend/init_data/02-public-shells.yaml | 6 +- executor/agents/base.py | 4 +- executor/callback/callback_client.py | 6 +- executor/callback/callback_handler.py | 22 +- executor/services/agent_service.py | 23 +- executor/tasks/task_processor.py | 37 ++- .../executors/docker/binary_extractor.py | 118 +++++++- executor_manager/executors/docker/executor.py | 184 ++++++++++- executor_manager/routers/routers.py | 41 ++- frontend/src/apis/bots.ts | 4 +- frontend/src/apis/mocks/bot.ts | 7 +- frontend/src/apis/models.ts | 20 +- frontend/src/apis/shells.ts | 112 +++---- .../features/settings/components/BotEdit.tsx | 71 +++-- .../features/settings/components/BotList.tsx | 2 +- .../settings/components/ShellEdit.tsx | 286 +++++++++--------- .../settings/components/ShellList.tsx | 126 ++++---- .../features/settings/components/TeamEdit.tsx | 40 ++- .../components/team-modes/BotTransfer.tsx | 12 +- .../team-modes/LeaderModeEditor.tsx | 12 +- .../components/team-modes/SoloModeEditor.tsx | 6 +- .../settings/components/team-modes/index.ts | 49 ++- .../src/features/settings/services/bots.ts | 2 +- frontend/src/i18n/locales/en/common.json | 4 +- frontend/src/i18n/locales/zh-CN/common.json | 6 +- frontend/src/types/api.ts | 5 +- 38 files changed, 1293 insertions(+), 627 deletions(-) diff --git a/backend/app/api/endpoints/adapter/models.py b/backend/app/api/endpoints/adapter/models.py index 0a3a6080..91cdc299 100644 --- a/backend/app/api/endpoints/adapter/models.py +++ b/backend/app/api/endpoints/adapter/models.py @@ -48,7 +48,7 @@ def list_models( @router.get("/names") def list_model_names( - agent_name: str = Query(..., description="Agent name (Agno、ClaudeCode)"), + shell_type: str = Query(..., description="Shell type (Agno, ClaudeCode)"), db: Session = Depends(get_db), current_user: User = Depends(security.get_current_user), ): @@ -63,15 +63,15 @@ def list_model_names( } """ data = public_model_service.list_model_names( - db=db, current_user=current_user, agent_name=agent_name + db=db, current_user=current_user, shell_type=shell_type ) return {"data": data} @router.get("/unified") def list_unified_models( - agent_name: Optional[str] = Query( - None, description="Agent name to filter compatible models (Agno, ClaudeCode)" + shell_type: Optional[str] = Query( + None, description="Shell type to filter compatible models (Agno, ClaudeCode)" ), include_config: bool = Query( False, description="Whether to include full config in response" @@ -90,7 +90,7 @@ def list_unified_models( important for avoiding naming conflicts when binding models. 
Parameters: - - agent_name: Optional agent name to filter compatible models + - shell_type: Optional shell type to filter compatible models - include_config: Whether to include full model config in response Response: @@ -109,7 +109,7 @@ def list_unified_models( data = model_aggregation_service.list_available_models( db=db, current_user=current_user, - agent_name=agent_name, + shell_type=shell_type, include_config=include_config, ) return {"data": data} @@ -354,15 +354,15 @@ def test_model_connection( @router.get("/compatible") def get_compatible_models( - agent_name: str = Query(..., description="Agent name (Agno or ClaudeCode)"), + shell_type: str = Query(..., description="Shell type (Agno or ClaudeCode)"), current_user: User = Depends(security.get_current_user), db: Session = Depends(get_db), ): """ - Get models compatible with a specific agent type + Get models compatible with a specific shell type Parameters: - - agent_name: "Agno" or "ClaudeCode" + - shell_type: "Agno" or "ClaudeCode" Response: { @@ -399,9 +399,9 @@ def get_compatible_models( model_type = env.get("model", "") # Filter compatible models - if agent_name == "Agno" and model_type == "openai": + if shell_type == "Agno" and model_type == "openai": compatible_models.append({"name": model_kind.name}) - elif agent_name == "ClaudeCode" and model_type == "claude": + elif shell_type == "ClaudeCode" and model_type == "claude": compatible_models.append({"name": model_kind.name}) except Exception as e: logger.warning(f"Failed to parse model {model_kind.name}: {e}") diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py index 0ffb431c..3e5ab9c0 100644 --- a/backend/app/api/endpoints/adapter/shells.py +++ b/backend/app/api/endpoints/adapter/shells.py @@ -35,11 +35,11 @@ class UnifiedShell(BaseModel): name: str type: str # 'public' or 'user' displayName: Optional[str] = None - runtime: str + shellType: str # Agent type: 'ClaudeCode', 'Agno', 'Dify', etc. baseImage: Optional[str] = None baseShellRef: Optional[str] = None supportModel: Optional[List[str]] = None - shellType: Optional[str] = None # 'local_engine' or 'external_api' + executionType: Optional[str] = None # 'local_engine' or 'external_api' (from labels) class ShellCreateRequest(BaseModel): @@ -121,11 +121,11 @@ def _public_shell_to_unified(shell: PublicShell) -> UnifiedShell: name=shell.name, type="public", displayName=shell_crd.metadata.displayName or shell.name, - runtime=shell_crd.spec.runtime, + shellType=shell_crd.spec.shellType, baseImage=shell_crd.spec.baseImage, baseShellRef=shell_crd.spec.baseShellRef, supportModel=shell_crd.spec.supportModel, - shellType=labels.get("type"), + executionType=labels.get("type"), ) @@ -137,11 +137,11 @@ def _user_shell_to_unified(kind: Kind) -> UnifiedShell: name=kind.name, type="user", displayName=shell_crd.metadata.displayName or kind.name, - runtime=shell_crd.spec.runtime, + shellType=shell_crd.spec.shellType, baseImage=shell_crd.spec.baseImage, baseShellRef=shell_crd.spec.baseShellRef, supportModel=shell_crd.spec.supportModel, - shellType=labels.get("type"), + executionType=labels.get("type"), ) @@ -154,19 +154,19 @@ def list_unified_shells( Get unified list of all available shells (both public and user-defined). Each shell includes a 'type' field ('public' or 'user') to identify its source. 
- - Response: +Response: +{ + "data": [ { - "data": [ - { - "name": "shell-name", - "type": "public" | "user", - "displayName": "Human Readable Name", - "runtime": "ClaudeCode", - "baseImage": "ghcr.io/...", - "shellType": "local_engine" | "external_api" - } - ] + "name": "shell-name", + "type": "public" | "user", + "displayName": "Human Readable Name", + "shellType": "ClaudeCode", + "baseImage": "ghcr.io/...", + "executionType": "local_engine" | "external_api" + } + ] +} } """ result = [] @@ -311,13 +311,24 @@ def create_shell( detail="Base shell must be a local_engine type (not external_api)", ) - # Validate baseImage format (basic URL validation) + # Validate baseImage format + # Docker image name formats: + # - image (e.g., ubuntu) + # - image:tag (e.g., ubuntu:22.04) + # - registry/image:tag (e.g., docker.io/library/ubuntu:22.04) + # - registry:port/image:tag (e.g., localhost:5000/myimage:latest) + # Pattern breakdown: + # - Optional registry with optional port: ([a-z0-9.-]+(:[0-9]+)?/)? + # - Image path (one or more segments): [a-z0-9._-]+(/[a-z0-9._-]+)* + # - Optional tag: (:[a-z0-9._-]+)? + # - Optional digest: (@sha256:[a-f0-9]+)? + docker_image_pattern = r"^([a-z0-9.-]+(:[0-9]+)?/)?[a-z0-9._-]+(/[a-z0-9._-]+)*(:[a-z0-9._-]+)?(@sha256:[a-f0-9]+)?$" if not request.baseImage or not re.match( - r"^[a-z0-9.-]+(/[a-z0-9._-]+)+:[a-z0-9._-]+$", request.baseImage, re.IGNORECASE + docker_image_pattern, request.baseImage, re.IGNORECASE ): raise HTTPException( status_code=400, - detail="Invalid base image format. Expected format: registry/image:tag", + detail="Invalid base image format. Expected formats: image, image:tag, registry/image:tag, or registry:port/image:tag", ) # Create Shell CRD @@ -331,7 +342,7 @@ def create_shell( "labels": {"type": "local_engine"}, # User shells inherit local_engine type }, "spec": { - "runtime": base_shell_crd.spec.runtime, # Inherit runtime from base shell + "shellType": base_shell_crd.spec.shellType, # Inherit shellType from base shell "supportModel": base_shell_crd.spec.supportModel or [], "baseImage": request.baseImage, "baseShellRef": request.baseShellRef, @@ -390,14 +401,20 @@ def update_shell( if request.baseImage is not None: # Validate baseImage format + # Docker image name formats: + # - image (e.g., ubuntu) + # - image:tag (e.g., ubuntu:22.04) + # - registry/image:tag (e.g., docker.io/library/ubuntu:22.04) + # - registry:port/image:tag (e.g., localhost:5000/myimage:latest) + docker_image_pattern = r"^([a-z0-9.-]+(:[0-9]+)?/)?[a-z0-9._-]+(/[a-z0-9._-]+)*(:[a-z0-9._-]+)?(@sha256:[a-f0-9]+)?$" if not re.match( - r"^[a-z0-9.-]+(/[a-z0-9._-]+)+:[a-z0-9._-]+$", + docker_image_pattern, request.baseImage, re.IGNORECASE, ): raise HTTPException( status_code=400, - detail="Invalid base image format. Expected format: registry/image:tag", + detail="Invalid base image format. 
Expected formats: image, image:tag, registry/image:tag, or registry:port/image:tag", ) shell_crd.spec.baseImage = request.baseImage @@ -518,8 +535,9 @@ async def validate_image( logger.info(f"Submitting image validation task to executor manager: {image}") # Call executor manager's validate-image API with validation_id - with httpx.Client(timeout=30.0) as client: - response = client.post( + # Use AsyncClient to avoid blocking the event loop + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.post( validate_url, json={ "image": image, diff --git a/backend/app/schemas/bot.py b/backend/app/schemas/bot.py index 16ac267c..3535c785 100644 --- a/backend/app/schemas/bot.py +++ b/backend/app/schemas/bot.py @@ -10,11 +10,11 @@ from app.schemas.user import UserInDB -class BotBase(BaseModel): - """Bot base model""" +class BotCreate(BaseModel): + """Bot creation model - request schema""" name: str - agent_name: str + shell_name: str # Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') agent_config: dict[str, Any] system_prompt: Optional[str] = None mcp_servers: Optional[dict[str, Any]] = None @@ -22,17 +22,11 @@ class BotBase(BaseModel): is_active: bool = True -class BotCreate(BotBase): - """Bot creation model""" - - pass - - class BotUpdate(BaseModel): - """Bot update model""" + """Bot update model - request schema""" name: Optional[str] = None - agent_name: Optional[str] = None + shell_name: Optional[str] = None # Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') agent_config: Optional[dict[str, Any]] = None system_prompt: Optional[str] = None mcp_servers: Optional[dict[str, Any]] = None @@ -40,11 +34,19 @@ class BotUpdate(BaseModel): is_active: Optional[bool] = None -class BotInDB(BotBase): - """Database bot model""" +class BotInDB(BaseModel): + """Database bot model - response schema""" id: int user_id: int + name: str + shell_name: str # Shell name (the name user selected, e.g., 'ClaudeCode', 'my-custom-shell') + shell_type: str # Actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') + agent_config: dict[str, Any] + system_prompt: Optional[str] = None + mcp_servers: Optional[dict[str, Any]] = None + skills: Optional[List[str]] = None + is_active: bool = True created_at: datetime updated_at: datetime @@ -57,7 +59,8 @@ class BotDetail(BaseModel): id: int name: str - agent_name: str + shell_name: str # Shell name (the name user selected, e.g., 'ClaudeCode', 'my-custom-shell') + shell_type: str # Actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') agent_config: dict[str, Any] system_prompt: Optional[str] = None mcp_servers: Optional[dict[str, Any]] = None diff --git a/backend/app/schemas/kind.py b/backend/app/schemas/kind.py index 27fe161e..c92ab287 100644 --- a/backend/app/schemas/kind.py +++ b/backend/app/schemas/kind.py @@ -8,7 +8,7 @@ from datetime import datetime from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Field +from pydantic import AliasChoices, BaseModel, Field class ObjectMeta(BaseModel): @@ -110,7 +110,9 @@ class ModelRef(BaseModel): class ShellSpec(BaseModel): """Shell specification""" - runtime: str + shellType: str = Field( + ..., validation_alias=AliasChoices("shellType", "runtime") + ) # Agent type: 'ClaudeCode', 'Agno', 'Dify', etc. 
Accepts 'runtime' for backward compatibility supportModel: Optional[List[str]] = None baseImage: Optional[str] = None # Custom base image address for user-defined shells baseShellRef: Optional[str] = None # Reference to base public shell (e.g., "ClaudeCode") diff --git a/backend/app/services/adapters/bot_kinds.py b/backend/app/services/adapters/bot_kinds.py index e4ceaa66..d73eb3a9 100644 --- a/backend/app/services/adapters/bot_kinds.py +++ b/backend/app/services/adapters/bot_kinds.py @@ -19,7 +19,12 @@ from app.models.user import User from app.schemas.bot import BotCreate, BotDetail, BotInDB, BotUpdate from app.schemas.kind import Bot, Ghost, Model, Shell, Team -from app.services.adapters.shell_utils import get_shell_type +from app.services.adapters.shell_utils import ( + get_shell_by_name, + get_shell_info_by_name, + get_shell_type, + get_shells_by_names_batch, +) from app.services.base import BaseService @@ -228,12 +233,21 @@ def _get_model_by_name( db, model_name, namespace, user_id, model_type=None ) + # Note: _get_shell_info_by_name has been moved to shell_utils.py + # Use get_shell_info_by_name from shell_utils instead + def create_with_user( self, db: Session, *, obj_in: BotCreate, user_id: int ) -> Dict[str, Any]: """ - Create user Bot using kinds table + Create user Bot using kinds table. + + Bot's shellRef directly points to the user-selected Shell (custom or public), + instead of creating a dedicated shell for each bot. """ + import logging + logger = logging.getLogger(__name__) + # Check duplicate bot name under the same user (only active bots) existing = ( db.query(Kind) @@ -329,53 +343,32 @@ def create_with_user( ) db.add(model) - support_model = [] - shell_type = "local_engine" # Default shell type - if obj_in.agent_name: - public_shell = ( - db.query(PublicShell) - .filter( - PublicShell.name == obj_in.agent_name, - PublicShell.namespace == "default", - ) - .first() - ) - - if public_shell and isinstance(public_shell.json, dict): - shell_crd = Shell.model_validate(public_shell.json) - support_model = shell_crd.spec.supportModel or [] - # Get shell type from metadata.labels - if shell_crd.metadata.labels and "type" in shell_crd.metadata.labels: - shell_type = shell_crd.metadata.labels["type"] - - shell_json = { - "kind": "Shell", - "spec": {"runtime": obj_in.agent_name, "supportModel": support_model}, - "metadata": { - "name": f"{obj_in.name}-shell", - "namespace": "default", - "labels": {"type": shell_type}, - }, - "status": {"state": "Available"}, - "apiVersion": "agent.wecode.io/v1", - } - - shell = Kind( - user_id=user_id, - kind="Shell", - name=f"{obj_in.name}-shell", - namespace="default", - json=shell_json, - is_active=True, + # Get shell info by name (resolves actual shell_type from shell_name) + # The shell_name is the name of the user-selected Shell (custom or public) + try: + shell_info = get_shell_info_by_name(db, obj_in.shell_name, user_id) + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + + logger.info( + f"[DEBUG] create_with_user: shell_name={obj_in.shell_name}, " + f"resolved shell_type={shell_info['shell_type']}, " + f"execution_type={shell_info['execution_type']}, " + f"base_image={shell_info['base_image']}, " + f"is_custom={shell_info['is_custom']}" ) - db.add(shell) - # Create Bot with modelRef pointing to the selected model + # Bot's shellRef directly points to the user-selected Shell + # No need to create a dedicated shell for each bot + shell_ref_name = obj_in.shell_name + shell_ref_namespace = "default" + + # Create 
Bot with shellRef pointing to the user-selected Shell bot_json = { "kind": "Bot", "spec": { "ghostRef": {"name": f"{obj_in.name}-ghost", "namespace": "default"}, - "shellRef": {"name": f"{obj_in.name}-shell", "namespace": "default"}, + "shellRef": {"name": shell_ref_name, "namespace": shell_ref_namespace}, "modelRef": {"name": model_ref_name, "namespace": model_ref_namespace}, }, "status": {"state": "Available"}, @@ -405,6 +398,9 @@ def create_with_user( else: db.refresh(model) + # Get the shell for response (from user's custom shells or public shells) + shell = get_shell_by_name(db, shell_ref_name, user_id) + # Return bot-like structure return self._convert_to_bot_dict(bot, ghost, shell, model, obj_in.agent_config) @@ -554,40 +550,31 @@ def update_with_user( bot.name = new_name flag_modified(bot, "json") # Mark JSON field as modified - if "agent_name" in update_data and shell: - # Query public_shells table to get supportModel and shell type based on new agent_name - support_model = [] - shell_type = "local_engine" # Default shell type - new_agent_name = update_data["agent_name"] - if new_agent_name: - public_shell = ( - db.query(PublicShell) - .filter( - PublicShell.name == new_agent_name, - PublicShell.namespace == "default", - ) - .first() - ) - - if public_shell and isinstance(public_shell.json, dict): - public_shell_crd = Shell.model_validate(public_shell.json) - support_model = public_shell_crd.spec.supportModel or [] - # Get shell type from metadata.labels - if ( - public_shell_crd.metadata.labels - and "type" in public_shell_crd.metadata.labels - ): - shell_type = public_shell_crd.metadata.labels["type"] + if "shell_name" in update_data: + # Update Bot's shellRef to point directly to the user-selected Shell + new_shell_name = update_data["shell_name"] + try: + shell_info = get_shell_info_by_name(db, new_shell_name, user_id) + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + + logger.info( + f"[DEBUG] update_with_user: shell_name={new_shell_name}, " + f"resolved shell_type={shell_info['shell_type']}, " + f"execution_type={shell_info['execution_type']}, " + f"base_image={shell_info['base_image']}, " + f"is_custom={shell_info['is_custom']}" + ) - shell_crd = Shell.model_validate(shell.json) - shell_crd.spec.runtime = new_agent_name - shell_crd.spec.supportModel = support_model - # Update shell type in metadata.labels - if not shell_crd.metadata.labels: - shell_crd.metadata.labels = {} - shell_crd.metadata.labels["type"] = shell_type - shell.json = shell_crd.model_dump() - flag_modified(shell, "json") # Mark JSON field as modified + # Update Bot's shellRef to point to the user-selected Shell + bot_crd = Bot.model_validate(bot.json) + bot_crd.spec.shellRef.name = new_shell_name + bot_crd.spec.shellRef.namespace = "default" + bot.json = bot_crd.model_dump() + flag_modified(bot, "json") + + # Update shell reference for response + shell = get_shell_by_name(db, new_shell_name, user_id) if "agent_config" in update_data: new_agent_config = update_data["agent_config"] @@ -779,8 +766,8 @@ def update_with_user( bot.updated_at = datetime.now() if ghost: ghost.updated_at = datetime.now() - if shell: - shell.updated_at = datetime.now() + # Note: shell is now a reference to user's custom shell or public shell, + # we don't update its timestamp as it's not owned by this bot if model and hasattr(model, "updated_at"): model.updated_at = datetime.now() @@ -788,7 +775,8 @@ def update_with_user( db.refresh(bot) if ghost: db.refresh(ghost) - if shell: + # Note: shell may be a 
PublicShell which doesn't need refresh + if shell and isinstance(shell, Kind): db.refresh(shell) if model and hasattr(model, "id"): try: @@ -802,7 +790,10 @@ def update_with_user( def delete_with_user(self, db: Session, *, bot_id: int, user_id: int) -> None: """ - Delete user Bot and related components + Delete user Bot and related components. + + Note: Shell is not deleted because it's now a reference to user's custom shell + or public shell, not a dedicated shell for this bot. """ bot = ( db.query(Kind) @@ -843,15 +834,15 @@ def delete_with_user(self, db: Session, *, bot_id: int, user_id: int) -> None: detail=f"Bot '{bot_name}' is being used in team '{team.name}'. Please remove it from the team first.", ) - # Get related components + # Get related components (only ghost needs to be deleted) ghost, shell, model = self._get_bot_components(db, bot, user_id) - # Delete all components + # Delete bot and ghost only + # Shell is not deleted because it's a reference to user's custom shell or public shell db.delete(bot) if ghost: db.delete(ghost) - if shell: - db.delete(shell) + # Note: shell is not deleted - it's a shared resource db.commit() @@ -896,17 +887,14 @@ def _get_bot_components(self, db: Session, bot: Kind, user_id: int): .first() ) - # Get shell - shell = ( - db.query(Kind) - .filter( - Kind.user_id == user_id, - Kind.kind == "Shell", - Kind.name == bot_crd.spec.shellRef.name, - Kind.namespace == bot_crd.spec.shellRef.namespace, - Kind.is_active == True, - ) - .first() + # Get shell - try user's custom shells first, then public shells + shell_ref_name = bot_crd.spec.shellRef.name + shell = get_shell_by_name(db, shell_ref_name, user_id) + + logger.info( + f"[DEBUG] _get_bot_components: shellRef.name={shell_ref_name}, " + f"shell found={shell is not None}, " + f"shell type={type(shell).__name__ if shell else 'None'}" ) # Get model - try private models first, then public models # modelRef is optional, only get if it exists @@ -983,16 +971,15 @@ def build_or_filters(kind_name: str, keys: set): if ghost_filter is not None: ghosts = db.query(Kind).filter(base_filter).filter(ghost_filter).all() - shell_filter = build_or_filters("Shell", shell_keys) - if shell_filter is not None: - shells = db.query(Kind).filter(base_filter).filter(shell_filter).all() + # Use unified shell query function that checks both user shells and public shells + shell_map = get_shells_by_names_batch(db, shell_keys, user_id) model_filter = build_or_filters("Model", model_keys) if model_filter is not None: models = db.query(Kind).filter(base_filter).filter(model_filter).all() ghost_map = {(g.name, g.namespace): g for g in ghosts} - shell_map = {(s.name, s.namespace): s for s in shells} + # shell_map is already populated by get_shells_by_names_batch model_map = {(m.name, m.namespace): m for m in models} # For models not found in kinds table, try to find in public_models table @@ -1051,9 +1038,14 @@ def _convert_to_bot_dict( # Extract data from components system_prompt = "" mcp_servers = {} - agent_name = "" + shell_type = "" + shell_name = "" agent_config = {} + # Get shell_name from bot's shellRef - this is the name user selected + bot_crd = Bot.model_validate(bot.json) + shell_name = bot_crd.spec.shellRef.name if bot_crd.spec.shellRef else "" + if ghost and ghost.json: ghost_crd = Ghost.model_validate(ghost.json) system_prompt = ghost_crd.spec.systemPrompt @@ -1061,7 +1053,7 @@ def _convert_to_bot_dict( if shell and shell.json: shell_crd = Shell.model_validate(shell.json) - agent_name = shell_crd.spec.runtime + 
shell_type = shell_crd.spec.shellType or "" # Determine agent_config # For frontend display, we need to return { bind_model: "xxx", bind_model_type: "public"|"user" } format when: @@ -1153,7 +1145,8 @@ def _convert_to_bot_dict( "id": bot.id, "user_id": bot.user_id, "name": bot.name, - "agent_name": agent_name, + "shell_name": shell_name, # The shell name user selected (e.g., 'ClaudeCode', 'my-custom-shell') + "shell_type": shell_type, # The actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') "agent_config": agent_config, "system_prompt": system_prompt, "mcp_servers": mcp_servers, diff --git a/backend/app/services/adapters/executor_kinds.py b/backend/app/services/adapters/executor_kinds.py index 8eb99f5b..6abfa7f4 100644 --- a/backend/app/services/adapters/executor_kinds.py +++ b/backend/app/services/adapters/executor_kinds.py @@ -449,7 +449,7 @@ def __init__(self, json_data): system_prompt = "" mcp_servers = {} skills = [] - agent_name = "" + shell_type = "" agent_config = {} if ghost and ghost.json: @@ -463,7 +463,7 @@ def __init__(self, json_data): if shell and shell.json: shell_crd = Shell.model_validate(shell.json) - agent_name = shell_crd.spec.runtime + shell_type = shell_crd.spec.shellType # Extract baseImage from shell (user-defined shell overrides public shell) if shell_crd.spec.baseImage: shell_base_image = shell_crd.spec.baseImage @@ -612,7 +612,7 @@ def __init__(self, json_data): { "id": bot.id, "name": bot.name, - "agent_name": agent_name, + "shell_type": shell_type, "agent_config": agent_config_data, "system_prompt": bot_prompt, "mcp_servers": mcp_servers, diff --git a/backend/app/services/adapters/public_model.py b/backend/app/services/adapters/public_model.py index d3273e99..1889d621 100644 --- a/backend/app/services/adapters/public_model.py +++ b/backend/app/services/adapters/public_model.py @@ -199,13 +199,13 @@ def count_active_models(self, db: Session, *, current_user: User) -> int: ) # noqa: E712 def list_model_names( - self, db: Session, *, current_user: User, agent_name: str + self, db: Session, *, current_user: User, shell_type: str ) -> List[Dict[str, str]]: """ - List available model names based on agent type and shell supportModel filter. + List available model names based on shell type and shell supportModel filter. Queries both user's own models (kinds table) and public models (public_models table). 
- Agent to model provider mapping: + Shell type to model provider mapping: - Agno -> openai - ClaudeCode -> claude @@ -213,10 +213,10 @@ def list_model_names( """ # Get shell configuration from public_shells table shell_row = ( - db.query(PublicShell.json).filter(PublicShell.name == agent_name).first() + db.query(PublicShell.json).filter(PublicShell.name == shell_type).first() ) if not shell_row: - raise HTTPException(status_code=400, detail="Agent not found") + raise HTTPException(status_code=400, detail="Shell type not found") shell_json = shell_row[0] if isinstance(shell_row[0], dict) else {} @@ -227,10 +227,10 @@ def list_model_names( supportModel = shell_crd.spec.supportModel or [] supportModel = [str(x) for x in supportModel if x] - # Determine required model provider based on agent_name + # Determine required model provider based on shell_type # Agno uses openai protocol, ClaudeCode uses claude protocol - agent_provider_map = {"Agno": "openai", "ClaudeCode": "claude"} - required_provider = agent_provider_map.get(agent_name) + shell_provider_map = {"Agno": "openai", "ClaudeCode": "claude"} + required_provider = shell_provider_map.get(shell_type) # If supportModel is specified, use it; otherwise filter by agent's required provider use_support_model_filter = len(supportModel) > 0 diff --git a/backend/app/services/adapters/public_shell.py b/backend/app/services/adapters/public_shell.py index dee0aab6..9e1f3201 100644 --- a/backend/app/services/adapters/public_shell.py +++ b/backend/app/services/adapters/public_shell.py @@ -81,7 +81,7 @@ def create_agent( # Convert to JSON format matching kinds table structure json_data = { "kind": "Shell", - "spec": {"runtime": obj_in.name, "supportModel": supportModel}, + "spec": {"shellType": obj_in.name, "supportModel": supportModel}, "status": {"state": "Available"}, "metadata": {"name": obj_in.name, "namespace": "default"}, "apiVersion": "agent.wecode.io/v1", @@ -175,11 +175,11 @@ def update_agent( for field, value in update_data.items(): if field == "name": setattr(shell, field, value) - # Also update metadata and runtime in json + # Also update metadata and shellType in json if isinstance(shell.json, dict): shell_crd = Shell.model_validate(shell.json) shell_crd.metadata.name = value - shell_crd.spec.runtime = value + shell_crd.spec.shellType = value shell.json = shell_crd.model_dump() elif field == "config": # Update supportModel from config.mode_filter diff --git a/backend/app/services/adapters/shell_utils.py b/backend/app/services/adapters/shell_utils.py index bce2fab7..18f7c642 100644 --- a/backend/app/services/adapters/shell_utils.py +++ b/backend/app/services/adapters/shell_utils.py @@ -3,56 +3,191 @@ # SPDX-License-Identifier: Apache-2.0 """ -Utility functions for Shell type detection and classification +Utility functions for Shell type detection and classification. + +This module provides unified Shell lookup and information retrieval functions +that can be used across different services (bot_kinds, executor_kinds, etc.). 
""" -from typing import Optional +import logging +from typing import Any, Dict, List, Optional, Set, Tuple, Union +from sqlalchemy import and_, or_ from sqlalchemy.orm import Session from app.models.kind import Kind +from app.models.public_shell import PublicShell from app.schemas.kind import Shell +logger = logging.getLogger(__name__) -def get_shell_type( - db: Session, shell_name: str, shell_namespace: str, user_id: int -) -> Optional[str]: + +def get_shell_by_name( + db: Session, shell_name: str, user_id: int, namespace: str = "default" +) -> Optional[Union[Kind, PublicShell]]: + """ + Get a Shell by name, first checking user's custom shells, then public shells. + + Args: + db: Database session + shell_name: Name of the shell (e.g., 'ClaudeCode', 'my-custom-shell') + user_id: User ID + namespace: Namespace (default: 'default') + + Returns: + Kind object (for user shells) or PublicShell object (for public shells), + or None if not found. """ - Get the shell type (local_engine or external_api) for a given shell + # First, try to find in user's custom shells (kinds table) + user_shell = ( + db.query(Kind) + .filter( + Kind.user_id == user_id, + Kind.kind == "Shell", + Kind.name == shell_name, + Kind.namespace == namespace, + Kind.is_active == True, + ) + .first() + ) + + if user_shell: + logger.debug(f"Found user shell '{shell_name}' for user {user_id}") + return user_shell + + # Then, try to find in public shells + public_shell = ( + db.query(PublicShell) + .filter( + PublicShell.name == shell_name, + PublicShell.namespace == namespace, + PublicShell.is_active == True, + ) + .first() + ) + + if public_shell: + logger.debug(f"Found public shell '{shell_name}'") + return public_shell + + logger.warning(f"Shell '{shell_name}' not found in user shells or public shells") + return None - Shell type is stored in metadata.labels.type +def get_shell_info_by_name( + db: Session, shell_name: str, user_id: int, namespace: str = "default" +) -> Dict[str, Any]: + """ + Get shell information by shell name. + + First tries to find a user-defined custom shell in kinds table, + then falls back to public shells in public_shells table. 
+ Args: db: Database session - shell_name: Name of the shell - shell_namespace: Namespace of the shell + shell_name: Name of the shell (e.g., 'ClaudeCode', 'my-custom-shell') user_id: User ID - + namespace: Namespace (default: 'default') + Returns: - "local_engine", "external_api", or None if shell not found + Dict with: + - shell_type: The actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') + - support_model: List of supported models + - execution_type: 'local_engine' or 'external_api' + - base_image: Base Docker image (optional) + - is_custom: Whether this is a user-defined custom shell + + Raises: + ValueError: If shell is not found """ - shell = ( + # First, try to find in user's custom shells (kinds table) + user_shell = ( db.query(Kind) .filter( Kind.user_id == user_id, Kind.kind == "Shell", Kind.name == shell_name, - Kind.namespace == shell_namespace, + Kind.namespace == namespace, Kind.is_active == True, ) .first() ) + + if user_shell and isinstance(user_shell.json, dict): + shell_crd = Shell.model_validate(user_shell.json) + # For custom shells, shellType contains the actual agent type + result = { + "shell_type": shell_crd.spec.shellType, + "support_model": shell_crd.spec.supportModel or [], + "execution_type": "local_engine", + "base_image": shell_crd.spec.baseImage, + "is_custom": True, + } + if shell_crd.metadata.labels and "type" in shell_crd.metadata.labels: + result["execution_type"] = shell_crd.metadata.labels["type"] + logger.info( + f"Found user shell '{shell_name}', " + f"shell_type={result['shell_type']}, execution_type={result['execution_type']}, " + f"base_image={result['base_image']}" + ) + return result + + # Then, try to find in public shells + public_shell = ( + db.query(PublicShell) + .filter( + PublicShell.name == shell_name, + PublicShell.namespace == namespace, + PublicShell.is_active == True, + ) + .first() + ) + + if public_shell and isinstance(public_shell.json, dict): + shell_crd = Shell.model_validate(public_shell.json) + # For public shells, the shell name IS the shell type (e.g., 'ClaudeCode') + result = { + "shell_type": shell_crd.spec.shellType, + "support_model": shell_crd.spec.supportModel or [], + "execution_type": "local_engine", + "base_image": shell_crd.spec.baseImage, + "is_custom": False, + } + if shell_crd.metadata.labels and "type" in shell_crd.metadata.labels: + result["execution_type"] = shell_crd.metadata.labels["type"] + logger.info( + f"Found public shell '{shell_name}', " + f"shell_type={result['shell_type']}, execution_type={result['execution_type']}, " + f"base_image={result['base_image']}" + ) + return result + + # Shell not found - raise error instead of using fallback + raise ValueError(f"Shell '{shell_name}' not found in user shells or public shells") + - if not shell: - return None +def get_shell_type( + db: Session, shell_name: str, shell_namespace: str, user_id: int +) -> str: + """ + Get the shell type (local_engine or external_api) for a given shell. 
- shell_crd = Shell.model_validate(shell.json) + Shell type is stored in metadata.labels.type - # Get type from metadata.labels, default to local_engine - if shell_crd.metadata.labels and "type" in shell_crd.metadata.labels: - return shell_crd.metadata.labels["type"] + Args: + db: Database session + shell_name: Name of the shell + shell_namespace: Namespace of the shell + user_id: User ID - return "local_engine" + Returns: + "local_engine" or "external_api" + + Raises: + ValueError: If shell is not found + """ + shell_info = get_shell_info_by_name(db, shell_name, user_id, shell_namespace) + return shell_info["execution_type"] def is_external_api_shell( @@ -91,3 +226,95 @@ def is_local_engine_shell( """ shell_type = get_shell_type(db, shell_name, shell_namespace, user_id) return shell_type == "local_engine" + + +def get_shells_by_names_batch( + db: Session, + shell_keys: Set[Tuple[str, str]], + user_id: int, +) -> Dict[Tuple[str, str], Union[Kind, PublicShell]]: + """ + Batch-fetch shells by (name, namespace) keys. + + First queries user's custom shells from kinds table, then queries public shells + for any missing keys. + + Args: + db: Database session + shell_keys: Set of (name, namespace) tuples to query + user_id: User ID + + Returns: + Dict mapping (name, namespace) to Kind or PublicShell objects + """ + if not shell_keys: + return {} + + shell_map: Dict[Tuple[str, str], Union[Kind, PublicShell]] = {} + + # Build OR filter for user shells + def build_user_shell_or_filters(keys: Set[Tuple[str, str]]): + return ( + or_( + *[ + and_(Kind.name == n, Kind.namespace == ns) + for (n, ns) in keys + ] + ) + if keys + else None + ) + + # Query user's custom shells first + user_shell_filter = build_user_shell_or_filters(shell_keys) + if user_shell_filter is not None: + user_shells = ( + db.query(Kind) + .filter( + Kind.user_id == user_id, + Kind.kind == "Shell", + Kind.is_active == True, + ) + .filter(user_shell_filter) + .all() + ) + + for shell in user_shells: + shell_map[(shell.name, shell.namespace)] = shell + + # Find missing keys and query public shells + found_keys = set(shell_map.keys()) + missing_keys = shell_keys - found_keys + + if missing_keys: + def build_public_shell_or_filters(keys: Set[Tuple[str, str]]): + return ( + or_( + *[ + and_(PublicShell.name == n, PublicShell.namespace == ns) + for (n, ns) in keys + ] + ) + if keys + else None + ) + + public_shell_filter = build_public_shell_or_filters(missing_keys) + if public_shell_filter is not None: + public_shells = ( + db.query(PublicShell) + .filter(PublicShell.is_active == True) + .filter(public_shell_filter) + .all() + ) + + for shell in public_shells: + shell_map[(shell.name, shell.namespace)] = shell + + logger.debug( + f"Batch fetched {len(shell_map)} shells for {len(shell_keys)} keys " + f"(user: {len(shell_map) - len(missing_keys) + len(found_keys)}, " + f"public: {len(shell_map) - len(found_keys)})" + ) + + return shell_map diff --git a/backend/app/services/adapters/task_kinds.py b/backend/app/services/adapters/task_kinds.py index 99abf360..2945e123 100644 --- a/backend/app/services/adapters/task_kinds.py +++ b/backend/app/services/adapters/task_kinds.py @@ -548,7 +548,7 @@ def get_task_detail( bot_crd = Bot.model_validate(bot.json) # Initialize default values - agent_name = "" + shell_type = "" agent_config = {} system_prompt = "" mcp_servers = {} @@ -601,14 +601,14 @@ def get_task_detail( ) if shell and shell.json: shell_crd = Shell.model_validate(shell.json) - agent_name = shell_crd.spec.runtime + shell_type = 
shell_crd.spec.shellType # Create bot dict compatible with BotInDB schema bot_dict = { "id": bot.id, "user_id": bot.user_id, "name": bot.name, - "agent_name": agent_name, + "shell_type": shell_type, "agent_config": agent_config, "system_prompt": system_prompt, "mcp_servers": mcp_servers, diff --git a/backend/app/services/adapters/team_kinds.py b/backend/app/services/adapters/team_kinds.py index b17428d8..278d7427 100644 --- a/backend/app/services/adapters/team_kinds.py +++ b/backend/app/services/adapters/team_kinds.py @@ -674,7 +674,7 @@ def _validate_bots(self, db: Session, bots: List[BotInfo], user_id: int) -> None if len(bots) > 1: raise HTTPException( status_code=400, - detail=f"Teams using external API shells ({shell_crd.spec.runtime}) must have exactly one bot. Found {len(bots)} bots.", + detail=f"Teams using external API shells ({shell_crd.spec.shellType}) must have exactly one bot. Found {len(bots)} bots.", ) def get_team_by_id( @@ -796,9 +796,9 @@ def _convert_to_team_dict( team_crd = Team.model_validate(team.json) - # Convert members to bots format and collect agent_names for is_mix_team calculation + # Convert members to bots format and collect shell_types for is_mix_team calculation bots = [] - agent_names = set() + shell_types = set() for member in team_crd.spec.members: # Find bot in kinds table @@ -824,12 +824,12 @@ def _convert_to_team_dict( } bots.append(bot_info) - # Collect agent_name for is_mix_team calculation - if bot_summary.get("agent_name"): - agent_names.add(bot_summary["agent_name"]) + # Collect shell_type for is_mix_team calculation + if bot_summary.get("shell_type"): + shell_types.add(bot_summary["shell_type"]) - # Calculate is_mix_team: true if there are multiple different agent types - is_mix_team = len(agent_names) > 1 + # Calculate is_mix_team: true if there are multiple different shell types + is_mix_team = len(shell_types) > 1 # Get agent_type from the first bot's shell agent_type = None @@ -861,16 +861,16 @@ def _convert_to_team_dict( ) if shell: shell_crd = Shell.model_validate(shell.json) - runtime = shell_crd.spec.runtime - # Map runtime to agent type - if runtime == "AgnoShell": + shell_type = shell_crd.spec.shellType + # Map shellType to agent type + if shell_type == "Agno": agent_type = "agno" - elif runtime == "ClaudeCodeShell": + elif shell_type == "ClaudeCode": agent_type = "claude" - elif runtime == "DifyShell": + elif shell_type == "Dify": agent_type = "dify" else: - agent_type = runtime.lower().replace("shell", "") + agent_type = shell_type.lower() if shell_type else None # Convert collaboration model to workflow format workflow = {"mode": team_crd.spec.collaborationModel} @@ -911,7 +911,7 @@ def _get_bot_summary(self, bot: Kind, db: Session, user_id: int) -> Dict[str, An f"[_get_bot_summary] bot.name={bot.name}, modelRef.name={model_ref_name}, modelRef.namespace={model_ref_namespace}" ) - # Get shell to extract agent_name + # Get shell to extract shell_type shell = ( db.query(Kind) .filter( @@ -924,10 +924,10 @@ def _get_bot_summary(self, bot: Kind, db: Session, user_id: int) -> Dict[str, An .first() ) - agent_name = "" + shell_type = "" if shell and shell.json: shell_crd = Shell.model_validate(shell.json) - agent_name = shell_crd.spec.runtime + shell_type = shell_crd.spec.shellType agent_config = {} @@ -1008,7 +1008,7 @@ def _get_bot_summary(self, bot: Kind, db: Session, user_id: int) -> Dict[str, An else: logger.info(f"[_get_bot_summary] No modelRef for bot {bot.name}") - result = {"agent_config": agent_config, "agent_name": agent_name} + 
result = {"agent_config": agent_config, "shell_type": shell_type} logger.info(f"[_get_bot_summary] Returning: {result}") return result @@ -1064,7 +1064,7 @@ def _convert_bot_to_dict( # Extract data from components system_prompt = "" mcp_servers = {} - agent_name = "" + shell_type = "" agent_config = {} if ghost and ghost.json: @@ -1074,7 +1074,7 @@ def _convert_bot_to_dict( if shell and shell.json: shell_crd = Shell.model_validate(shell.json) - agent_name = shell_crd.spec.runtime + shell_type = shell_crd.spec.shellType if model and model.json: model_crd = Model.model_validate(model.json) @@ -1084,7 +1084,7 @@ def _convert_bot_to_dict( "id": bot.id, "user_id": bot.user_id, "name": bot.name, - "agent_name": agent_name, + "shell_type": shell_type, "agent_config": agent_config, "system_prompt": system_prompt, "mcp_servers": mcp_servers, diff --git a/backend/app/services/model_aggregation_service.py b/backend/app/services/model_aggregation_service.py index 16d4ad51..86a57fed 100644 --- a/backend/app/services/model_aggregation_service.py +++ b/backend/app/services/model_aggregation_service.py @@ -145,42 +145,42 @@ def _extract_model_info_from_crd( "config": {}, } - def _is_model_compatible_with_agent( - self, provider: Optional[str], agent_name: str, support_model: List[str] + def _is_model_compatible_with_shell( + self, provider: Optional[str], shell_type: str, support_model: List[str] ) -> bool: """ - Check if a model is compatible with the given agent. + Check if a model is compatible with the given shell type. Args: provider: Model provider (e.g., 'openai', 'claude') - agent_name: Agent name (e.g., 'Agno', 'ClaudeCode') + shell_type: Shell type (e.g., 'Agno', 'ClaudeCode') support_model: List of supported model providers from shell spec Returns: True if compatible, False otherwise """ - # Agent to model provider mapping - agent_provider_map = {"Agno": "openai", "ClaudeCode": "claude"} + # Shell type to model provider mapping + shell_provider_map = {"Agno": "openai", "ClaudeCode": "claude"} # If supportModel is specified in shell, use it if support_model: return provider in support_model - # Otherwise, filter by agent's required provider - required_provider = agent_provider_map.get(agent_name) + # Otherwise, filter by shell's required provider + required_provider = shell_provider_map.get(shell_type) if required_provider: return provider == required_provider # No filter, allow all return True - def _get_shell_support_model(self, db: Session, agent_name: str) -> List[str]: + def _get_shell_support_model(self, db: Session, shell_type: str) -> List[str]: """ Get supported model list from shell configuration. 
Args: db: Database session - agent_name: Agent name + shell_type: Shell type Returns: List of supported model providers @@ -188,7 +188,7 @@ def _get_shell_support_model(self, db: Session, agent_name: str) -> List[str]: from app.models.public_shell import PublicShell shell_row = ( - db.query(PublicShell.json).filter(PublicShell.name == agent_name).first() + db.query(PublicShell.json).filter(PublicShell.name == shell_type).first() ) if shell_row and isinstance(shell_row[0], dict): @@ -230,7 +230,7 @@ def list_available_models( self, db: Session, current_user: User, - agent_name: Optional[str] = None, + shell_type: Optional[str] = None, include_config: bool = False, ) -> List[Dict[str, Any]]: """ @@ -245,7 +245,7 @@ def list_available_models( Args: db: Database session current_user: Current user - agent_name: Optional agent name to filter compatible models + shell_type: Optional shell type to filter compatible models include_config: Whether to include full config in response Returns: @@ -256,10 +256,10 @@ def list_available_models( - provider: Model provider - modelId: Model ID """ - # Get shell configuration if agent_name is provided + # Get shell configuration if shell_type is provided support_model: List[str] = [] - if agent_name: - support_model = self._get_shell_support_model(db, agent_name) + if shell_type: + support_model = self._get_shell_support_model(db, shell_type) result: List[UnifiedModel] = [] seen_names: Dict[str, ModelType] = {} # Track names to handle duplicates @@ -279,15 +279,13 @@ def list_available_models( # Custom models are user-specific configurations (isCustomConfig=True) if self._is_custom_model(model_data): continue - info = self._extract_model_info_from_crd(model_data) - # Filter by agent compatibility if agent_name is provided - if agent_name and not self._is_model_compatible_with_agent( - info["provider"], agent_name, support_model + # Filter by shell compatibility if shell_type is provided + if shell_type and not self._is_model_compatible_with_shell( + info["provider"], shell_type, support_model ): continue - unified = UnifiedModel( name=resource.name, model_type=ModelType.USER, # Mark as user-defined model @@ -316,9 +314,9 @@ def list_available_models( provider = env.get("model") if isinstance(env, dict) else None model_id = env.get("model_id") if isinstance(env, dict) else None - # Filter by agent compatibility if agent_name is provided - if agent_name and not self._is_model_compatible_with_agent( - provider, agent_name, support_model + # Filter by shell compatibility if shell_type is provided + if shell_type and not self._is_model_compatible_with_shell( + provider, shell_type, support_model ): continue diff --git a/backend/init_data/02-public-shells.yaml b/backend/init_data/02-public-shells.yaml index 03b50403..6863fb55 100644 --- a/backend/init_data/02-public-shells.yaml +++ b/backend/init_data/02-public-shells.yaml @@ -12,7 +12,7 @@ metadata: labels: type: local_engine spec: - runtime: ClaudeCode + shellType: ClaudeCode supportModel: [] baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 status: @@ -26,7 +26,7 @@ metadata: labels: type: local_engine spec: - runtime: Agno + shellType: Agno supportModel: [] baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 status: @@ -40,7 +40,7 @@ metadata: labels: type: external_api spec: - runtime: Dify + shellType: Dify supportModel: [] status: state: Available diff --git a/executor/agents/base.py b/executor/agents/base.py index a8429cca..a8394c6c 100644 --- a/executor/agents/base.py +++ 
b/executor/agents/base.py @@ -50,6 +50,7 @@ def __init__(self, task_data: Dict[str, Any]): self.subtask_id = task_data.get("subtask_id", -1) self.task_title = task_data.get("task_title", "") self.subtask_title = task_data.get("subtask_title", "") + self.task_type = task_data.get("type") # Task type (e.g., "validation" for validation tasks) self.execution_status = TaskStatus.INITIALIZED self.project_path = None @@ -113,7 +114,7 @@ def report_progress( message: Optional message string result: Optional result data dictionary """ - logger.info(f"Reporting progress: {progress}%, status: {status}, message: {message}, result: {result}") + logger.info(f"Reporting progress: {progress}%, status: {status}, message: {message}, result: {result}, task_type: {self.task_type}") self.callback_client.send_callback( task_id=self.task_id, subtask_id=self.subtask_id, @@ -123,6 +124,7 @@ def report_progress( status=status, message=message, result=result, + task_type=self.task_type, ) def pre_execute(self) -> TaskStatus: diff --git a/executor/callback/callback_client.py b/executor/callback/callback_client.py index 0e92c66c..a4067102 100644 --- a/executor/callback/callback_client.py +++ b/executor/callback/callback_client.py @@ -98,6 +98,7 @@ def send_callback( executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, result: Optional[Dict[str, Any]] = None, + task_type: Optional[str] = None, ) -> Dict[str, Any]: """ Send a callback to the executor_manager @@ -111,12 +112,13 @@ def send_callback( executor_name: Optional executor name executor_namespace: Optional executor namespace result: Optional result data dictionary + task_type: Optional task type (e.g., "validation" for validation tasks) Returns: Dict[str, Any]: Result returned by the callback interface """ logger.info( - f"Sending callback: task_id={task_id} subtask_id={subtask_id}, task_title={task_title}, progress={progress}" + f"Sending callback: task_id={task_id} subtask_id={subtask_id}, task_title={task_title}, progress={progress}, task_type={task_type}" ) if executor_name is None: @@ -139,6 +141,8 @@ def send_callback( data["error_message"] = message if result: data["result"] = result + if task_type: + data["task_type"] = task_type try: return self._request_with_retry(lambda: self._do_send_callback(data)) diff --git a/executor/callback/callback_handler.py b/executor/callback/callback_handler.py index 4d4ecefd..43f7755c 100644 --- a/executor/callback/callback_handler.py +++ b/executor/callback/callback_handler.py @@ -34,6 +34,7 @@ def send_status_callback( progress: int, executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, + task_type: Optional[str] = None, ) -> Dict[str, Any]: """ Send status callback @@ -46,6 +47,7 @@ def send_status_callback( progress (int): Progress executor_name (str, optional): Executor name executor_namespace (str, optional): Executor namespace + task_type (str, optional): Task type (e.g., "validation" for validation tasks) Returns: Dict[str, Any]: Callback response @@ -61,6 +63,7 @@ def send_status_callback( message=message, executor_name=executor_name, executor_namespace=executor_namespace, + task_type=task_type, ) if result and result.get("status") == TaskStatus.SUCCESS.value: @@ -84,6 +87,8 @@ def send_task_started_callback( subtask_title: Optional[str] = None, executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, + result: Optional[Dict[str, Any]] = None, + task_type: Optional[str] = None, ) -> Dict[str, Any]: """ Send task started callback @@ -93,11 
+98,13 @@ def send_task_started_callback( task_title (str): Task title executor_name (str, optional): Executor name executor_namespace (str, optional): Executor namespace + result (dict, optional): Result data to include in callback (e.g., validation_id) + task_type (str, optional): Task type (e.g., "validation" for validation tasks) Returns: Dict[str, Any]: Callback response """ - return send_status_callback( + return callback_client.send_callback( task_id=task_id, subtask_id=subtask_id, task_title=task_title, @@ -107,6 +114,8 @@ def send_task_started_callback( progress=50, executor_name=executor_name, executor_namespace=executor_namespace, + result=result, + task_type=task_type, ) @@ -119,6 +128,7 @@ def send_task_completed_callback( executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, result: Optional[Dict[str, Any]] = None, + task_type: Optional[str] = None, ) -> Dict[str, Any]: """ Send task completed callback @@ -132,6 +142,7 @@ def send_task_completed_callback( executor_name (str, optional): Executor name executor_namespace (str, optional): Executor namespace result (dict, optional): Result data to include in callback + task_type (str, optional): Task type (e.g., "validation" for validation tasks) Returns: Dict[str, Any]: Callback response @@ -147,6 +158,7 @@ def send_task_completed_callback( executor_name=executor_name, executor_namespace=executor_namespace, result=result, + task_type=task_type, ) @@ -158,6 +170,8 @@ def send_task_failed_callback( error_message: Optional[str] = None, executor_name: Optional[str] = None, executor_namespace: Optional[str] = None, + result: Optional[Dict[str, Any]] = None, + task_type: Optional[str] = None, ) -> Dict[str, Any]: """ Send task failed callback @@ -168,11 +182,13 @@ def send_task_failed_callback( error_message (str): Error message executor_name (str, optional): Executor name executor_namespace (str, optional): Executor namespace + result (dict, optional): Result data to include in callback (e.g., validation_id) + task_type (str, optional): Task type (e.g., "validation" for validation tasks) Returns: Dict[str, Any]: Callback response """ - return send_status_callback( + return callback_client.send_callback( task_id=task_id, subtask_id=subtask_id, task_title=task_title, @@ -182,4 +198,6 @@ def send_task_failed_callback( progress=100, executor_name=executor_name, executor_namespace=executor_namespace, + result=result, + task_type=task_type, ) \ No newline at end of file diff --git a/executor/services/agent_service.py b/executor/services/agent_service.py index 2078a12d..38897c80 100644 --- a/executor/services/agent_service.py +++ b/executor/services/agent_service.py @@ -62,14 +62,25 @@ def create_agent(self, task_data: Dict[str, Any]) -> Optional[Agent]: return existing_agent try: - bot_config = task_data.get("bot") - if isinstance(bot_config, list): - agent_name = bot_config[0].get("agent_name", "").strip().lower() + # Determine agent type based on task type + task_type = task_data.get("type", "") + + if task_type == "validation": + # For validation tasks, use ImageValidatorAgent + shell_type = "imagevalidator" + logger.info(f"[{_format_task_log(task_id, subtask_id)}] Validation task detected, using ImageValidatorAgent") else: - agent_name = bot_config.get("agent_name", "").strip().lower() + # For regular tasks, get shell_type from bot config + bot_config = task_data.get("bot") + if isinstance(bot_config, list): + shell_type = bot_config[0].get("shell_type", "").strip().lower() if bot_config else "" + elif 
isinstance(bot_config, dict): + shell_type = bot_config.get("shell_type", "").strip().lower() + else: + shell_type = "" - logger.info(f"[{_format_task_log(task_id, subtask_id)}] Creating new agent '{agent_name}'") - agent = AgentFactory.get_agent(agent_name, task_data) + logger.info(f"[{_format_task_log(task_id, subtask_id)}] Creating new agent '{shell_type}'") + agent = AgentFactory.get_agent(shell_type, task_data) if not agent: logger.error(f"[{_format_task_log(task_id, subtask_id)}] Failed to create agent") diff --git a/executor/tasks/task_processor.py b/executor/tasks/task_processor.py index 5401fa26..7c810e34 100644 --- a/executor/tasks/task_processor.py +++ b/executor/tasks/task_processor.py @@ -72,7 +72,7 @@ def _get_callback_params(task_data: Dict[str, Any]) -> Dict[str, str]: Returns: dict: Common callback parameters """ - return { + params = { "task_id": task_data.get("task_id", -1), "subtask_id": task_data.get("subtask_id", -1), "task_title": task_data.get("task_title", ""), @@ -80,6 +80,11 @@ def _get_callback_params(task_data: Dict[str, Any]) -> Dict[str, str]: "executor_name": os.getenv("EXECUTOR_NAME"), "executor_namespace": os.getenv("EXECUTOR_NAMESPACE"), } + # Include task_type if present (e.g., "validation" for validation tasks) + task_type = task_data.get("type") + if task_type: + params["task_type"] = task_type + return params def process(task_data: Dict[str, Any]) -> TaskStatus: @@ -96,8 +101,21 @@ def process(task_data: Dict[str, Any]) -> TaskStatus: # Get common callback parameters callback_params = _get_callback_params(task_data) + # Extract validation_id from validation_params if present (for validation tasks) + validation_params = task_data.get("validation_params", {}) + validation_id = validation_params.get("validation_id") if validation_params else None + + # For validation tasks, include validation_id in the started callback result + # so executor_manager can identify it as a validation task + started_result = None + if validation_id: + started_result = { + "validation_id": validation_id, + "stage": "running", + } + # Send task started callback - result = send_task_started_callback(**callback_params) + result = send_task_started_callback(result=started_result, **callback_params) if not result or result.get("status") != TaskStatus.SUCCESS.value: logger.error("Failed to send 'running' status callback") return TaskStatus.FAILED @@ -125,7 +143,20 @@ def process(task_data: Dict[str, Any]) -> TaskStatus: if status in [TaskStatus.SUCCESS, TaskStatus.COMPLETED]: send_task_completed_callback(message=message, **callback_params) elif status in [TaskStatus.FAILED]: - send_task_failed_callback(error_message=message, **callback_params) + # Include validation_id in result for validation tasks so that + # executor_manager can forward the failure status to backend + fail_result = None + if validation_id: + fail_result = { + "validation_id": validation_id, + "stage": "failed", + "validation_result": { + "valid": False, + "checks": [], + "errors": [message] if message else [], + }, + } + send_task_failed_callback(error_message=message, result=fail_result, **callback_params) return status diff --git a/executor_manager/executors/docker/binary_extractor.py b/executor_manager/executors/docker/binary_extractor.py index 7ef7b8bb..97ae653c 100644 --- a/executor_manager/executors/docker/binary_extractor.py +++ b/executor_manager/executors/docker/binary_extractor.py @@ -9,6 +9,13 @@ """ Binary extractor module for extracting executor binary from official image to Named Volume. 
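The agent selection in executor/services/agent_service.py above routes validation tasks to the image-validator agent and reads shell_type from the bot entry for everything else. A hedged sketch of that branch; the payload keys follow the patch, the example dicts are made up:

from typing import Any, Dict

def pick_agent_key(task_data: Dict[str, Any]) -> str:
    # Validation tasks always use the image validator, regardless of bot config.
    if task_data.get("type") == "validation":
        return "imagevalidator"
    # Regular tasks read shell_type from the bot entry, which may be a list or a dict.
    bot = task_data.get("bot")
    if isinstance(bot, list):
        return (bot[0].get("shell_type", "") if bot else "").strip().lower()
    if isinstance(bot, dict):
        return bot.get("shell_type", "").strip().lower()
    return ""

print(pick_agent_key({"type": "validation"}))                   # imagevalidator
print(pick_agent_key({"bot": [{"shell_type": "ClaudeCode"}]}))  # claudecode
print(pick_agent_key({"bot": {"shell_type": "Agno"}}))          # agno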
This enables the Init Container pattern where custom base images can run the latest executor. + +Uses a symlink-based versioning strategy to handle "Text file busy" errors: +- executor.v1, executor.v2 are versioned binaries +- executor is a symlink pointing to the current version +- New containers use the symlink, which can be updated atomically +- Old containers continue using their already-opened file handles +- Only keeps 2 versions (current + previous) to minimize disk usage """ import os @@ -74,9 +81,37 @@ def extract_executor_binary() -> bool: return False +def _get_image_digest(image: str) -> Optional[str]: + """ + Get the digest of a Docker image. + + Args: + image: The image name (with or without tag) + + Returns: + The image digest or None if not found + """ + try: + result = subprocess.run( + ["docker", "inspect", "--format", "{{.Id}}", image], + capture_output=True, + text=True, + timeout=30 + ) + if result.returncode == 0: + return result.stdout.strip() + return None + except Exception as e: + logger.warning(f"Error getting image digest: {e}") + return None + + def _should_extract_binary(target_image: str) -> Tuple[bool, Optional[str]]: """ - Check if binary extraction is needed by comparing versions. + Check if binary extraction is needed by comparing image digests. + + Uses image digest (sha256) instead of tag to ensure we always use + the latest version even when the tag (e.g., 'latest') hasn't changed. Args: target_image: The target executor image to compare against @@ -85,7 +120,13 @@ def _should_extract_binary(target_image: str) -> Tuple[bool, Optional[str]]: Tuple of (should_extract, current_version) """ try: - # Try to read version from existing volume + # Get the digest of the target image + target_digest = _get_image_digest(target_image) + if not target_digest: + logger.info(f"Could not get digest for {target_image}, will extract") + return True, None + + # Try to read version (digest) from existing volume result = subprocess.run( [ "docker", "run", "--rm", @@ -100,10 +141,12 @@ def _should_extract_binary(target_image: str) -> Tuple[bool, Optional[str]]: if result.returncode == 0: current_version = result.stdout.strip() - if current_version == target_image: + # Compare digests instead of image names + if current_version == target_digest: + logger.info(f"Executor binary up-to-date (digest: {target_digest[:20]}...)") return False, current_version else: - logger.info(f"Version mismatch: current={current_version}, target={target_image}") + logger.info(f"Digest mismatch: current={current_version[:20] if current_version else 'None'}..., target={target_digest[:20]}...") return True, current_version else: # Volume doesn't exist or version file not found @@ -120,7 +163,12 @@ def _should_extract_binary(target_image: str) -> Tuple[bool, Optional[str]]: def _extract_binary_to_volume(executor_image: str) -> bool: """ - Extract executor binary from image to Named Volume. + Extract executor binary from image to Named Volume using symlink-based versioning. + + This strategy handles "Text file busy" errors when the binary is in use: + 1. Copy new binary to a versioned file (executor.v1 or executor.v2) + 2. Update symlink to point to the new version (atomic operation) + 3. 
Clean up old version if not in use Args: executor_image: The source executor image @@ -129,6 +177,12 @@ def _extract_binary_to_volume(executor_image: str) -> bool: bool: True if successful, False otherwise """ try: + # Get the digest of the image to store as version + image_digest = _get_image_digest(executor_image) + if not image_digest: + logger.warning(f"Could not get digest for {executor_image}, using image name as version") + image_digest = executor_image + # Step 1: Create/ensure the Named Volume exists subprocess.run( ["docker", "volume", "create", EXECUTOR_BINARY_VOLUME], @@ -138,12 +192,52 @@ def _extract_binary_to_volume(executor_image: str) -> bool: ) logger.info(f"Created/verified volume: {EXECUTOR_BINARY_VOLUME}") - # Step 2: Extract executor binary and write version file - # Using a single container to copy files and write version + # Step 2: Extract executor binary using symlink-based versioning + # This handles "Text file busy" by: + # - Writing to a new versioned file (never overwriting running binary) + # - Atomically updating symlink (ln -sf is atomic on most filesystems) + # - Cleaning up old versions extract_cmd = f""" - cp -r /app/* /target/ 2>/dev/null || cp /app/executor /target/executor; - echo '{executor_image}' > {VERSION_FILE_PATH}; - chmod +x /target/executor 2>/dev/null || true + set -e + + # Determine which version slot to use (v1 or v2) + # Read current symlink target to know which slot is in use + if [ -L /target/executor ]; then + CURRENT=$(readlink /target/executor) + if [ "$CURRENT" = "executor.v1" ]; then + NEW_VERSION="executor.v2" + OLD_VERSION="executor.v1" + else + NEW_VERSION="executor.v1" + OLD_VERSION="executor.v2" + fi + else + # First time setup or executor is a regular file + NEW_VERSION="executor.v1" + OLD_VERSION="" + # Remove old regular file if exists (might fail if busy, that's ok) + rm -f /target/executor 2>/dev/null || true + fi + + # Copy new binary to versioned file + cp /app/executor /target/$NEW_VERSION + chmod +x /target/$NEW_VERSION + + # Atomically update symlink using ln -sf + # This creates a temp symlink and renames it (atomic on POSIX) + ln -sf $NEW_VERSION /target/executor.tmp + mv -f /target/executor.tmp /target/executor + + # Write version file only after successful symlink update + echo '{image_digest}' > {VERSION_FILE_PATH} + + # Clean up old version (will succeed even if file is busy - + # the file will be deleted when last process closes it) + if [ -n "$OLD_VERSION" ] && [ -f "/target/$OLD_VERSION" ]; then + rm -f /target/$OLD_VERSION 2>/dev/null || true + fi + + echo "SUCCESS: Updated executor symlink to $NEW_VERSION" """ result = subprocess.run( @@ -162,7 +256,9 @@ def _extract_binary_to_volume(executor_image: str) -> bool: logger.error(f"Failed to extract binary: {result.stderr}") return False - logger.info("Binary extraction completed successfully") + logger.info(f"Binary extraction completed successfully (digest: {image_digest[:20]}...)") + if result.stdout: + logger.debug(f"Extraction output: {result.stdout.strip()}") return True except subprocess.TimeoutExpired: diff --git a/executor_manager/executors/docker/executor.py b/executor_manager/executors/docker/executor.py index 6fdbb34c..88f54c63 100644 --- a/executor_manager/executors/docker/executor.py +++ b/executor_manager/executors/docker/executor.py @@ -14,6 +14,7 @@ import json import os import subprocess +import time from typing import Any, Dict, List, Optional, Tuple import requests import httpx @@ -93,6 +94,9 @@ def submit_executor( user_name = 
task_info["user_name"] executor_name = task_info["executor_name"] + # Check if this is a validation task (validation tasks use negative task_id) + is_validation_task = task.get("type") == "validation" + # Initialize execution status execution_status = { "status": "success", @@ -115,15 +119,18 @@ def submit_executor( # Unified exception handling self._handle_execution_exception(e, task_id, execution_status) - # Call callback function - self._call_callback( - callback, - task_id, - subtask_id, - execution_status["executor_name"], - execution_status["progress"], - execution_status["callback_status"] - ) + # Call callback function only for regular tasks (not validation tasks) + # Validation tasks don't exist in the database, so we skip the callback + # to avoid 404 errors when trying to update non-existent task status + if not is_validation_task: + self._call_callback( + callback, + task_id, + subtask_id, + execution_status["executor_name"], + execution_status["progress"], + execution_status["callback_status"] + ) # Return unified result structure return self._create_result_response(execution_status) @@ -177,6 +184,7 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], """Create new Docker container""" executor_name = status["executor_name"] task_id = task_info["task_id"] + is_validation_task = task.get("type") == "validation" # Check for custom base_image from bot configuration base_image = self._get_base_image_from_task(task) @@ -184,6 +192,10 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], # Get executor image executor_image = self._get_executor_image(task) + # If using custom base_image, ensure executor binary is up-to-date + if base_image: + self._ensure_executor_binary_updated(executor_image) + # Prepare Docker command with optional base_image support cmd = self._prepare_docker_command(task, task_info, executor_name, executor_image, base_image) @@ -198,7 +210,7 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], logger.info(f"Started Docker container {executor_name} with ID {container_id}") # For validation tasks, report starting_container stage - if task.get("type") == "validation": + if is_validation_task: self._report_validation_stage( task, stage="starting_container", @@ -207,9 +219,14 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], message="Container started, running validation checks", ) + # Check if container is still running after a short delay + # This catches cases where the container exits immediately (e.g., binary incompatibility) + if base_image: + self._check_container_health(task, executor_name, is_validation_task) + except subprocess.CalledProcessError as e: # For validation tasks, report image pull or container start failure - if task.get("type") == "validation": + if is_validation_task: error_msg = e.stderr or str(e) stage = "pulling_image" if "pull" in error_msg.lower() or "not found" in error_msg.lower() else "starting_container" self._report_validation_stage( @@ -223,6 +240,151 @@ def _create_new_container(self, task: Dict[str, Any], task_info: Dict[str, Any], ) raise + def _check_container_health(self, task: Dict[str, Any], executor_name: str, is_validation_task: bool) -> None: + """ + Check if container is still running after startup. 
+ + This catches cases where the container exits immediately due to: + - Binary incompatibility (glibc vs musl) + - Missing dependencies + - Entrypoint errors + + Args: + task: Task data + executor_name: Name of the container to check + is_validation_task: Whether this is a validation task + """ + # Wait a short time for container to potentially fail + time.sleep(2) + + try: + # Check container status + inspect_result = self.subprocess.run( + ["docker", "inspect", "--format", "{{.State.Status}}", executor_name], + capture_output=True, + text=True, + timeout=10 + ) + + if inspect_result.returncode != 0: + logger.warning(f"Failed to inspect container {executor_name}") + return + + container_status = inspect_result.stdout.strip() + + if container_status == "exited": + # Container has exited, get logs to understand why + logs_result = self.subprocess.run( + ["docker", "logs", "--tail", "50", executor_name], + capture_output=True, + text=True, + timeout=10 + ) + + # Get exit code + exit_code_result = self.subprocess.run( + ["docker", "inspect", "--format", "{{.State.ExitCode}}", executor_name], + capture_output=True, + text=True, + timeout=10 + ) + exit_code = exit_code_result.stdout.strip() if exit_code_result.returncode == 0 else "unknown" + + # Combine stdout and stderr for logs + container_logs = logs_result.stdout or logs_result.stderr or "No logs available" + + # Detect common error patterns + error_msg = self._analyze_container_failure(container_logs, exit_code) + + logger.error(f"Container {executor_name} exited immediately with code {exit_code}: {error_msg}") + + # Report failure for validation tasks + if is_validation_task: + self._report_validation_stage( + task, + stage="starting_container", + status="failed", + progress=100, + message=f"Container exited immediately: {error_msg}", + error_message=error_msg, + valid=False, + ) + + # Clean up the failed container + try: + self.subprocess.run( + ["docker", "rm", "-f", executor_name], + capture_output=True, + timeout=10 + ) + except Exception: + pass + + # Raise exception to mark task as failed + raise RuntimeError(f"Container exited immediately: {error_msg}") + + except subprocess.TimeoutExpired: + logger.warning(f"Timeout checking container health for {executor_name}") + except RuntimeError: + # Re-raise RuntimeError from container failure + raise + except Exception as e: + logger.warning(f"Error checking container health: {e}") + + def _analyze_container_failure(self, logs: str, exit_code: str) -> str: + """ + Analyze container logs to determine the cause of failure. + + Args: + logs: Container logs + exit_code: Container exit code + + Returns: + Human-readable error message + """ + logs_lower = logs.lower() + + # Check for common error patterns + if "no such file or directory" in logs_lower and "exec" in logs_lower: + return "Binary incompatibility: The executor binary cannot run in this image. This usually happens when the base image uses a different C library (e.g., Alpine uses musl while the executor was built with glibc). Please use a glibc-based image like Ubuntu, Debian, or AlmaLinux." + + if "not found" in logs_lower and ("libc" in logs_lower or "ld-linux" in logs_lower): + return "Missing C library: The base image is missing required system libraries. Please use a glibc-based image." + + if "permission denied" in logs_lower: + return "Permission denied: The executor binary does not have execute permissions or the user lacks required permissions." 
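The health check above boils down to three docker CLI calls. A compact, self-contained version of the same probe, possibly useful for reproducing an immediate-exit failure outside the executor manager; the container name and timeout are illustrative:

import subprocess
from typing import Tuple

def probe_container(name: str, timeout: int = 10) -> Tuple[str, str, str]:
    """Return (state, exit_code, recent_logs) for a container, mirroring the checks above."""
    def docker(*args: str) -> str:
        return subprocess.run(["docker", *args], capture_output=True,
                              text=True, timeout=timeout).stdout.strip()

    state = docker("inspect", "--format", "{{.State.Status}}", name)
    exit_code = docker("inspect", "--format", "{{.State.ExitCode}}", name)
    logs = subprocess.run(["docker", "logs", "--tail", "50", name],
                          capture_output=True, text=True, timeout=timeout)
    return state, exit_code, logs.stdout or logs.stderr

# Example: probe_container("executor-task-123") might return ("exited", "127", "exec: not found ...")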
+ + if exit_code == "127": + return "Command not found: The entrypoint or command could not be found in the container." + + if exit_code == "126": + return "Permission denied or not executable: The entrypoint exists but cannot be executed." + + # Default message with logs excerpt + logs_excerpt = logs[:500] if len(logs) > 500 else logs + return f"Container exited with code {exit_code}. Logs: {logs_excerpt}" + + def _ensure_executor_binary_updated(self, executor_image: str) -> None: + """ + Ensure executor binary in Named Volume is up-to-date before starting container. + + This method checks if the executor binary in the Named Volume matches the + current executor image digest. If not, it extracts the latest binary. + + Args: + executor_image: The executor image to extract binary from + """ + from executors.docker.binary_extractor import extract_executor_binary + + try: + logger.info(f"Checking executor binary for image: {executor_image}") + if extract_executor_binary(): + logger.info("Executor binary is up-to-date") + else: + logger.warning("Failed to update executor binary, using existing version") + except Exception as e: + logger.warning(f"Error checking executor binary: {e}, using existing version") + def _get_base_image_from_task(self, task: Dict[str, Any]) -> Optional[str]: """Extract custom base_image from task's bot configuration""" bots = task.get("bot", []) diff --git a/executor_manager/routers/routers.py b/executor_manager/routers/routers.py index 04a506ae..a4908f5e 100644 --- a/executor_manager/routers/routers.py +++ b/executor_manager/routers/routers.py @@ -69,6 +69,7 @@ class CallbackRequest(BaseModel): status: Optional[str] = None error_message: Optional[str] = None result: Optional[Dict[str, Any]] = None + task_type: Optional[str] = None # Task type: "validation" for validation tasks, None for regular tasks @app.post("/executor-manager/callback") @@ -86,11 +87,21 @@ async def callback_handler(request: CallbackRequest, http_request: Request): client_ip = http_request.client.host if http_request.client else "unknown" logger.info(f"Received callback: body={request} from {client_ip}") - # Check if this is a validation task callback (has validation_id in result) - if request.result and request.result.get("validation_id"): + # Check if this is a validation task callback + # Primary check: task_type == "validation" + # Fallback check: validation_id in result (for backward compatibility) + is_validation_task = request.task_type == "validation" or (request.result and request.result.get("validation_id")) + if is_validation_task: await _forward_validation_callback(request) - - # Directly call the API client to update task status + # For validation tasks, we only need to forward to backend for Redis update + # No need to update task status in database (validation tasks don't exist in DB) + logger.info(f"Successfully processed validation callback for task {request.task_id}") + return { + "status": "success", + "message": f"Successfully processed validation callback for task {request.task_id}", + } + + # For regular tasks, update task status in database success, result = api_client.update_task_status_by_fields( task_id=request.task_id, subtask_id=request.subtask_id, @@ -118,29 +129,39 @@ async def _forward_validation_callback(request: CallbackRequest): """Forward validation task callback to Backend for Redis status update""" import httpx - validation_id = request.result.get("validation_id") + # Get validation_id from result if available + validation_id = 
request.result.get("validation_id") if request.result else None if not validation_id: + # If no validation_id in result, we can't forward to backend + # This can happen when task_type is "validation" but result is None (e.g., early failure) + logger.warning(f"Validation callback for task {request.task_id} has no validation_id, skipping forward") return - # Map callback status to validation status + # Map callback status to validation status (case-insensitive) + status_lower = request.status.lower() if request.status else "" status_mapping = { "running": "running_checks", "completed": "completed", "failed": "completed", } - validation_status = status_mapping.get(request.status, request.status) + validation_status = status_mapping.get(status_lower, request.status) # Extract validation result from callback validation_result = request.result.get("validation_result", {}) stage = request.result.get("stage", "Running checks") progress = request.progress + # For failed status, ensure valid is False if not explicitly set + valid_value = validation_result.get("valid") + if status_lower == "failed" and valid_value is None: + valid_value = False + # Build update payload update_payload = { "status": validation_status, "stage": stage, "progress": progress, - "valid": validation_result.get("valid"), + "valid": valid_value, "checks": validation_result.get("checks"), "errors": validation_result.get("errors"), "errorMessage": request.error_message, @@ -154,7 +175,7 @@ async def _forward_validation_callback(request: CallbackRequest): async with httpx.AsyncClient(timeout=10.0) as client: response = await client.post(update_url, json=update_payload) if response.status_code == 200: - logger.info(f"Successfully forwarded validation callback: {validation_id} -> {validation_status}") + logger.info(f"Successfully forwarded validation callback: {validation_id} -> {validation_status}, valid={valid_value}") else: logger.warning(f"Failed to forward validation callback: {response.status_code} {response.text}") except Exception as e: @@ -294,7 +315,7 @@ async def validate_image(request: ValidateImageRequest, http_request: Request): # Build validation task data # Use a unique negative task_id to distinguish validation tasks from regular tasks import time - validation_task_id = -int(time.time() * 1000) % 1000000 # Negative ID for validation tasks + validation_task_id = -(int(time.time() * 1000) % 1000000) # Negative ID for validation tasks validation_task = { "task_id": validation_task_id, diff --git a/frontend/src/apis/bots.ts b/frontend/src/apis/bots.ts index 23504051..4afc1bb5 100644 --- a/frontend/src/apis/bots.ts +++ b/frontend/src/apis/bots.ts @@ -8,7 +8,7 @@ import { Bot, PaginationParams, SuccessMessage } from '../types/api'; // Bot Request/Response Types export interface CreateBotRequest { name: string; - agent_name: string; + shell_name: string; // Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') agent_config: Record; system_prompt: string; mcp_servers: Record; @@ -17,7 +17,7 @@ export interface CreateBotRequest { export interface UpdateBotRequest { name?: string; - agent_name?: string; + shell_name?: string; // Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') agent_config?: Record; system_prompt?: string; mcp_servers?: Record; diff --git a/frontend/src/apis/mocks/bot.ts b/frontend/src/apis/mocks/bot.ts index 9c380ec7..76652ac1 100644 --- a/frontend/src/apis/mocks/bot.ts +++ b/frontend/src/apis/mocks/bot.ts @@ -10,7 +10,8 @@ export const MOCK_BOTS: Bot[] = [ { id: 1, name: 'Code Reviewer', - 
agent_name: 'claude_code_agent', + shell_name: 'ClaudeCode', + shell_type: 'ClaudeCode', agent_config: { model: 'claude-3-opus-20240229' }, system_prompt: 'You are a senior software engineer. Please review the code for any issues.', mcp_servers: {}, @@ -21,7 +22,8 @@ export const MOCK_BOTS: Bot[] = [ { id: 2, name: 'Unit Test Writer', - agent_name: 'claude_code_agent', + shell_name: 'ClaudeCode', + shell_type: 'ClaudeCode', agent_config: { model: 'claude-3-sonnet-20240229' }, system_prompt: 'You are a QA engineer. Please write unit tests for the given code.', mcp_servers: {}, @@ -45,6 +47,7 @@ export const botHandlers = [ const newBot: Bot = { id: MOCK_BOTS.length + 1, ...botData, + shell_type: botData.shell_name, // For mock, shell_type equals shell_name is_active: true, created_at: new Date().toISOString(), updated_at: new Date().toISOString(), diff --git a/frontend/src/apis/models.ts b/frontend/src/apis/models.ts index 43fe6cc5..eba7a092 100644 --- a/frontend/src/apis/models.ts +++ b/frontend/src/apis/models.ts @@ -110,10 +110,10 @@ export interface CompatibleModelsResponse { // Model Services export const modelApis = { /** - * Get model names for a specific agent (legacy API, use getUnifiedModels for new implementations) + * Get model names for a specific shell type (legacy API, use getUnifiedModels for new implementations) */ - async getModelNames(agentName: string): Promise { - return apiClient.get(`/models/names?agent_name=${encodeURIComponent(agentName)}`); + async getModelNames(shellType: string): Promise { + return apiClient.get(`/models/names?shell_type=${encodeURIComponent(shellType)}`); }, /** @@ -122,16 +122,16 @@ export const modelApis = { * This is the recommended API for new implementations. * Each model includes a 'type' field ('public' or 'user') to identify its source. 
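On the wire, the renamed query parameter means clients now filter models by shell type rather than agent name. A hedged sketch of the equivalent call from Python; the base URL and auth header are placeholders, only the /models/names path and the shell_type parameter come from the patch:

import requests

API_BASE = "http://localhost:8000/api"         # placeholder; deployment-specific
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder; auth scheme not shown in this patch

def list_model_names(shell_type: str) -> list:
    resp = requests.get(f"{API_BASE}/models/names",
                        params={"shell_type": shell_type},  # was agent_name before this change
                        headers=HEADERS, timeout=10)
    resp.raise_for_status()
    return resp.json()

# e.g. list_model_names("ClaudeCode") returns only models whose provider is compatible with that shell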
* - * @param agentName - Optional agent name to filter compatible models + * @param shellType - Optional shell type to filter compatible models * @param includeConfig - Whether to include full model config in response */ async getUnifiedModels( - agentName?: string, + shellType?: string, includeConfig: boolean = false ): Promise { const params = new URLSearchParams(); - if (agentName) { - params.append('agent_name', agentName); + if (shellType) { + params.append('shell_type', shellType); } if (includeConfig) { params.append('include_config', 'true'); @@ -206,9 +206,9 @@ export const modelApis = { }, /** - * Get models compatible with a specific agent type + * Get models compatible with a specific shell type */ - async getCompatibleModels(agentName: string): Promise { - return apiClient.get(`/models/compatible?agent_name=${encodeURIComponent(agentName)}`); + async getCompatibleModels(shellType: string): Promise { + return apiClient.get(`/models/compatible?shell_type=${encodeURIComponent(shellType)}`); }, }; diff --git a/frontend/src/apis/shells.ts b/frontend/src/apis/shells.ts index 83fbd4bb..3f64b459 100644 --- a/frontend/src/apis/shells.ts +++ b/frontend/src/apis/shells.ts @@ -2,61 +2,61 @@ // // SPDX-License-Identifier: Apache-2.0 -import { apiClient } from './client' +import { apiClient } from './client'; // Shell Types -export type ShellTypeEnum = 'public' | 'user' +export type ShellTypeEnum = 'public' | 'user'; export interface UnifiedShell { - name: string - type: ShellTypeEnum // 'public' or 'user' - identifies shell source - displayName?: string | null - runtime: string - baseImage?: string | null - baseShellRef?: string | null - supportModel?: string[] | null - shellType?: 'local_engine' | 'external_api' | null // Shell execution type + name: string; + type: ShellTypeEnum; // 'public' or 'user' - identifies shell source + displayName?: string | null; + shellType: string; // Agent type: 'ClaudeCode' | 'Agno' | 'Dify' + baseImage?: string | null; + baseShellRef?: string | null; + supportModel?: string[] | null; + executionType?: 'local_engine' | 'external_api' | null; // Shell execution type } export interface UnifiedShellListResponse { - data: UnifiedShell[] + data: UnifiedShell[]; } export interface ShellCreateRequest { - name: string - displayName?: string - baseShellRef: string // Required: base public shell name (e.g., "ClaudeCode") - baseImage: string // Required: custom base image address + name: string; + displayName?: string; + baseShellRef: string; // Required: base public shell name (e.g., "ClaudeCode") + baseImage: string; // Required: custom base image address } export interface ShellUpdateRequest { - displayName?: string - baseImage?: string + displayName?: string; + baseImage?: string; } // Image Validation Types export interface ImageValidationRequest { - image: string - shellType: string // e.g., "ClaudeCode", "Agno" - shellName?: string // Optional shell name for tracking + image: string; + shellType: string; // e.g., "ClaudeCode", "Agno" + shellName?: string; // Optional shell name for tracking } export interface ImageCheckResult { - name: string - version?: string | null - status: 'pass' | 'fail' - message?: string | null + name: string; + version?: string | null; + status: 'pass' | 'fail'; + message?: string | null; } export interface ImageValidationResponse { - status: 'submitted' | 'skipped' | 'error' - message: string - validationId?: string | null // UUID for polling validation status - validationTaskId?: number | null // Legacy field + status: 'submitted' | 
'skipped' | 'error'; + message: string; + validationId?: string | null; // UUID for polling validation status + validationTaskId?: number | null; // Legacy field // For immediate results (e.g., Dify skip) - valid?: boolean | null - checks?: ImageCheckResult[] | null - errors?: string[] | null + valid?: boolean | null; + checks?: ImageCheckResult[] | null; + errors?: string[] | null; } // Validation Status Types @@ -65,17 +65,17 @@ export type ValidationStage = | 'pulling_image' | 'starting_container' | 'running_checks' - | 'completed' + | 'completed'; export interface ValidationStatusResponse { - validationId: string - status: ValidationStage - stage: string // Human-readable stage description - progress: number // 0-100 - valid?: boolean | null - checks?: ImageCheckResult[] | null - errors?: string[] | null - errorMessage?: string | null + validationId: string; + status: ValidationStage; + stage: string; // Human-readable stage description + progress: number; // 0-100 + valid?: boolean | null; + checks?: ImageCheckResult[] | null; + errors?: string[] | null; + errorMessage?: string | null; } // Shell Services @@ -86,7 +86,7 @@ export const shellApis = { * Each shell includes a 'type' field ('public' or 'user') to identify its source. */ async getUnifiedShells(): Promise { - return apiClient.get('/shells/unified') + return apiClient.get('/shells/unified'); }, /** @@ -96,42 +96,42 @@ export const shellApis = { * @param shellType - Optional shell type ('public' or 'user') */ async getUnifiedShell(shellName: string, shellType?: ShellTypeEnum): Promise { - const params = new URLSearchParams() + const params = new URLSearchParams(); if (shellType) { - params.append('shell_type', shellType) + params.append('shell_type', shellType); } - const queryString = params.toString() + const queryString = params.toString(); return apiClient.get( `/shells/unified/${encodeURIComponent(shellName)}${queryString ? 
`?${queryString}` : ''}` - ) + ); }, /** * Create a new user-defined shell */ async createShell(request: ShellCreateRequest): Promise { - return apiClient.post('/shells', request) + return apiClient.post('/shells', request); }, /** * Update an existing user-defined shell */ async updateShell(name: string, request: ShellUpdateRequest): Promise { - return apiClient.put(`/shells/${encodeURIComponent(name)}`, request) + return apiClient.put(`/shells/${encodeURIComponent(name)}`, request); }, /** * Delete a user-defined shell */ async deleteShell(name: string): Promise { - return apiClient.delete(`/shells/${encodeURIComponent(name)}`) + return apiClient.delete(`/shells/${encodeURIComponent(name)}`); }, /** * Validate base image compatibility with a shell type */ async validateImage(request: ImageValidationRequest): Promise { - return apiClient.post('/shells/validate-image', request) + return apiClient.post('/shells/validate-image', request); }, /** @@ -140,24 +140,24 @@ export const shellApis = { * @param validationId - UUID of the validation task */ async getValidationStatus(validationId: string): Promise { - return apiClient.get(`/shells/validation-status/${encodeURIComponent(validationId)}`) + return apiClient.get(`/shells/validation-status/${encodeURIComponent(validationId)}`); }, /** * Get public shells only (filter from unified list) */ async getPublicShells(): Promise { - const response = await this.getUnifiedShells() - return (response.data || []).filter(shell => shell.type === 'public') + const response = await this.getUnifiedShells(); + return (response.data || []).filter(shell => shell.type === 'public'); }, /** * Get local_engine type shells only (for base shell selection) */ async getLocalEngineShells(): Promise { - const response = await this.getUnifiedShells() + const response = await this.getUnifiedShells(); return (response.data || []).filter( - shell => shell.type === 'public' && shell.shellType === 'local_engine' - ) + shell => shell.type === 'public' && shell.executionType === 'local_engine' + ); }, -} +}; diff --git a/frontend/src/features/settings/components/BotEdit.tsx b/frontend/src/features/settings/components/BotEdit.tsx index ed6065a9..410f3a36 100644 --- a/frontend/src/features/settings/components/BotEdit.tsx +++ b/frontend/src/features/settings/components/BotEdit.tsx @@ -25,8 +25,8 @@ import { getModelTypeFromConfig, createPredefinedModelConfig, } from '@/features/settings/services/bots'; -import { agentApis, Agent } from '@/apis/agents'; import { modelApis, UnifiedModel, ModelTypeEnum } from '@/apis/models'; +import { shellApis, UnifiedShell } from '@/apis/shells'; import { fetchSkillsList } from '@/apis/skills'; import { useTranslation } from 'react-i18next'; import { adaptMcpConfigForAgent, isValidAgentType } from '../utils/mcpTypeAdapter'; @@ -68,8 +68,8 @@ const BotEdit: React.FC = ({ const { t, i18n } = useTranslation('common'); const [botSaving, setBotSaving] = useState(false); - const [agents, setAgents] = useState([]); - const [loadingAgents, setLoadingAgents] = useState(false); + const [shells, setShells] = useState([]); + const [loadingShells, setLoadingShells] = useState(false); const [models, setModels] = useState([]); const [loadingModels, setLoadingModels] = useState(false); const [isCustomModel, setIsCustomModel] = useState(false); @@ -91,11 +91,12 @@ const BotEdit: React.FC = ({ }, [editingBot, editingBotId, cloningBot]); const [botName, setBotName] = useState(baseBot?.name || ''); - const [agentName, setAgentName] = useState(baseBot?.agent_name || 
''); + // Use shell_name for the selected shell, fallback to shell_type for backward compatibility + const [agentName, setAgentName] = useState(baseBot?.shell_name || baseBot?.shell_type || ''); // Helper function to remove protocol from agent_config for display const getAgentConfigWithoutProtocol = (config: Record | undefined): string => { if (!config) return ''; - + const { protocol: _, ...rest } = config; return Object.keys(rest).length > 0 ? JSON.stringify(rest, null, 2) : ''; }; @@ -255,32 +256,32 @@ const BotEdit: React.FC = ({ window.open(docsUrl, '_blank'); }, [i18n.language]); - // Get agents list + // Get shells list (including both public and user-defined shells) useEffect(() => { - const fetchAgents = async () => { - setLoadingAgents(true); + const fetchShells = async () => { + setLoadingShells(true); try { - const response = await agentApis.getAgents(); - // Filter agents based on allowedAgents prop - let filteredAgents = response.items; + const response = await shellApis.getUnifiedShells(); + // Filter shells based on allowedAgents prop (using shellType as agent type) + let filteredShells = response.data || []; if (allowedAgents && allowedAgents.length > 0) { - filteredAgents = response.items.filter(agent => - allowedAgents.includes(agent.name as AgentType) + filteredShells = filteredShells.filter(shell => + allowedAgents.includes(shell.shellType as AgentType) ); } - setAgents(filteredAgents); + setShells(filteredShells); } catch (error) { - console.error('Failed to fetch agents:', error); + console.error('Failed to fetch shells:', error); toast({ variant: 'destructive', title: t('bot.errors.fetch_agents_failed'), }); } finally { - setLoadingAgents(false); + setLoadingShells(false); } }; - fetchAgents(); + fetchShells(); }, [toast, t, allowedAgents]); // Get skills list - only for ClaudeCode agent useEffect(() => { @@ -312,7 +313,8 @@ const BotEdit: React.FC = ({ useEffect(() => { console.log('[DEBUG] fetchModels useEffect triggered', { agentName, - baseBot_agent_name: baseBot?.agent_name, + baseBot_shell_name: baseBot?.shell_name, + baseBot_shell_type: baseBot?.shell_type, baseBot_agent_config: baseBot?.agent_config, }); @@ -324,17 +326,24 @@ const BotEdit: React.FC = ({ const fetchModels = async () => { setLoadingModels(true); try { + // Find the selected shell to get its shellType for model filtering + const selectedShell = shells.find(s => s.name === agentName); + // Use shell's shellType for model filtering, fallback to agentName for public shells + const shellType = selectedShell?.shellType || agentName; + // Use the new unified models API which includes type information - const response = await modelApis.getUnifiedModels(agentName); + const response = await modelApis.getUnifiedModels(shellType); console.log('[DEBUG] Models loaded:', response.data); setModels(response.data); // After loading models, check if we should restore the bot's saved model // This handles the case when editing an existing bot with a predefined model - // Only restore if the current agentName matches the baseBot's agent_name + // Only restore if the current agentName matches the baseBot's shell_name // (i.e., user hasn't switched to a different agent) const hasConfig = baseBot?.agent_config && Object.keys(baseBot.agent_config).length > 0; - const agentMatches = baseBot?.agent_name === agentName; + // Use shell_name for comparison, fallback to shell_type for backward compatibility + const baseBotShellName = baseBot?.shell_name || baseBot?.shell_type; + const agentMatches = baseBotShellName === 
agentName; const isPredefined = hasConfig && isPredefinedModel(baseBot.agent_config); console.log('[DEBUG] Model restore check:', { @@ -389,11 +398,12 @@ const BotEdit: React.FC = ({ }; fetchModels(); - }, [agentName, toast, t, baseBot]); + }, [agentName, shells, toast, t, baseBot]); // Reset base form when switching editing object useEffect(() => { setBotName(baseBot?.name || ''); - setAgentName(baseBot?.agent_name || ''); + // Use shell_name for the selected shell, fallback to shell_type for backward compatibility + setAgentName(baseBot?.shell_name || baseBot?.shell_type || ''); setPrompt(baseBot?.system_prompt || ''); setMcpConfig(baseBot?.mcp_servers ? JSON.stringify(baseBot.mcp_servers, null, 2) : ''); setSelectedSkills(baseBot?.skills || []); @@ -576,7 +586,7 @@ const BotEdit: React.FC = ({ try { const botReq: CreateBotRequest = { name: botName.trim(), - agent_name: agentName.trim(), + shell_name: agentName.trim(), // Use shell_name instead of shell_type agent_config: parsedAgentConfig as Record, system_prompt: isDifyAgent ? '' : prompt.trim() || '', // Clear system_prompt for Dify mcp_servers: parsedMcpConfig ?? {}, @@ -737,15 +747,20 @@ const BotEdit: React.FC = ({ } setAgentName(value); }} - disabled={loadingAgents || readOnly} + disabled={loadingShells || readOnly} > - {agents.map(agent => ( - - {agent.name} + {shells.map(shell => ( + + {shell.displayName || shell.name} + {shell.type === 'user' && ( + + [{t('bot.custom_shell', '自定义')}] + + )} ))} diff --git a/frontend/src/features/settings/components/BotList.tsx b/frontend/src/features/settings/components/BotList.tsx index ef625f08..3da10ebf 100644 --- a/frontend/src/features/settings/components/BotList.tsx +++ b/frontend/src/features/settings/components/BotList.tsx @@ -173,7 +173,7 @@ export default function BotList() {
- {bot.agent_name} + {bot.shell_type} {isPredefinedModel(bot.agent_config) diff --git a/frontend/src/features/settings/components/ShellEdit.tsx b/frontend/src/features/settings/components/ShellEdit.tsx index a4cd7d8e..23644407 100644 --- a/frontend/src/features/settings/components/ShellEdit.tsx +++ b/frontend/src/features/settings/components/ShellEdit.tsx @@ -2,34 +2,34 @@ // // SPDX-License-Identifier: Apache-2.0 -'use client' +'use client'; -import React, { useCallback, useState, useEffect, useRef } from 'react' -import { Button } from '@/components/ui/button' -import { Input } from '@/components/ui/input' -import { Label } from '@/components/ui/label' +import React, { useCallback, useState, useEffect, useRef } from 'react'; +import { Button } from '@/components/ui/button'; +import { Input } from '@/components/ui/input'; +import { Label } from '@/components/ui/label'; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue, -} from '@/components/ui/select' -import { Progress } from '@/components/ui/progress' -import { Loader2 } from 'lucide-react' -import { BeakerIcon, CheckCircleIcon, XCircleIcon } from '@heroicons/react/24/outline' -import { useTranslation } from '@/hooks/useTranslation' +} from '@/components/ui/select'; +import { Progress } from '@/components/ui/progress'; +import { Loader2 } from 'lucide-react'; +import { BeakerIcon, CheckCircleIcon, XCircleIcon } from '@heroicons/react/24/outline'; +import { useTranslation } from '@/hooks/useTranslation'; import { shellApis, UnifiedShell, ImageCheckResult, ValidationStage, ValidationStatusResponse, -} from '@/apis/shells' +} from '@/apis/shells'; // Polling configuration -const POLLING_INTERVAL = 2000 // 2 seconds -const MAX_POLLING_COUNT = 60 // 60 * 2s = 120 seconds timeout +const POLLING_INTERVAL = 2000; // 2 seconds +const MAX_POLLING_COUNT = 60; // 60 * 2s = 120 seconds timeout // Stage progress mapping const STAGE_PROGRESS: Record = { @@ -38,99 +38,99 @@ const STAGE_PROGRESS: Record = { starting_container: 50, running_checks: 70, completed: 100, -} +}; interface ShellEditProps { - shell: UnifiedShell | null - onClose: () => void - toast: ReturnType['toast'] + shell: UnifiedShell | null; + onClose: () => void; + toast: ReturnType['toast']; } const ShellEdit: React.FC = ({ shell, onClose, toast }) => { - const { t } = useTranslation('common') - const isEditing = !!shell + const { t } = useTranslation('common'); + const isEditing = !!shell; // Form state - const [name, setName] = useState(shell?.name || '') - const [displayName, setDisplayName] = useState(shell?.displayName || '') - const [baseShellRef, setBaseShellRef] = useState(shell?.baseShellRef || '') - const [baseImage, setBaseImage] = useState(shell?.baseImage || '') - const [originalBaseImage] = useState(shell?.baseImage || '') // Track original value for edit mode - const [saving, setSaving] = useState(false) - const [validating, setValidating] = useState(false) - const [_validationId, setValidationId] = useState(null) - const pollingRef = useRef(null) + const [name, setName] = useState(shell?.name || ''); + const [displayName, setDisplayName] = useState(shell?.displayName || ''); + const [baseShellRef, setBaseShellRef] = useState(shell?.baseShellRef || ''); + const [baseImage, setBaseImage] = useState(shell?.baseImage || ''); + const [originalBaseImage] = useState(shell?.baseImage || ''); // Track original value for edit mode + const [saving, setSaving] = useState(false); + const [validating, setValidating] = useState(false); + const [_validationId, 
setValidationId] = useState(null); + const pollingRef = useRef(null); const [validationStatus, setValidationStatus] = useState<{ - status: ValidationStage | 'error' | 'success' | 'failed' - message: string - progress: number - valid?: boolean - checks?: ImageCheckResult[] - errors?: string[] - } | null>(null) + status: ValidationStage | 'error' | 'success' | 'failed'; + message: string; + progress: number; + valid?: boolean; + checks?: ImageCheckResult[]; + errors?: string[]; + } | null>(null); // Available base shells (public local_engine shells) - const [baseShells, setBaseShells] = useState([]) - const [loadingBaseShells, setLoadingBaseShells] = useState(true) + const [baseShells, setBaseShells] = useState([]); + const [loadingBaseShells, setLoadingBaseShells] = useState(true); // Cleanup polling on unmount useEffect(() => { return () => { if (pollingRef.current) { - clearInterval(pollingRef.current) + clearInterval(pollingRef.current); } - } - }, []) + }; + }, []); useEffect(() => { const fetchBaseShells = async () => { try { - const shells = await shellApis.getLocalEngineShells() - setBaseShells(shells) + const shells = await shellApis.getLocalEngineShells(); + setBaseShells(shells); } catch (error) { - console.error('Failed to fetch base shells:', error) + console.error('Failed to fetch base shells:', error); } finally { - setLoadingBaseShells(false) + setLoadingBaseShells(false); } - } - fetchBaseShells() - }, []) + }; + fetchBaseShells(); + }, []); // Start polling for validation status const startPolling = useCallback( (validationIdToCheck: string) => { if (pollingRef.current) { - clearInterval(pollingRef.current) + clearInterval(pollingRef.current); } - let count = 0 + let count = 0; pollingRef.current = setInterval(async () => { - count++ + count++; if (count >= MAX_POLLING_COUNT) { // Timeout - clearInterval(pollingRef.current!) - pollingRef.current = null - setValidating(false) + clearInterval(pollingRef.current!); + pollingRef.current = null; + setValidating(false); setValidationStatus({ status: 'error', message: t('shells.validation_timeout'), progress: 0, valid: false, errors: [t('shells.validation_timeout')], - }) + }); toast({ variant: 'destructive', title: t('shells.validation_failed'), description: t('shells.validation_timeout'), - }) - return + }); + return; } try { const result: ValidationStatusResponse = - await shellApis.getValidationStatus(validationIdToCheck) + await shellApis.getValidationStatus(validationIdToCheck); // Update validation status display setValidationStatus({ @@ -140,13 +140,13 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { valid: result.valid ?? undefined, checks: result.checks ?? undefined, errors: result.errors ?? undefined, - }) + }); // Check if validation is completed if (result.status === 'completed') { - clearInterval(pollingRef.current!) - pollingRef.current = null - setValidating(false) + clearInterval(pollingRef.current!); + pollingRef.current = null; + setValidating(false); if (result.valid === true) { setValidationStatus({ @@ -155,10 +155,10 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { progress: 100, valid: true, checks: result.checks ?? undefined, - }) + }); toast({ title: t('shells.validation_success'), - }) + }); } else { setValidationStatus({ status: 'failed', @@ -167,60 +167,60 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { valid: false, checks: result.checks ?? undefined, errors: result.errors ?? 
undefined, - }) + }); toast({ variant: 'destructive', title: t('shells.validation_failed'), description: result.errorMessage || t('shells.validation_not_passed'), - }) + }); } } } catch (error) { - console.error('Failed to poll validation status:', error) + console.error('Failed to poll validation status:', error); // Don't stop polling on transient errors, just log it } - }, POLLING_INTERVAL) + }, POLLING_INTERVAL); }, [t, toast] - ) + ); const handleValidateImage = async () => { if (!baseImage || !baseShellRef) { toast({ variant: 'destructive', title: t('shells.errors.base_image_and_shell_required'), - }) - return + }); + return; } // Find the runtime for selected base shell - const selectedBaseShell = baseShells.find(s => s.name === baseShellRef) + const selectedBaseShell = baseShells.find(s => s.name === baseShellRef); if (!selectedBaseShell) { toast({ variant: 'destructive', title: t('shells.errors.base_shell_not_found'), - }) - return + }); + return; } - setValidating(true) + setValidating(true); setValidationStatus({ status: 'submitted', message: t('shells.validation_stage_submitted'), progress: STAGE_PROGRESS.submitted, - }) + }); try { const result = await shellApis.validateImage({ image: baseImage, - shellType: selectedBaseShell.runtime, + shellType: selectedBaseShell.shellType, shellName: name || undefined, - }) + }); // Handle different response statuses if (result.status === 'skipped') { // Dify type - validation not needed - setValidating(false) + setValidating(false); setValidationStatus({ status: 'success', message: result.message, @@ -228,73 +228,73 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { valid: true, checks: [], errors: [], - }) + }); toast({ title: t('shells.validation_skipped'), description: result.message, - }) + }); } else if (result.status === 'submitted' && result.validationId) { // Async validation task submitted - start polling - setValidationId(result.validationId) - startPolling(result.validationId) + setValidationId(result.validationId); + startPolling(result.validationId); toast({ title: t('shells.validation_submitted'), description: t('shells.validation_async_hint'), - }) + }); } else if (result.status === 'error') { // Error submitting validation - setValidating(false) + setValidating(false); setValidationStatus({ status: 'error', message: result.message, progress: 0, valid: false, errors: result.errors || [], - }) + }); toast({ variant: 'destructive', title: t('shells.validation_failed'), description: result.message, - }) + }); } } catch (error) { - setValidating(false) + setValidating(false); setValidationStatus({ status: 'error', message: (error as Error).message, progress: 0, valid: false, errors: [(error as Error).message], - }) + }); toast({ variant: 'destructive', title: t('shells.validation_failed'), description: (error as Error).message, - }) + }); } - } + }; // Check if save button should be disabled const isSaveDisabled = useCallback(() => { // If there's no baseImage, no validation needed - if (!baseImage) return false + if (!baseImage) return false; // In edit mode, if baseImage hasn't changed, no re-validation needed - if (isEditing && baseImage === originalBaseImage) return false + if (isEditing && baseImage === originalBaseImage) return false; // If there's a baseImage, validation must pass - if (!validationStatus) return true - if (validationStatus.status !== 'success' || validationStatus.valid !== true) return true + if (!validationStatus) return true; + if (validationStatus.status !== 'success' || 
validationStatus.valid !== true) return true; - return false - }, [baseImage, isEditing, originalBaseImage, validationStatus]) + return false; + }, [baseImage, isEditing, originalBaseImage, validationStatus]); const getSaveButtonTooltip = useCallback(() => { if (isSaveDisabled()) { - return t('shells.validation_required') + return t('shells.validation_required'); } - return undefined - }, [isSaveDisabled, t]) + return undefined; + }, [isSaveDisabled, t]); const handleSave = async () => { // Validation @@ -302,18 +302,18 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { toast({ variant: 'destructive', title: t('shells.errors.name_required'), - }) - return + }); + return; } // Validate name format (lowercase letters, numbers, and hyphens only) - const nameRegex = /^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$/ + const nameRegex = /^[a-z0-9][a-z0-9-]*[a-z0-9]$|^[a-z0-9]$/; if (!nameRegex.test(name)) { toast({ variant: 'destructive', title: t('shells.errors.name_invalid'), - }) - return + }); + return; } if (!isEditing) { @@ -321,92 +321,92 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { toast({ variant: 'destructive', title: t('shells.errors.base_shell_required'), - }) - return + }); + return; } if (!baseImage.trim()) { toast({ variant: 'destructive', title: t('shells.errors.base_image_required'), - }) - return + }); + return; } } - setSaving(true) + setSaving(true); try { if (isEditing) { await shellApis.updateShell(shell.name, { displayName: displayName.trim() || undefined, baseImage: baseImage.trim() || undefined, - }) + }); toast({ title: t('shells.update_success'), - }) + }); } else { await shellApis.createShell({ name: name.trim(), displayName: displayName.trim() || undefined, baseShellRef, baseImage: baseImage.trim(), - }) + }); toast({ title: t('shells.create_success'), - }) + }); } - onClose() + onClose(); } catch (error) { toast({ variant: 'destructive', title: isEditing ? 
t('shells.errors.update_failed') : t('shells.errors.create_failed'), description: (error as Error).message, - }) + }); } finally { - setSaving(false) + setSaving(false); } - } + }; const handleBack = useCallback(() => { // Clean up polling when going back if (pollingRef.current) { - clearInterval(pollingRef.current) + clearInterval(pollingRef.current); } - onClose() - }, [onClose]) + onClose(); + }, [onClose]); useEffect(() => { const handleEsc = (event: KeyboardEvent) => { - if (event.key !== 'Escape') return - handleBack() - } + if (event.key !== 'Escape') return; + handleBack(); + }; - window.addEventListener('keydown', handleEsc) - return () => window.removeEventListener('keydown', handleEsc) - }, [handleBack]) + window.addEventListener('keydown', handleEsc); + return () => window.removeEventListener('keydown', handleEsc); + }, [handleBack]); // Get stage display text const getStageDisplayText = (status: ValidationStage | 'error' | 'success' | 'failed') => { switch (status) { case 'submitted': - return t('shells.validation_stage_submitted') + return t('shells.validation_stage_submitted'); case 'pulling_image': - return t('shells.validation_stage_pulling') + return t('shells.validation_stage_pulling'); case 'starting_container': - return t('shells.validation_stage_starting') + return t('shells.validation_stage_starting'); case 'running_checks': - return t('shells.validation_stage_checking') + return t('shells.validation_stage_checking'); case 'completed': case 'success': - return t('shells.validation_passed') + return t('shells.validation_passed'); case 'failed': case 'error': - return t('shells.validation_not_passed') + return t('shells.validation_not_passed'); default: - return status + return status; } - } + }; return (
@@ -494,7 +494,7 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => {
{shell.displayName || shell.name}
- ({shell.runtime})
+ ({shell.shellType})
))} @@ -513,13 +513,13 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => { id="baseImage" value={baseImage} onChange={e => { - setBaseImage(e.target.value) + setBaseImage(e.target.value); // Reset validation status on change - setValidationStatus(null) - setValidationId(null) + setValidationStatus(null); + setValidationId(null); if (pollingRef.current) { - clearInterval(pollingRef.current) - pollingRef.current = null + clearInterval(pollingRef.current); + pollingRef.current = null; } }} placeholder="ghcr.io/your-org/your-image:latest" @@ -632,7 +632,7 @@ const ShellEdit: React.FC = ({ shell, onClose, toast }) => {
- ) -} + ); +}; -export default ShellEdit +export default ShellEdit; diff --git a/frontend/src/features/settings/components/ShellList.tsx b/frontend/src/features/settings/components/ShellList.tsx index 2b2d78d0..d308d36e 100644 --- a/frontend/src/features/settings/components/ShellList.tsx +++ b/frontend/src/features/settings/components/ShellList.tsx @@ -2,23 +2,18 @@ // // SPDX-License-Identifier: Apache-2.0 -'use client' -import '@/features/common/scrollbar.css' - -import React, { useEffect, useState, useCallback } from 'react' -import { Button } from '@/components/ui/button' -import { Card } from '@/components/ui/card' -import { Tag } from '@/components/ui/tag' -import { - CommandLineIcon, - PencilIcon, - TrashIcon, - GlobeAltIcon, -} from '@heroicons/react/24/outline' -import { Loader2 } from 'lucide-react' -import { useToast } from '@/hooks/use-toast' -import { useTranslation } from '@/hooks/useTranslation' -import ShellEdit from './ShellEdit' +'use client'; +import '@/features/common/scrollbar.css'; + +import React, { useEffect, useState, useCallback } from 'react'; +import { Button } from '@/components/ui/button'; +import { Card } from '@/components/ui/card'; +import { Tag } from '@/components/ui/tag'; +import { CommandLineIcon, PencilIcon, TrashIcon, GlobeAltIcon } from '@heroicons/react/24/outline'; +import { Loader2 } from 'lucide-react'; +import { useToast } from '@/hooks/use-toast'; +import { useTranslation } from '@/hooks/useTranslation'; +import ShellEdit from './ShellEdit'; import { AlertDialog, AlertDialogAction, @@ -28,77 +23,77 @@ import { AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, -} from '@/components/ui/alert-dialog' -import { shellApis, UnifiedShell } from '@/apis/shells' -import UnifiedAddButton from '@/components/common/UnifiedAddButton' +} from '@/components/ui/alert-dialog'; +import { shellApis, UnifiedShell } from '@/apis/shells'; +import UnifiedAddButton from '@/components/common/UnifiedAddButton'; const ShellList: React.FC = () => { - const { t } = useTranslation('common') - const { toast } = useToast() - const [shells, setShells] = useState([]) - const [loading, setLoading] = useState(true) - const [editingShell, setEditingShell] = useState(null) - const [isCreating, setIsCreating] = useState(false) - const [deleteConfirmShell, setDeleteConfirmShell] = useState(null) + const { t } = useTranslation('common'); + const { toast } = useToast(); + const [shells, setShells] = useState([]); + const [loading, setLoading] = useState(true); + const [editingShell, setEditingShell] = useState(null); + const [isCreating, setIsCreating] = useState(false); + const [deleteConfirmShell, setDeleteConfirmShell] = useState(null); const fetchShells = useCallback(async () => { - setLoading(true) + setLoading(true); try { - const response = await shellApis.getUnifiedShells() - setShells(response.data || []) + const response = await shellApis.getUnifiedShells(); + setShells(response.data || []); } catch (error) { - console.error('Failed to fetch shells:', error) + console.error('Failed to fetch shells:', error); toast({ variant: 'destructive', title: t('shells.errors.load_shells_failed'), - }) + }); } finally { - setLoading(false) + setLoading(false); } - }, [toast, t]) + }, [toast, t]); useEffect(() => { - fetchShells() - }, [fetchShells]) + fetchShells(); + }, [fetchShells]); const handleDelete = async () => { - if (!deleteConfirmShell) return + if (!deleteConfirmShell) return; try { - await shellApis.deleteShell(deleteConfirmShell.name) + await 
shellApis.deleteShell(deleteConfirmShell.name); toast({ title: t('shells.delete_success'), - }) - setDeleteConfirmShell(null) - fetchShells() + }); + setDeleteConfirmShell(null); + fetchShells(); } catch (error) { toast({ variant: 'destructive', title: t('shells.errors.delete_failed'), description: (error as Error).message, - }) + }); } - } + }; const handleEdit = (shell: UnifiedShell) => { - if (shell.type === 'public') return - setEditingShell(shell) - } + if (shell.type === 'public') return; + setEditingShell(shell); + }; const handleEditClose = () => { - setEditingShell(null) - setIsCreating(false) - fetchShells() - } + setEditingShell(null); + setIsCreating(false); + fetchShells(); + }; - const getShellTypeLabel = (shellType?: string | null) => { - if (shellType === 'local_engine') return 'Local Engine' - if (shellType === 'external_api') return 'External API' - return shellType || 'Unknown' - } + const getExecutionTypeLabel = (executionType?: string | null) => { + if (executionType === 'local_engine') return 'Local Engine'; + if (executionType === 'external_api') return 'External API'; + return executionType || 'Unknown'; + }; if (editingShell || isCreating) { - return + return ; } return ( @@ -132,7 +127,7 @@ const ShellList: React.FC = () => { <>
{shells.map(shell => { - const isPublic = shell.type === 'public' + const isPublic = shell.type === 'public'; return ( { )}
- {shell.runtime} + {shell.shellType} - {getShellTypeLabel(shell.shellType)} + {getExecutionTypeLabel(shell.executionType)} {shell.baseImage && ( - + {shell.baseImage} )} @@ -202,7 +200,7 @@ const ShellList: React.FC = () => {
- ) + ); })} @@ -238,7 +236,7 @@ const ShellList: React.FC = () => { - ) -} + ); +}; -export default ShellList +export default ShellList; diff --git a/frontend/src/features/settings/components/TeamEdit.tsx b/frontend/src/features/settings/components/TeamEdit.tsx index b3ad9cc8..8022d658 100644 --- a/frontend/src/features/settings/components/TeamEdit.tsx +++ b/frontend/src/features/settings/components/TeamEdit.tsx @@ -24,6 +24,7 @@ import { TeamMode, getFilteredBotsForMode, AgentType } from './team-modes'; import { createTeam, updateTeam } from '../services/teams'; import TeamEditDrawer from './TeamEditDrawer'; import { useTranslation } from '@/hooks/useTranslation'; +import { shellApis, UnifiedShell } from '@/apis/shells'; // Import mode-specific editors import SoloModeEditor from './team-modes/SoloModeEditor'; @@ -83,15 +84,30 @@ export default function TeamEdit(props: TeamEditProps) { const wasDrawerOpenRef = useRef(false); // Store unsaved team prompts const [unsavedPrompts, setUnsavedPrompts] = useState>({}); - // Mode change confirmation dialog state const [modeChangeDialogVisible, setModeChangeDialogVisible] = useState(false); const [pendingMode, setPendingMode] = useState(null); - // Filter bots based on current mode + // Shells data for resolving custom shell runtime types + const [shells, setShells] = useState([]); + + // Load shells data on mount + useEffect(() => { + const fetchShells = async () => { + try { + const response = await shellApis.getUnifiedShells(); + setShells(response.data || []); + } catch (error) { + console.error('Failed to fetch shells:', error); + } + }; + fetchShells(); + }, []); + + // Filter bots based on current mode, using shells to resolve custom shell runtime types const filteredBots = useMemo(() => { - return getFilteredBotsForMode(bots, mode); - }, [bots, mode]); + return getFilteredBotsForMode(bots, mode, shells); + }, [bots, mode, shells]); // Get allowed agents for current mode const allowedAgentsForMode = useMemo((): AgentType[] | undefined => { @@ -274,17 +290,17 @@ export default function TeamEdit(props: TeamEditProps) { setModeChangeDialogVisible(false); setPendingMode(null); }; - // Get currently selected agent_name (from leader or selected bot) - // Note: agent_name restriction has been removed - users can now select any mode - const selectedAgentName = useMemo(() => { - // No agent_name restriction - always return null + // Get currently selected shell_type (from leader or selected bot) + // Note: shell_type restriction has been removed - users can now select any mode + const selectedShellType = useMemo(() => { + // No shell_type restriction - always return null return null; }, []); const isDifyLeader = useMemo(() => { if (leaderBotId === null) return false; const leader = filteredBots.find((b: Bot) => b.id === leaderBotId); - return leader?.agent_name === 'Dify'; + return leader?.shell_type === 'Dify'; }, [leaderBotId, filteredBots]); // Leader change handler @@ -296,7 +312,7 @@ export default function TeamEdit(props: TeamEditProps) { const newLeader = filteredBots.find((b: Bot) => b.id === botId); // If the new leader is Dify, clear the selected bots - if (newLeader?.agent_name === 'Dify') { + if (newLeader?.shell_type === 'Dify') { setSelectedBotKeys([]); } @@ -349,7 +365,7 @@ export default function TeamEdit(props: TeamEditProps) { // For solo mode, only use leaderBotId const selectedIds = mode === 'solo' ? 
[] : selectedBotKeys.map(k => Number(k)); - // Note: agent_name consistency validation has been removed - users can now mix different agent types + // Note: shell_type consistency validation has been removed - users can now mix different agent types // Assemble bots data (per-step prompt not supported, all prompts empty) // Ensure leader bot is first, others follow transfer order const allBotIds: number[] = []; @@ -606,7 +622,7 @@ export default function TeamEdit(props: TeamEditProps) { unsavedPrompts={unsavedPrompts} teamPromptMap={teamPromptMap} isDifyLeader={isDifyLeader} - selectedAgentName={selectedAgentName} + selectedShellType={selectedShellType} leaderOptions={leaderOptions} toast={toast} onEditBot={handleEditBot} diff --git a/frontend/src/features/settings/components/team-modes/BotTransfer.tsx b/frontend/src/features/settings/components/team-modes/BotTransfer.tsx index c60f82eb..798ce586 100644 --- a/frontend/src/features/settings/components/team-modes/BotTransfer.tsx +++ b/frontend/src/features/settings/components/team-modes/BotTransfer.tsx @@ -24,7 +24,7 @@ export interface BotTransferProps { unsavedPrompts: Record; teamPromptMap: Map; isDifyLeader?: boolean; - selectedAgentName?: string | null; + selectedShellType?: string | null; /** Whether to exclude leader from transfer list */ excludeLeader?: boolean; /** Whether to auto-set first selected bot as leader */ @@ -46,7 +46,7 @@ export default function BotTransfer({ unsavedPrompts, teamPromptMap, isDifyLeader = false, - selectedAgentName = null, + selectedShellType = null, excludeLeader = false, autoSetLeader = false, sortable = false, @@ -110,13 +110,13 @@ export default function BotTransfer({ return filteredBots.map(b => ({ key: String(b.id), title: b.name, - description: b.agent_name, + description: b.shell_type, disabled: isDifyLeader || - // Disable options not matching agent_name if already selected - (selectedAgentName !== null && b.agent_name !== selectedAgentName), + // Disable options not matching shell_type if already selected + (selectedShellType !== null && b.shell_type !== selectedShellType), })); - }, [bots, isDifyLeader, selectedAgentName, excludeLeader, leaderBotId]); + }, [bots, isDifyLeader, selectedShellType, excludeLeader, leaderBotId]); // Transfer change handler const onTransferChange = ( diff --git a/frontend/src/features/settings/components/team-modes/LeaderModeEditor.tsx b/frontend/src/features/settings/components/team-modes/LeaderModeEditor.tsx index 6f6cafba..1a612f01 100644 --- a/frontend/src/features/settings/components/team-modes/LeaderModeEditor.tsx +++ b/frontend/src/features/settings/components/team-modes/LeaderModeEditor.tsx @@ -31,7 +31,7 @@ export interface LeaderModeEditorProps { unsavedPrompts: Record; teamPromptMap: Map; isDifyLeader: boolean; - selectedAgentName: string | null; + selectedShellType: string | null; leaderOptions: Bot[]; toast: ReturnType['toast']; onEditBot: (botId: number) => void; @@ -50,7 +50,7 @@ export default function LeaderModeEditor({ unsavedPrompts, teamPromptMap, isDifyLeader, - selectedAgentName, + selectedShellType, leaderOptions, onEditBot, onCreateBot, @@ -83,7 +83,7 @@ export default function LeaderModeEditor({ {bots.find(b => b.id === leaderBotId)?.name || ''} - ({bots.find(b => b.id === leaderBotId)?.agent_name || ''}) + ({bots.find(b => b.id === leaderBotId)?.shell_type || ''}) @@ -134,11 +134,11 @@ export default function LeaderModeEditor({ {b.name}{' '} - ({b.agent_name}) + ({b.shell_type}) -

{`${b.name} (${b.agent_name})`}

+

{`${b.name} (${b.shell_type})`}

{teamPromptMap.get(b.id) && ( @@ -194,7 +194,7 @@ export default function LeaderModeEditor({ unsavedPrompts={unsavedPrompts} teamPromptMap={teamPromptMap} isDifyLeader={isDifyLeader} - selectedAgentName={selectedAgentName} + selectedShellType={selectedShellType} excludeLeader={true} onEditBot={onEditBot} onCreateBot={onCreateBot} diff --git a/frontend/src/features/settings/components/team-modes/SoloModeEditor.tsx b/frontend/src/features/settings/components/team-modes/SoloModeEditor.tsx index 72d60763..f6918d12 100644 --- a/frontend/src/features/settings/components/team-modes/SoloModeEditor.tsx +++ b/frontend/src/features/settings/components/team-modes/SoloModeEditor.tsx @@ -190,7 +190,7 @@ export default function SoloModeEditor({ {selectedBot.name} - ({selectedBot.agent_name}) + ({selectedBot.shell_type}) ) : ( @@ -221,11 +221,11 @@ export default function SoloModeEditor({ {b.name}{' '} - ({b.agent_name}) + ({b.shell_type}) -

{`${b.name} (${b.agent_name})`}

+

{`${b.name} (${b.shell_type})`}

diff --git a/frontend/src/features/settings/components/team-modes/index.ts b/frontend/src/features/settings/components/team-modes/index.ts index 37de350a..7be292c1 100644 --- a/frontend/src/features/settings/components/team-modes/index.ts +++ b/frontend/src/features/settings/components/team-modes/index.ts @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 import { Bot } from '@/types/api'; +import { UnifiedShell } from '@/apis/shells'; export { default as SoloModeEditor } from './SoloModeEditor'; export { default as PipelineModeEditor } from './PipelineModeEditor'; @@ -31,13 +32,44 @@ const MODE_AGENT_FILTER: Record = { collaborate: ['Agno'], }; +/** + * Get the actual shell type for a bot's shell_type + * For custom shells, the shell_type is the shell name, but we need to check + * the shell's shellType field to get the actual agent type (ClaudeCode, Agno, etc.) + * + * @param shellType - The bot's shell_type (could be shell name or shell type) + * @param shellMap - Map of shell name to UnifiedShell object + * @returns The actual shell type (ClaudeCode, Agno, Dify, etc.) + */ +function getActualShellType(shellType: string, shellMap: Map): string { + // First check if shellType is already a known agent type + const knownAgentTypes: AgentType[] = ['ClaudeCode', 'Agno', 'Dify']; + if (knownAgentTypes.includes(shellType as AgentType)) { + return shellType; + } + + // Otherwise, look up the shell to get its shellType + const shell = shellMap.get(shellType); + if (shell) { + return shell.shellType; + } + + // Fallback to the original shell_type if shell not found + return shellType; +} + /** * Filter bots based on the selected team mode * @param bots - All available bots * @param mode - Current team mode + * @param shells - Optional list of shells for resolving custom shell runtime types * @returns Filtered bots that are compatible with the mode */ -export function getFilteredBotsForMode(bots: Bot[], mode: TeamMode): Bot[] { +export function getFilteredBotsForMode( + bots: Bot[], + mode: TeamMode, + shells?: UnifiedShell[] +): Bot[] { const allowedAgents = MODE_AGENT_FILTER[mode]; // If null, all agents are allowed @@ -45,6 +77,17 @@ export function getFilteredBotsForMode(bots: Bot[], mode: TeamMode): Bot[] { return bots; } - // Filter bots by allowed agent types - return bots.filter(bot => allowedAgents.includes(bot.agent_name as AgentType)); + // Build shell map for quick lookup + const shellMap = new Map(); + if (shells) { + shells.forEach(shell => { + shellMap.set(shell.name, shell); + }); + } + + // Filter bots by allowed agent types, resolving custom shell types + return bots.filter(bot => { + const actualShellType = getActualShellType(bot.shell_type, shellMap); + return allowedAgents.includes(actualShellType as AgentType); + }); } diff --git a/frontend/src/features/settings/services/bots.ts b/frontend/src/features/settings/services/bots.ts index 10c4a0e6..7992b517 100644 --- a/frontend/src/features/settings/services/bots.ts +++ b/frontend/src/features/settings/services/bots.ts @@ -18,7 +18,7 @@ export async function fetchBotsList(): Promise { items.forEach((bot, index) => { console.log(`[DEBUG] Bot ${index} (${bot.name}):`, { id: bot.id, - agent_name: bot.agent_name, + shell_type: bot.shell_type, agent_config: bot.agent_config, }); }); diff --git a/frontend/src/i18n/locales/en/common.json b/frontend/src/i18n/locales/en/common.json index fea61f35..ba2df220 100644 --- a/frontend/src/i18n/locales/en/common.json +++ b/frontend/src/i18n/locales/en/common.json @@ -125,7 +125,9 @@ 
"dify_parameters_loading": "Loading parameters...", "dify_parameters_load_failed": "Failed to load application parameters", "dify_parameters_refresh": "Refresh parameters", - "dify_no_parameters": "No parameters configured for this application." + "dify_no_parameters": "No parameters configured for this application.", + "custom_shell": "Custom", + "public_model": "Public" }, "bots": { "title": "AI Assistant", diff --git a/frontend/src/i18n/locales/zh-CN/common.json b/frontend/src/i18n/locales/zh-CN/common.json index a1ad62e0..d21009be 100644 --- a/frontend/src/i18n/locales/zh-CN/common.json +++ b/frontend/src/i18n/locales/zh-CN/common.json @@ -140,7 +140,9 @@ "dify_parameters_loading": "加载参数中...", "dify_parameters_load_failed": "加载应用参数失败", "dify_parameters_refresh": "刷新参数", - "dify_no_parameters": "此应用未配置输入参数。" + "dify_no_parameters": "此应用未配置输入参数。", + "custom_shell": "自定义", + "public_model": "公共" }, "messages": { "copy_markdown": "复制 Markdown 内容", @@ -313,7 +315,7 @@ "bot": "机器人", "team": "机器人", "models": "模型", - "shells": "Shell", + "shells": "执行器", "sections": { "general": "通用" } diff --git a/frontend/src/types/api.ts b/frontend/src/types/api.ts index afc1fd66..168595f6 100644 --- a/frontend/src/types/api.ts +++ b/frontend/src/types/api.ts @@ -29,7 +29,8 @@ export interface GitInfo { export interface Bot { id: number; name: string; - agent_name: string; + shell_name: string; // Shell name user selected (e.g., 'ClaudeCode', 'my-custom-shell') + shell_type: string; // Actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') agent_config: Record; system_prompt: string; mcp_servers: Record; @@ -104,7 +105,7 @@ export interface Team { /** Bot summary with only necessary fields for team list */ export interface BotSummary { agent_config?: Record; - agent_name?: string; + shell_type?: string; } /** Bot information (used for Team.bots) */ From 7a73bf6b9bace5d913ce89dc51f6303b0c5f83ec Mon Sep 17 00:00:00 2001 From: axb Date: Mon, 1 Dec 2025 01:03:12 +0800 Subject: [PATCH 08/10] style(backend): format code with black --- backend/app/api/endpoints/adapter/shells.py | 48 ++++++++------ backend/app/schemas/bot.py | 4 +- backend/app/schemas/kind.py | 4 +- backend/app/services/adapters/bot_kinds.py | 15 +++-- .../app/services/adapters/executor_kinds.py | 2 + backend/app/services/adapters/shell_utils.py | 64 +++++++++---------- 6 files changed, 75 insertions(+), 62 deletions(-) diff --git a/backend/app/api/endpoints/adapter/shells.py b/backend/app/api/endpoints/adapter/shells.py index 3e5ab9c0..52fdbb10 100644 --- a/backend/app/api/endpoints/adapter/shells.py +++ b/backend/app/api/endpoints/adapter/shells.py @@ -39,7 +39,9 @@ class UnifiedShell(BaseModel): baseImage: Optional[str] = None baseShellRef: Optional[str] = None supportModel: Optional[List[str]] = None - executionType: Optional[str] = None # 'local_engine' or 'external_api' (from labels) + executionType: Optional[str] = ( + None # 'local_engine' or 'external_api' (from labels) + ) class ShellCreateRequest(BaseModel): @@ -151,23 +153,23 @@ def list_unified_shells( current_user: User = Depends(security.get_current_user), ): """ - Get unified list of all available shells (both public and user-defined). + Get unified list of all available shells (both public and user-defined). - Each shell includes a 'type' field ('public' or 'user') to identify its source. -Response: -{ - "data": [ + Each shell includes a 'type' field ('public' or 'user') to identify its source. 
+ Response: { - "name": "shell-name", - "type": "public" | "user", - "displayName": "Human Readable Name", - "shellType": "ClaudeCode", - "baseImage": "ghcr.io/...", - "executionType": "local_engine" | "external_api" - } - ] -} + "data": [ + { + "name": "shell-name", + "type": "public" | "user", + "displayName": "Human Readable Name", + "shellType": "ClaudeCode", + "baseImage": "ghcr.io/...", + "executionType": "local_engine" | "external_api" + } + ] } + } """ result = [] @@ -688,7 +690,9 @@ async def _update_validation_status( return False -@router.get("/validation-status/{validation_id}", response_model=ValidationStatusResponse) +@router.get( + "/validation-status/{validation_id}", response_model=ValidationStatusResponse +) async def get_validation_status( validation_id: str, current_user: User = Depends(security.get_current_user), @@ -722,7 +726,9 @@ async def get_validation_status( raise except Exception as e: logger.error(f"Error getting validation status: {e}") - raise HTTPException(status_code=500, detail=f"Error getting validation status: {str(e)}") + raise HTTPException( + status_code=500, detail=f"Error getting validation status: {str(e)}" + ) @router.post("/validation-status/{validation_id}") @@ -749,11 +755,15 @@ async def update_validation_status( ) if not success: - raise HTTPException(status_code=500, detail="Failed to update validation status") + raise HTTPException( + status_code=500, detail="Failed to update validation status" + ) return {"status": "success", "message": "Validation status updated"} except HTTPException: raise except Exception as e: logger.error(f"Error updating validation status: {e}") - raise HTTPException(status_code=500, detail=f"Error updating validation status: {str(e)}") + raise HTTPException( + status_code=500, detail=f"Error updating validation status: {str(e)}" + ) diff --git a/backend/app/schemas/bot.py b/backend/app/schemas/bot.py index 3535c785..26a269db 100644 --- a/backend/app/schemas/bot.py +++ b/backend/app/schemas/bot.py @@ -26,7 +26,9 @@ class BotUpdate(BaseModel): """Bot update model - request schema""" name: Optional[str] = None - shell_name: Optional[str] = None # Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') + shell_name: Optional[str] = ( + None # Shell name (e.g., 'ClaudeCode', 'Agno', 'my-custom-shell') + ) agent_config: Optional[dict[str, Any]] = None system_prompt: Optional[str] = None mcp_servers: Optional[dict[str, Any]] = None diff --git a/backend/app/schemas/kind.py b/backend/app/schemas/kind.py index c92ab287..af1181c9 100644 --- a/backend/app/schemas/kind.py +++ b/backend/app/schemas/kind.py @@ -115,7 +115,9 @@ class ShellSpec(BaseModel): ) # Agent type: 'ClaudeCode', 'Agno', 'Dify', etc. Accepts 'runtime' for backward compatibility supportModel: Optional[List[str]] = None baseImage: Optional[str] = None # Custom base image address for user-defined shells - baseShellRef: Optional[str] = None # Reference to base public shell (e.g., "ClaudeCode") + baseShellRef: Optional[str] = ( + None # Reference to base public shell (e.g., "ClaudeCode") + ) class ShellStatus(Status): diff --git a/backend/app/services/adapters/bot_kinds.py b/backend/app/services/adapters/bot_kinds.py index d73eb3a9..e8f33ddc 100644 --- a/backend/app/services/adapters/bot_kinds.py +++ b/backend/app/services/adapters/bot_kinds.py @@ -241,13 +241,14 @@ def create_with_user( ) -> Dict[str, Any]: """ Create user Bot using kinds table. 
- + Bot's shellRef directly points to the user-selected Shell (custom or public), instead of creating a dedicated shell for each bot. """ import logging + logger = logging.getLogger(__name__) - + # Check duplicate bot name under the same user (only active bots) existing = ( db.query(Kind) @@ -349,7 +350,7 @@ def create_with_user( shell_info = get_shell_info_by_name(db, obj_in.shell_name, user_id) except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) - + logger.info( f"[DEBUG] create_with_user: shell_name={obj_in.shell_name}, " f"resolved shell_type={shell_info['shell_type']}, " @@ -557,7 +558,7 @@ def update_with_user( shell_info = get_shell_info_by_name(db, new_shell_name, user_id) except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) - + logger.info( f"[DEBUG] update_with_user: shell_name={new_shell_name}, " f"resolved shell_type={shell_info['shell_type']}, " @@ -572,7 +573,7 @@ def update_with_user( bot_crd.spec.shellRef.namespace = "default" bot.json = bot_crd.model_dump() flag_modified(bot, "json") - + # Update shell reference for response shell = get_shell_by_name(db, new_shell_name, user_id) @@ -791,7 +792,7 @@ def update_with_user( def delete_with_user(self, db: Session, *, bot_id: int, user_id: int) -> None: """ Delete user Bot and related components. - + Note: Shell is not deleted because it's now a reference to user's custom shell or public shell, not a dedicated shell for this bot. """ @@ -890,7 +891,7 @@ def _get_bot_components(self, db: Session, bot: Kind, user_id: int): # Get shell - try user's custom shells first, then public shells shell_ref_name = bot_crd.spec.shellRef.name shell = get_shell_by_name(db, shell_ref_name, user_id) - + logger.info( f"[DEBUG] _get_bot_components: shellRef.name={shell_ref_name}, " f"shell found={shell is not None}, " diff --git a/backend/app/services/adapters/executor_kinds.py b/backend/app/services/adapters/executor_kinds.py index 6abfa7f4..b386eac4 100644 --- a/backend/app/services/adapters/executor_kinds.py +++ b/backend/app/services/adapters/executor_kinds.py @@ -424,10 +424,12 @@ def _format_subtasks_response( if public_shell and public_shell.json: shell_crd_temp = Shell.model_validate(public_shell.json) shell_base_image = shell_crd_temp.spec.baseImage + # Create a mock shell object for compatibility class MockShell: def __init__(self, json_data): self.json = json_data + shell = MockShell(public_shell.json) # Get model for agent config (modelRef is optional) diff --git a/backend/app/services/adapters/shell_utils.py b/backend/app/services/adapters/shell_utils.py index 18f7c642..dd6a3f6e 100644 --- a/backend/app/services/adapters/shell_utils.py +++ b/backend/app/services/adapters/shell_utils.py @@ -27,13 +27,13 @@ def get_shell_by_name( ) -> Optional[Union[Kind, PublicShell]]: """ Get a Shell by name, first checking user's custom shells, then public shells. - + Args: db: Database session shell_name: Name of the shell (e.g., 'ClaudeCode', 'my-custom-shell') user_id: User ID namespace: Namespace (default: 'default') - + Returns: Kind object (for user shells) or PublicShell object (for public shells), or None if not found. 
@@ -50,11 +50,11 @@ def get_shell_by_name( ) .first() ) - + if user_shell: logger.debug(f"Found user shell '{shell_name}' for user {user_id}") return user_shell - + # Then, try to find in public shells public_shell = ( db.query(PublicShell) @@ -65,11 +65,11 @@ def get_shell_by_name( ) .first() ) - + if public_shell: logger.debug(f"Found public shell '{shell_name}'") return public_shell - + logger.warning(f"Shell '{shell_name}' not found in user shells or public shells") return None @@ -79,16 +79,16 @@ def get_shell_info_by_name( ) -> Dict[str, Any]: """ Get shell information by shell name. - + First tries to find a user-defined custom shell in kinds table, then falls back to public shells in public_shells table. - + Args: db: Database session shell_name: Name of the shell (e.g., 'ClaudeCode', 'my-custom-shell') user_id: User ID namespace: Namespace (default: 'default') - + Returns: Dict with: - shell_type: The actual agent type (e.g., 'ClaudeCode', 'Agno', 'Dify') @@ -96,7 +96,7 @@ def get_shell_info_by_name( - execution_type: 'local_engine' or 'external_api' - base_image: Base Docker image (optional) - is_custom: Whether this is a user-defined custom shell - + Raises: ValueError: If shell is not found """ @@ -112,7 +112,7 @@ def get_shell_info_by_name( ) .first() ) - + if user_shell and isinstance(user_shell.json, dict): shell_crd = Shell.model_validate(user_shell.json) # For custom shells, shellType contains the actual agent type @@ -131,7 +131,7 @@ def get_shell_info_by_name( f"base_image={result['base_image']}" ) return result - + # Then, try to find in public shells public_shell = ( db.query(PublicShell) @@ -142,7 +142,7 @@ def get_shell_info_by_name( ) .first() ) - + if public_shell and isinstance(public_shell.json, dict): shell_crd = Shell.model_validate(public_shell.json) # For public shells, the shell name IS the shell type (e.g., 'ClaudeCode') @@ -161,7 +161,7 @@ def get_shell_info_by_name( f"base_image={result['base_image']}" ) return result - + # Shell not found - raise error instead of using fallback raise ValueError(f"Shell '{shell_name}' not found in user shells or public shells") @@ -182,7 +182,7 @@ def get_shell_type( Returns: "local_engine" or "external_api" - + Raises: ValueError: If shell is not found """ @@ -235,36 +235,31 @@ def get_shells_by_names_batch( ) -> Dict[Tuple[str, str], Union[Kind, PublicShell]]: """ Batch-fetch shells by (name, namespace) keys. - + First queries user's custom shells from kinds table, then queries public shells for any missing keys. 
- + Args: db: Database session shell_keys: Set of (name, namespace) tuples to query user_id: User ID - + Returns: Dict mapping (name, namespace) to Kind or PublicShell objects """ if not shell_keys: return {} - + shell_map: Dict[Tuple[str, str], Union[Kind, PublicShell]] = {} - + # Build OR filter for user shells def build_user_shell_or_filters(keys: Set[Tuple[str, str]]): return ( - or_( - *[ - and_(Kind.name == n, Kind.namespace == ns) - for (n, ns) in keys - ] - ) + or_(*[and_(Kind.name == n, Kind.namespace == ns) for (n, ns) in keys]) if keys else None ) - + # Query user's custom shells first user_shell_filter = build_user_shell_or_filters(shell_keys) if user_shell_filter is not None: @@ -278,15 +273,16 @@ def build_user_shell_or_filters(keys: Set[Tuple[str, str]]): .filter(user_shell_filter) .all() ) - + for shell in user_shells: shell_map[(shell.name, shell.namespace)] = shell - + # Find missing keys and query public shells found_keys = set(shell_map.keys()) missing_keys = shell_keys - found_keys - + if missing_keys: + def build_public_shell_or_filters(keys: Set[Tuple[str, str]]): return ( or_( @@ -298,7 +294,7 @@ def build_public_shell_or_filters(keys: Set[Tuple[str, str]]): if keys else None ) - + public_shell_filter = build_public_shell_or_filters(missing_keys) if public_shell_filter is not None: public_shells = ( @@ -307,14 +303,14 @@ def build_public_shell_or_filters(keys: Set[Tuple[str, str]]): .filter(public_shell_filter) .all() ) - + for shell in public_shells: shell_map[(shell.name, shell.namespace)] = shell - + logger.debug( f"Batch fetched {len(shell_map)} shells for {len(shell_keys)} keys " f"(user: {len(shell_map) - len(missing_keys) + len(found_keys)}, " f"public: {len(shell_map) - len(found_keys)})" ) - + return shell_map From 04849fd16c65e6528b3fb4b33834c29f135d6054 Mon Sep 17 00:00:00 2001 From: axb Date: Mon, 1 Dec 2025 01:05:23 +0800 Subject: [PATCH 09/10] docs: update Shell YAML specification with shellType, baseImage and labels fields --- docs/en/reference/yaml-specification.md | 33 ++++++++++++++++++------- docs/zh/reference/yaml-specification.md | 33 ++++++++++++++++++------- 2 files changed, 48 insertions(+), 18 deletions(-) diff --git a/docs/en/reference/yaml-specification.md b/docs/en/reference/yaml-specification.md index de837551..83996b88 100644 --- a/docs/en/reference/yaml-specification.md +++ b/docs/en/reference/yaml-specification.md @@ -185,7 +185,7 @@ spec: ## 🐚 Shell -Shell defines the agent's runtime environment, specifying the runtime type and supported models. +Shell defines the agent's runtime environment, specifying the runtime type, base image, and supported models. 
### Complete Configuration Example @@ -195,8 +195,14 @@ kind: Shell metadata: name: ClaudeCode namespace: default + labels: + type: local_engine spec: - runtime: "ClaudeCode" + shellType: ClaudeCode + supportModel: [] + baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 +status: + state: Available ``` ### Field Description @@ -205,16 +211,25 @@ spec: |------|------|----------|-------------| | `metadata.name` | string | Yes | Unique identifier for the Shell | | `metadata.namespace` | string | Yes | Namespace, typically `default` | -| `spec.runtime` | string | Yes | Runtime type, such as `ClaudeCode`, `Agno` | +| `metadata.labels` | object | No | Labels for categorization, e.g., `type: local_engine` or `type: external_api` | +| `spec.shellType` | string | Yes | Shell type, such as `ClaudeCode`, `Agno`, `Dify` | | `spec.supportModel` | array | No | List of supported model types | +| `spec.baseImage` | string | No | Docker base image for local engine shells (required for `local_engine` type) | +| `status.state` | string | No | Shell status: `Available` or `Unavailable` | + +### Shell Types + +| Type | Label | Description | +|------|-------|-------------| +| `ClaudeCode` | `local_engine` | Claude Code runtime, requires `baseImage` | +| `Agno` | `local_engine` | Agno runtime, requires `baseImage` | +| `Dify` | `external_api` | Dify external API runtime, no `baseImage` needed | -### Supported Runtimes +### Labels -| Runtime | Description | -|---------|-------------| -| `ClaudeCode` | Claude Code runtime | -| `Agno` | Agno runtime | -| `Dify` | Dify runtime (planned) | +| Label | Values | Description | +|-------|--------|-------------| +| `type` | `local_engine`, `external_api` | Indicates whether the shell runs locally or connects to external API | --- diff --git a/docs/zh/reference/yaml-specification.md b/docs/zh/reference/yaml-specification.md index 62a2d518..3bb1bdb8 100644 --- a/docs/zh/reference/yaml-specification.md +++ b/docs/zh/reference/yaml-specification.md @@ -185,7 +185,7 @@ spec: ## 🐚 Shell -Shell 定义了智能体的运行环境,指定了运行时类型和支持的模型。 +Shell 定义了智能体的运行环境,指定了运行时类型、基础镜像和支持的模型。 ### 完整配置示例 @@ -195,8 +195,14 @@ kind: Shell metadata: name: ClaudeCode namespace: default + labels: + type: local_engine spec: - runtime: "ClaudeCode" + shellType: ClaudeCode + supportModel: [] + baseImage: ghcr.io/wecode-ai/wegent-base-python3.12:1.0.0 +status: + state: Available ``` ### 字段说明 @@ -205,16 +211,25 @@ spec: |------|------|------|------| | `metadata.name` | string | 是 | Shell 的唯一标识符 | | `metadata.namespace` | string | 是 | 命名空间,通常为 `default` | -| `spec.runtime` | string | 是 | 运行时类型,如 `ClaudeCode`、`Agno` | +| `metadata.labels` | object | 否 | 分类标签,如 `type: local_engine` 或 `type: external_api` | +| `spec.shellType` | string | 是 | Shell 类型,如 `ClaudeCode`、`Agno`、`Dify` | | `spec.supportModel` | array | 否 | 支持的模型类型列表 | +| `spec.baseImage` | string | 否 | 本地引擎 Shell 的 Docker 基础镜像(`local_engine` 类型必填) | +| `status.state` | string | 否 | Shell 状态:`Available` 或 `Unavailable` | + +### Shell 类型 + +| 类型 | 标签 | 说明 | +|------|------|------| +| `ClaudeCode` | `local_engine` | Claude Code 运行时,需要 `baseImage` | +| `Agno` | `local_engine` | Agno 运行时,需要 `baseImage` | +| `Dify` | `external_api` | Dify 外部 API 运行时,不需要 `baseImage` | -### 支持的运行时 +### 标签说明 -| 运行时 | 说明 | -|--------|------| -| `ClaudeCode` | Claude Code 运行时 | -| `Agno` | Agno 运行时 | -| `Dify` | Dify 运行时(计划中) | +| 标签 | 可选值 | 说明 | +|------|--------|------| +| `type` | `local_engine`, `external_api` | 表示 Shell 是本地运行还是连接外部 API | --- From 
64036ad598825ceb423492e836163d627a74de4f Mon Sep 17 00:00:00 2001 From: yansheng3 Date: Tue, 2 Dec 2025 12:16:34 +0800 Subject: [PATCH 10/10] feat: Agno supports Claude models and fixes dependency issues. --- backend/app/api/endpoints/adapter/models.py | 3 ++- .../app/services/model_aggregation_service.py | 17 ++++++++++++----- backend/requirements.txt | 4 ++++ .../features/settings/components/BotEdit.tsx | 9 +++++++-- 4 files changed, 25 insertions(+), 8 deletions(-) diff --git a/backend/app/api/endpoints/adapter/models.py b/backend/app/api/endpoints/adapter/models.py index 9c265619..b2d1d51c 100644 --- a/backend/app/api/endpoints/adapter/models.py +++ b/backend/app/api/endpoints/adapter/models.py @@ -403,7 +403,8 @@ def get_compatible_models( model_type = env.get("model", "") # Filter compatible models - if shell_type == "Agno" and model_type == "openai": + # Agno supports both OpenAI and Claude models + if shell_type == "Agno" and model_type in ["openai", "claude"]: compatible_models.append({"name": model_kind.name}) elif shell_type == "ClaudeCode" and model_type == "claude": compatible_models.append({"name": model_kind.name}) diff --git a/backend/app/services/model_aggregation_service.py b/backend/app/services/model_aggregation_service.py index 86a57fed..6c01d663 100644 --- a/backend/app/services/model_aggregation_service.py +++ b/backend/app/services/model_aggregation_service.py @@ -160,16 +160,23 @@ def _is_model_compatible_with_shell( True if compatible, False otherwise """ # Shell type to model provider mapping - shell_provider_map = {"Agno": "openai", "ClaudeCode": "claude"} + # Agno supports both OpenAI and Claude models + shell_provider_map = { + "Agno": ["openai", "claude"], + "ClaudeCode": ["claude"] + } # If supportModel is specified in shell, use it if support_model: return provider in support_model - # Otherwise, filter by shell's required provider - required_provider = shell_provider_map.get(shell_type) - if required_provider: - return provider == required_provider + # Otherwise, filter by shell's supported providers + supported_providers = shell_provider_map.get(shell_type) + if supported_providers: + if isinstance(supported_providers, list): + return provider in supported_providers + else: + return provider == supported_providers # No filter, allow all return True diff --git a/backend/requirements.txt b/backend/requirements.txt index 529021cb..c7573085 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -29,6 +29,10 @@ httpx>=0.19.0 requests>=2.31.0 aiohttp>=3.8.0 +# AI/ML API clients +anthropic>=0.18.0 +openai>=1.0.0 + # Utilities python-dotenv==1.0.0 tenacity==8.2.3 diff --git a/frontend/src/features/settings/components/BotEdit.tsx b/frontend/src/features/settings/components/BotEdit.tsx index 410f3a36..bab7b568 100644 --- a/frontend/src/features/settings/components/BotEdit.tsx +++ b/frontend/src/features/settings/components/BotEdit.tsx @@ -892,7 +892,12 @@ const BotEdit: React.FC = ({ {agentName === 'ClaudeCode' && ( Claude (Anthropic) )} - {agentName === 'Agno' && OpenAI} + {agentName === 'Agno' && ( + <> + OpenAI + Claude (Anthropic) + + )} {/* Show all options if agent type is unknown or not selected */} {agentName !== 'ClaudeCode' && agentName !== 'Agno' && ( <> @@ -933,7 +938,7 @@ const BotEdit: React.FC = ({ : agentName === 'Agno' ? `{ "env": { - "model": "openai", + "model": "openai or claude", "model_id": "xxxxxx", "api_key": "xxxxxx", "base_url": "xxxxxx"
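
The provider filtering introduced in the last commit reduces to a small standalone rule: Agno shells now accept both `openai` and `claude` model providers, ClaudeCode stays `claude`-only, and an explicit `supportModel` list on the shell overrides the default mapping. A minimal sketch of that behaviour follows (illustrative only, not part of the patch; the function and constant names here are hypothetical):

```python
# Sketch of the compatibility rule encoded by get_compatible_models and
# _is_model_compatible_with_shell in PATCH 10, as a standalone function.
from typing import Dict, List, Optional

SHELL_PROVIDER_MAP: Dict[str, List[str]] = {
    "Agno": ["openai", "claude"],   # Agno now accepts OpenAI and Claude models
    "ClaudeCode": ["claude"],       # ClaudeCode remains Claude-only
}


def is_model_compatible(shell_type: str, provider: str,
                        support_model: Optional[List[str]] = None) -> bool:
    """Return True if a model provider may be paired with the given shell type."""
    # An explicit supportModel list on the shell takes precedence.
    if support_model:
        return provider in support_model
    supported = SHELL_PROVIDER_MAP.get(shell_type)
    if supported is None:
        return True  # unknown shell types are not filtered
    return provider in supported


if __name__ == "__main__":
    assert is_model_compatible("Agno", "claude")            # newly allowed
    assert is_model_compatible("Agno", "openai")
    assert not is_model_compatible("ClaudeCode", "openai")  # still rejected
```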