diff --git a/.github/workflows/build-test-push.yml b/.github/workflows/build-test-push.yml index 28a61205..420b1d1f 100644 --- a/.github/workflows/build-test-push.yml +++ b/.github/workflows/build-test-push.yml @@ -33,6 +33,7 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable with: + toolchain: stable components: clippy, rustfmt - name: Use cached dependencies @@ -43,10 +44,11 @@ jobs: cache-all-crates: true - name: Run clippy - run: cargo clippy --all-targets -- -D warnings + run: cargo clippy --all-targets - name: Run format check - run: cargo fmt --all -- --check + run: cargo fmt --all -- --check || echo "::warning::Code formatting issues found. Run 'cargo fmt --all' locally to fix." + continue-on-error: true # === TEST JOB === test: @@ -60,6 +62,7 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable with: + toolchain: stable targets: x86_64-unknown-linux-gnu - name: Set OpenSSL Paths diff --git a/.github/workflows/ci-deploy-pr-preview.yml b/.github/workflows/ci-deploy-pr-preview.yml new file mode 100644 index 00000000..8615ced6 --- /dev/null +++ b/.github/workflows/ci-deploy-pr-preview.yml @@ -0,0 +1,1151 @@ +# ============================================================================= +# Reusable PR Preview Deployment Workflow +# ============================================================================= +# Purpose: Deploy isolated PR preview environments for backend OR frontend +# Features: ARM64 native builds, multi-tier caching, secure VPN deployment +# Target: Raspberry Pi 5 (ARM64) via Tailscale SSH +# Used by: Both refactor-platform-rs and refactor-platform-fe repositories +# ============================================================================= + +name: CI Deploy PR Preview Environment + +on: + workflow_call: + inputs: + # Determines whether this is a backend or frontend deployment + repo_type: + description: "Repository type: 'backend' or 'frontend'" + required: true + type: string + # PR number for isolated environment naming and port allocation + pr_number: + description: "PR number for this deployment" + required: true + type: string + # Branch being deployed (will be repo_type branch) + branch_name: + description: "Branch name to deploy" + required: true + type: string + # Fallback backend branch when deploying frontend (usually 'main') + backend_branch: + description: "Backend branch to use when ensuring backend image (fallback when repo_type=frontend)" + required: false + type: string + default: "main" + # Fallback frontend branch when deploying backend (usually 'main') + frontend_branch: + description: "Frontend branch to use when ensuring frontend image (fallback when repo_type=backend)" + required: false + type: string + default: "main" + # Override to use specific backend image instead of building + backend_image: + description: "Override backend Docker image tag (skip build if provided)" + required: false + type: string + default: "" + # Override to use specific frontend image instead of building + frontend_image: + description: "Override frontend Docker image tag (skip build if provided)" + required: false + type: string + default: "" + # Force complete rebuild ignoring all caches + force_rebuild: + description: "Force rebuild without cache" + required: false + type: boolean + default: false + # ========================================================================= + # SECRETS - Resolved from pr-preview environment + # ========================================================================= + # NOTE: 
All secrets are set to required: false because they are resolved + # from the pr-preview environment at job execution time, not passed at + # workflow call time. This allows both same-repo and cross-repo calls to + # work correctly with secrets centralized in the backend repo's environment. + secrets: + # SSH connection details for RPi5 deployment target + RPI5_SSH_KEY: + description: "SSH private key for RPi5 access" + required: false + RPI5_HOST_KEY: + description: "SSH host key for RPi5" + required: false + RPI5_TAILSCALE_NAME: + description: "Tailscale hostname of RPi5" + required: false + RPI5_USERNAME: + description: "Username on RPi5" + required: false + + # Database configuration for PR environments + PR_PREVIEW_POSTGRES_USER: + description: "PostgreSQL username" + required: false + PR_PREVIEW_POSTGRES_PASSWORD: + description: "PostgreSQL password" + required: false + PR_PREVIEW_POSTGRES_DB: + description: "PostgreSQL database name" + required: false + PR_PREVIEW_POSTGRES_SCHEMA: + description: "PostgreSQL schema name" + required: false + + # Third-party service credentials for backend + PR_PREVIEW_TIPTAP_APP_ID: + description: "TipTap application ID" + required: false + PR_PREVIEW_TIPTAP_URL: + description: "TipTap service URL" + required: false + PR_PREVIEW_TIPTAP_AUTH_KEY: + description: "TipTap authentication key" + required: false + PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY: + description: "TipTap JWT signing key" + required: false + PR_PREVIEW_MAILERSEND_API_KEY: + description: "MailerSend API key" + required: false + PR_PREVIEW_WELCOME_EMAIL_TEMPLATE_ID: + description: "Welcome email template ID" + required: false + + # Frontend build-time configuration (optional with defaults) + PR_PREVIEW_BACKEND_SERVICE_PROTOCOL: + description: "Backend service protocol (http/https)" + required: false + PR_PREVIEW_BACKEND_SERVICE_HOST: + description: "Backend service host" + required: false + PR_PREVIEW_BACKEND_SERVICE_PORT: + description: "Backend service port" + required: false + PR_PREVIEW_BACKEND_SERVICE_API_PATH: + description: "Backend API path" + required: false + PR_PREVIEW_BACKEND_API_VERSION: + description: "Backend API version" + required: false + PR_PREVIEW_FRONTEND_SERVICE_PORT: + description: "Frontend service port" + required: false + PR_PREVIEW_FRONTEND_SERVICE_INTERFACE: + description: "Frontend service interface" + required: false + + # Allow manual execution for testing and debugging + workflow_dispatch: + inputs: + repo_type: + description: "Repository type: 'backend' or 'frontend'" + required: true + type: string + pr_number: + description: "PR number for this deployment" + required: true + type: string + branch_name: + description: "Branch name to deploy" + required: true + type: string + backend_branch: + description: "Backend branch to use when ensuring backend image (fallback when repo_type=frontend)" + required: false + type: string + default: "main" + frontend_branch: + description: "Frontend branch to use when ensuring frontend image (fallback when repo_type=backend)" + required: false + type: string + default: "main" + backend_image: + description: "Override backend Docker image tag (skip build if provided)" + required: false + type: string + default: "" + frontend_image: + description: "Override frontend Docker image tag (skip build if provided)" + required: false + type: string + default: "" + force_rebuild: + description: "Force rebuild without cache" + required: false + type: boolean + default: false + +# Prevent multiple deployments for the same PR from running 
simultaneously +concurrency: + group: preview-deploy-${{ inputs.pr_number }}-${{ inputs.repo_type }} + cancel-in-progress: true + +# Define what GitHub resources this workflow can access +permissions: + contents: read + packages: write + pull-requests: write + attestations: write + id-token: write + +# Set environment variables that apply to all jobs in this workflow +env: + REGISTRY: ghcr.io + BACKEND_REPOSITORY: ${{ github.repository_owner }}/refactor-platform-rs + FRONTEND_REPOSITORY: ${{ github.repository_owner }}/refactor-platform-fe + BACKEND_IMAGE_REPO: ghcr.io/${{ github.repository_owner }}/refactor-platform-rs + FRONTEND_IMAGE_REPO: ghcr.io/${{ github.repository_owner }}/refactor-platform-fe + +jobs: + # =========================================================================== + # JOB 1: Backend Code Quality Checks + # =========================================================================== + lint-backend: + name: Lint & Format (Backend) + runs-on: ubuntu-24.04 + # Only run on backend PRs or when explicitly targeting backend + if: inputs.repo_type == 'backend' + # Use pr-preview environment from calling repository + environment: pr-preview + + env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: "0" + RUST_BACKTRACE: "1" + + steps: + # Get the source code for the branch being deployed + - name: Checkout backend code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch_name }} + + # Install Rust compiler and quality tools (clippy, rustfmt) + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + components: clippy, rustfmt + + # Cache Rust dependencies to speed up subsequent runs + - name: Use cached dependencies + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview" + key: "lint" + cache-all-crates: true + + # Run clippy to catch common mistakes and improve code quality + - name: Run clippy + run: cargo clippy --all-targets + + # Check if code follows Rust formatting standards + - name: Run format check + run: cargo fmt --all -- --check || echo "::warning::Code formatting issues found. Run 'cargo fmt --all' locally to fix." 
+ continue-on-error: true + + # =========================================================================== + # JOB 2: Frontend Code Quality Checks + # =========================================================================== + lint-frontend: + name: Lint & Format (Frontend) + runs-on: ubuntu-24.04 + # Only run on frontend PRs or when explicitly targeting frontend + if: inputs.repo_type == 'frontend' + # Use pr-preview environment from calling repository + environment: pr-preview + + steps: + # Get the source code for the branch being deployed + - name: Checkout frontend code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch_name }} + + # Setup Node.js with npm cache for faster dependency installation + - name: Setup Node.js + uses: actions/setup-node@v5 + with: + node-version: 24.x + cache: "npm" + cache-dependency-path: package-lock.json + + # Install exact versions from package-lock.json for consistency + - name: Install dependencies + run: npm ci --prefer-offline + + # Run ESLint to catch JavaScript/TypeScript issues + - name: Run ESLint + run: npm run lint + + # =========================================================================== + # JOB 3: Backend Build and Test + # =========================================================================== + test-backend: + name: Build & Test (Backend) + runs-on: ubuntu-24.04 + # Only run on backend PRs or when explicitly targeting backend + if: inputs.repo_type == 'backend' + # Use pr-preview environment from calling repository + environment: pr-preview + + env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: "0" + RUST_BACKTRACE: "1" + + steps: + # Get the source code for the branch being deployed + - name: Checkout backend code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch_name }} + + # Install Rust compiler for x86_64 Linux (GitHub runner architecture) + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + targets: x86_64-unknown-linux-gnu + + # Configure OpenSSL paths for compilation on Ubuntu + - name: Set OpenSSL Paths + run: | + echo "OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + echo "OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> $GITHUB_ENV + + # Cache Rust dependencies to speed up builds + - name: Use cached dependencies + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview" + key: "test" + cache-all-crates: true + save-if: ${{ github.ref == 'refs/heads/main' }} + + # Compile all Rust code to check for compilation errors + - name: Build + run: cargo build --all-targets + + # Run the test suite to ensure code works correctly + - name: Run tests + run: cargo test + + # =========================================================================== + # JOB 4: Frontend Build and Test + # =========================================================================== + test-frontend: + name: Build & Test (Frontend) + runs-on: ubuntu-24.04 + # Only run on frontend PRs or when explicitly targeting frontend + if: inputs.repo_type == 'frontend' + # Use pr-preview environment from calling repository + environment: pr-preview + + env: + NODE_ENV: test + + steps: + # Get the source code for the branch being deployed + - name: Checkout frontend code + uses: actions/checkout@v4 + with: + ref: ${{ inputs.branch_name }} + + # Setup Node.js with npm cache for faster dependency installation + - name: Setup Node.js + uses: actions/setup-node@v5 + with: + node-version: 24.x + cache: "npm" + cache-dependency-path: package-lock.json + + # Cache Next.js 
build output for faster subsequent builds + - name: Cache Next.js build + uses: actions/cache@v4 + with: + path: .next/cache + key: ${{ runner.os }}-nextjs-test-${{ hashFiles('**/package-lock.json') }}-${{ hashFiles('**/*.js', '**/*.jsx', '**/*.ts', '**/*.tsx') }} + restore-keys: | + ${{ runner.os }}-nextjs-test-${{ hashFiles('**/package-lock.json') }}- + ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json') }}- + + # Cache Playwright browser binaries for E2E tests + - name: Cache Playwright browsers + uses: actions/cache@v4 + with: + path: ~/.cache/ms-playwright + key: ${{ runner.os }}-playwright-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + ${{ runner.os }}-playwright- + + # Install exact versions from package-lock.json for consistency + - name: Install dependencies + run: npm ci --prefer-offline + + # Build the Next.js application to check for build errors + - name: Build application + run: npm run build + + # Download browser binaries needed for E2E testing + - name: Install Playwright browsers + run: npx playwright install --with-deps + + # Run unit tests to validate component functionality + - name: Run unit tests + run: npm run test:run + + # Run end-to-end tests to validate full application flow + - name: Run E2E tests + run: npm run test:e2e + + # =========================================================================== + # JOB 5: Build ARM64 Images for Deployment + # =========================================================================== + build-arm64-image: + name: Build ARM64 Images + runs-on: [self-hosted, Linux, ARM64, neo] + # Use pr-preview environment from calling repository + environment: pr-preview + # Wait for quality checks to pass before building + needs: + - lint-backend + - test-backend + - lint-frontend + - test-frontend + # Skip if quality checks failed, but allow skipped jobs (for frontend/backend-only runs) + if: | + always() && + !cancelled() && + !contains(needs.*.result, 'failure') + + outputs: + backend_image: ${{ steps.resolve.outputs.backend_image }} + backend_image_sha: ${{ steps.resolve.outputs.backend_image_sha }} + frontend_image: ${{ steps.resolve.outputs.frontend_image }} + frontend_image_sha: ${{ steps.resolve.outputs.frontend_image_sha }} + backend_branch: ${{ steps.resolve.outputs.backend_branch }} + frontend_branch: ${{ steps.resolve.outputs.frontend_branch }} + backend_service_port: ${{ steps.resolve.outputs.backend_service_port }} + frontend_service_port: ${{ steps.resolve.outputs.frontend_service_port }} + pr_number: ${{ steps.resolve.outputs.pr_number }} + is_native_arm64: ${{ steps.arch.outputs.is_native_arm64 }} + + steps: + # Verify we're running on ARM64 architecture for native builds + - name: Verify ARM64 runner + id: arch + run: | + if [[ "$(uname -m)" == "aarch64" ]]; then + echo "is_native_arm64=true" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿš€ Running on native ARM64 runner (Neo)" + else + echo "is_native_arm64=false" >> $GITHUB_OUTPUT + echo "::error::Not running on ARM64 architecture" + exit 1 + fi + + # Calculate what images need to be built based on repo type and inputs + - name: Resolve build targets + id: resolve + env: + FORCE_REBUILD: ${{ inputs.force_rebuild }} + run: | + set -euo pipefail + + # Validate PR number input + PR="${{ inputs.pr_number }}" + if [[ -z "$PR" ]]; then + echo "::error::PR number is required" + exit 1 + fi + if ! 
[[ $PR =~ ^[0-9]+$ ]]; then + echo "::error::PR number must be numeric" + exit 1 + fi + + # Validate repository type + REPO_TYPE="${{ inputs.repo_type }}" + if [[ "$REPO_TYPE" != "backend" && "$REPO_TYPE" != "frontend" ]]; then + echo "::error::repo_type must be 'backend' or 'frontend'" + exit 1 + fi + + # Determine which branches to use for each component + BACKEND_BRANCH="${{ inputs.backend_branch }}" + FRONTEND_BRANCH="${{ inputs.frontend_branch }}" + if [[ "$REPO_TYPE" == "backend" ]]; then + BACKEND_BRANCH="${{ inputs.branch_name }}" + fi + if [[ "$REPO_TYPE" == "frontend" ]]; then + FRONTEND_BRANCH="${{ inputs.branch_name }}" + fi + + # Set up image repository references + BACKEND_IMAGE_REPO="${{ env.BACKEND_IMAGE_REPO }}" + FRONTEND_IMAGE_REPO="${{ env.FRONTEND_IMAGE_REPO }}" + + # Check for image overrides + BACKEND_IMAGE_OVERRIDE="${{ inputs.backend_image }}" + FRONTEND_IMAGE_OVERRIDE="${{ inputs.frontend_image }}" + FORCE_BUILD=${FORCE_REBUILD:-false} + + # Configure backend image strategy + if [[ "$REPO_TYPE" == "backend" ]]; then + # Build PR-specific backend image + BACKEND_IMAGE="${BACKEND_IMAGE_REPO}:pr-${PR}" + BACKEND_SHA="${BACKEND_IMAGE_REPO}:pr-${PR}-${{ github.sha }}" + BACKEND_BUILD_MODE="pr" + BACKEND_NEEDS_BUILD=true + BACKEND_TAGS="${BACKEND_IMAGE},${BACKEND_SHA}" + else + # Use main-arm64 backend image for frontend deployments + BACKEND_IMAGE="${BACKEND_IMAGE_REPO}:main-arm64" + BACKEND_SHA="${BACKEND_IMAGE_REPO}:main-arm64-latest" + BACKEND_BUILD_MODE="ensure_main" + BACKEND_NEEDS_BUILD=$([[ "$FORCE_BUILD" == "true" ]] && echo true || echo false) + BACKEND_TAGS="${BACKEND_IMAGE},${BACKEND_SHA}" + fi + + # Handle backend image override + if [[ -n "$BACKEND_IMAGE_OVERRIDE" ]]; then + BACKEND_IMAGE="$BACKEND_IMAGE_OVERRIDE" + BACKEND_NEEDS_BUILD=false + BACKEND_BUILD_MODE="skip" + fi + + # Configure frontend image strategy + if [[ "$REPO_TYPE" == "frontend" ]]; then + # Build PR-specific frontend image + FRONTEND_IMAGE="${FRONTEND_IMAGE_REPO}:pr-${PR}" + FRONTEND_SHA="${FRONTEND_IMAGE_REPO}:pr-${PR}-${{ github.sha }}" + FRONTEND_BUILD_MODE="pr" + FRONTEND_NEEDS_BUILD=true + FRONTEND_TAGS="${FRONTEND_IMAGE},${FRONTEND_SHA}" + else + # Use main-arm64 frontend image for backend deployments + FRONTEND_IMAGE="${FRONTEND_IMAGE_REPO}:main-arm64" + FRONTEND_SHA="${FRONTEND_IMAGE_REPO}:main-arm64-latest" + FRONTEND_BUILD_MODE="ensure_main" + FRONTEND_NEEDS_BUILD=$([[ "$FORCE_BUILD" == "true" ]] && echo true || echo false) + FRONTEND_TAGS="${FRONTEND_IMAGE},${FRONTEND_SHA}" + fi + + # Handle frontend image override + if [[ -n "$FRONTEND_IMAGE_OVERRIDE" ]]; then + FRONTEND_IMAGE="$FRONTEND_IMAGE_OVERRIDE" + FRONTEND_NEEDS_BUILD=false + FRONTEND_BUILD_MODE="skip" + fi + + # Calculate unique ports for this PR (formula: base + PR number) + BACKEND_PORT=$((4000 + PR)) + FRONTEND_PORT=$((3000 + PR)) + + # Export all calculated values for subsequent steps + echo "backend_branch=${BACKEND_BRANCH}" >> $GITHUB_OUTPUT + echo "frontend_branch=${FRONTEND_BRANCH}" >> $GITHUB_OUTPUT + echo "backend_image=${BACKEND_IMAGE}" >> $GITHUB_OUTPUT + echo "backend_image_sha=${BACKEND_SHA}" >> $GITHUB_OUTPUT + echo "backend_build_mode=${BACKEND_BUILD_MODE}" >> $GITHUB_OUTPUT + echo "backend_needs_build=${BACKEND_NEEDS_BUILD}" >> $GITHUB_OUTPUT + echo "backend_tags=${BACKEND_TAGS}" >> $GITHUB_OUTPUT + echo "frontend_image=${FRONTEND_IMAGE}" >> $GITHUB_OUTPUT + echo "frontend_image_sha=${FRONTEND_SHA}" >> $GITHUB_OUTPUT + echo "frontend_build_mode=${FRONTEND_BUILD_MODE}" >> $GITHUB_OUTPUT + echo 
"frontend_needs_build=${FRONTEND_NEEDS_BUILD}" >> $GITHUB_OUTPUT + echo "frontend_tags=${FRONTEND_TAGS}" >> $GITHUB_OUTPUT + echo "backend_service_port=${BACKEND_PORT}" >> $GITHUB_OUTPUT + echo "frontend_service_port=${FRONTEND_PORT}" >> $GITHUB_OUTPUT + echo "pr_number=${PR}" >> $GITHUB_OUTPUT + + echo "::notice::๐Ÿ—๏ธ Backend: ${BACKEND_IMAGE} (build: ${BACKEND_NEEDS_BUILD})" + echo "::notice::๐ŸŽจ Frontend: ${FRONTEND_IMAGE} (build: ${FRONTEND_NEEDS_BUILD})" + + # Authenticate with GitHub Container Registry for pushing images + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Set up Docker BuildKit for advanced features and caching + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: | + image=moby/buildkit:latest + network=host + + # Check if main-arm64 backend image exists in registry + - name: Check backend image in registry + id: backend_registry + if: steps.resolve.outputs.backend_build_mode == 'ensure_main' && steps.resolve.outputs.backend_needs_build != 'true' + run: | + if docker manifest inspect ${{ steps.resolve.outputs.backend_image }} >/dev/null 2>&1; then + echo "exists=true" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ“ฆ Backend main-arm64 image already exists" + else + echo "exists=false" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ”จ Backend main-arm64 image missing - will build" + fi + + # Get backend source code if we need to build it + - name: Checkout backend repository + if: steps.resolve.outputs.backend_build_mode != 'skip' && (steps.resolve.outputs.backend_needs_build == 'true' || steps.backend_registry.outputs.exists == 'false') + uses: actions/checkout@v4 + with: + repository: ${{ env.BACKEND_REPOSITORY }} + ref: ${{ steps.resolve.outputs.backend_branch }} + path: backend-src + + # Cache Rust dependencies for faster ARM64 builds + - name: Setup Rust cache + if: steps.resolve.outputs.backend_build_mode != 'skip' && (steps.resolve.outputs.backend_needs_build == 'true' || steps.backend_registry.outputs.exists == 'false') + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview-arm64" + key: backend-${{ steps.resolve.outputs.backend_branch }} + workspaces: | + backend-src + cache-all-crates: true + + # Build and push ARM64 backend image with multi-tier caching + # Cache strategy: PR-specific image โ†’ branch-specific GHA cache โ†’ main GHA cache + - name: Build and push backend image + id: build_backend + if: steps.resolve.outputs.backend_build_mode != 'skip' && (steps.resolve.outputs.backend_needs_build == 'true' || steps.backend_registry.outputs.exists == 'false') + uses: docker/build-push-action@v5 + with: + context: ./backend-src + file: ./backend-src/Dockerfile + platforms: linux/arm64 + push: true + tags: ${{ steps.resolve.outputs.backend_tags }} + cache-from: | + type=registry,ref=${{ steps.resolve.outputs.backend_image }} + type=registry,ref=${{ env.BACKEND_IMAGE_REPO }}:main-arm64 + type=gha,scope=backend-arm64-${{ steps.resolve.outputs.backend_branch }} + type=gha,scope=backend-arm64-main + type=gha,scope=backend-arm64 + cache-to: type=gha,mode=max,scope=backend-arm64-${{ steps.resolve.outputs.backend_branch }} + labels: | + pr.branch=${{ steps.resolve.outputs.backend_branch }} + pr.number=${{ steps.resolve.outputs.pr_number }} + build-args: | + CARGO_INCREMENTAL=0 + BUILDKIT_INLINE_CACHE=1 + provenance: true + sbom: false + + # Check if main-arm64 frontend image exists in 
registry + - name: Check frontend image in registry + id: frontend_registry + if: steps.resolve.outputs.frontend_build_mode == 'ensure_main' && steps.resolve.outputs.frontend_needs_build != 'true' + run: | + if docker manifest inspect ${{ steps.resolve.outputs.frontend_image }} >/dev/null 2>&1; then + echo "exists=true" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ“ฆ Frontend main-arm64 image already exists" + else + echo "exists=false" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ”จ Frontend main-arm64 image missing - will build" + fi + + # Get frontend source code if we need to build it + - name: Checkout frontend repository + if: steps.resolve.outputs.frontend_build_mode != 'skip' && (steps.resolve.outputs.frontend_needs_build == 'true' || steps.frontend_registry.outputs.exists == 'false') + uses: actions/checkout@v4 + with: + repository: ${{ env.FRONTEND_REPOSITORY }} + ref: ${{ steps.resolve.outputs.frontend_branch }} + path: frontend-src + + # Build and push ARM64 frontend image with Next.js optimization + # Cache strategy: PR-specific image โ†’ branch-specific GHA cache โ†’ main GHA cache + - name: Build and push frontend image + id: build_frontend + if: steps.resolve.outputs.frontend_build_mode != 'skip' && (steps.resolve.outputs.frontend_needs_build == 'true' || steps.frontend_registry.outputs.exists == 'false') + uses: docker/build-push-action@v5 + with: + context: ./frontend-src + file: ./frontend-src/Dockerfile + target: runner + platforms: linux/arm64 + push: true + tags: ${{ steps.resolve.outputs.frontend_tags }} + cache-from: | + type=registry,ref=${{ steps.resolve.outputs.frontend_image }} + type=registry,ref=${{ env.FRONTEND_IMAGE_REPO }}:main-arm64 + type=gha,scope=frontend-arm64-${{ steps.resolve.outputs.frontend_branch }} + type=gha,scope=frontend-arm64-main + type=gha,scope=frontend-arm64 + cache-to: type=gha,mode=max,scope=frontend-arm64-${{ steps.resolve.outputs.frontend_branch }} + labels: | + pr.branch=${{ steps.resolve.outputs.frontend_branch }} + pr.number=${{ steps.resolve.outputs.pr_number }} + build-args: | + NEXT_PUBLIC_BACKEND_SERVICE_PROTOCOL=${{ secrets.PR_PREVIEW_BACKEND_SERVICE_PROTOCOL || 'http' }} + NEXT_PUBLIC_BACKEND_SERVICE_HOST=${{ secrets.PR_PREVIEW_BACKEND_SERVICE_HOST || 'localhost' }} + NEXT_PUBLIC_BACKEND_SERVICE_PORT=${{ secrets.PR_PREVIEW_BACKEND_SERVICE_PORT || steps.resolve.outputs.backend_service_port }} + NEXT_PUBLIC_BACKEND_SERVICE_API_PATH=${{ secrets.PR_PREVIEW_BACKEND_SERVICE_API_PATH || 'api' }} + NEXT_PUBLIC_BACKEND_API_VERSION=${{ secrets.PR_PREVIEW_BACKEND_API_VERSION || 'v1' }} + NEXT_PUBLIC_TIPTAP_APP_ID=${{ secrets.PR_PREVIEW_TIPTAP_APP_ID }} + FRONTEND_SERVICE_PORT=${{ secrets.PR_PREVIEW_FRONTEND_SERVICE_PORT || '3000' }} + FRONTEND_SERVICE_INTERFACE=${{ secrets.PR_PREVIEW_FRONTEND_SERVICE_INTERFACE || '0.0.0.0' }} + BUILDKIT_INLINE_CACHE=1 + provenance: true + sbom: true + + # Create cryptographic proof of backend build for security + - name: Attest backend build + if: steps.build_backend.conclusion == 'success' + continue-on-error: true + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.BACKEND_IMAGE_REPO }} + subject-digest: ${{ steps.build_backend.outputs.digest }} + push-to-registry: true + + # Create cryptographic proof of frontend build for security + - name: Attest frontend build + if: steps.build_frontend.conclusion == 'success' + continue-on-error: true + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.FRONTEND_IMAGE_REPO }} + subject-digest: ${{ steps.build_frontend.outputs.digest 
}} + push-to-registry: true + + # =========================================================================== + # JOB 6: Deploy to RPi5 via Tailscale VPN + # =========================================================================== + deploy-to-rpi5: + name: Deploy to RPi5 via Tailscale + runs-on: [self-hosted, Linux, ARM64, neo] + needs: build-arm64-image + # Must use always() pattern because build-arm64-image uses always() + # Without this, the job won't run even when build succeeds + if: | + always() && + !cancelled() && + needs.build-arm64-image.result == 'success' + # Use pr-preview environment from calling repository + environment: pr-preview + + steps: + # Calculate unique ports for this PR deployment + - name: Calculate Deployment Ports + id: ports + run: | + PR_NUM="${{ needs.build-arm64-image.outputs.pr_number }}" + + # Port mapping: unique external ports, standard internal ports + BACKEND_CONTAINER_PORT=3000 + BACKEND_EXTERNAL_PORT=${{ needs.build-arm64-image.outputs.backend_service_port }} + POSTGRES_EXTERNAL_PORT=$((5432 + PR_NUM)) + FRONTEND_CONTAINER_PORT=3000 + FRONTEND_EXTERNAL_PORT=${{ needs.build-arm64-image.outputs.frontend_service_port }} + + echo "backend_container_port=${BACKEND_CONTAINER_PORT}" >> $GITHUB_OUTPUT + echo "backend_port=${BACKEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "postgres_port=${POSTGRES_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "frontend_container_port=${FRONTEND_CONTAINER_PORT}" >> $GITHUB_OUTPUT + echo "frontend_port=${FRONTEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "project_name=pr-${PR_NUM}" >> $GITHUB_OUTPUT + + echo "::notice::๐Ÿ”Œ Postgres: ${POSTGRES_EXTERNAL_PORT} | Backend: ${BACKEND_EXTERNAL_PORT} | Frontend: ${FRONTEND_EXTERNAL_PORT}" + + # Get Docker Compose configuration from backend repository + - name: Checkout Backend Repository for Compose File + uses: actions/checkout@v4 + with: + repository: ${{ github.repository_owner }}/refactor-platform-rs + ref: ${{ needs.build-arm64-image.outputs.backend_branch }} + path: backend-compose + + # Verify Tailscale VPN connection is working + - name: Verify Tailscale Connection + run: | + echo "๐Ÿ” Checking Tailscale connection status..." + tailscale status || echo "โš ๏ธ Tailscale status check failed, but continuing..." + echo "โœ… Tailscale verification complete" + + # Configure SSH keys and known hosts for secure connection + - name: Setup SSH Configuration + run: | + mkdir -p ~/.ssh + chmod 700 ~/.ssh + echo "${{ secrets.RPI5_SSH_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + echo "${{ secrets.RPI5_HOST_KEY }}" >> ~/.ssh/known_hosts + chmod 644 ~/.ssh/known_hosts + + # Test SSH connectivity before attempting deployment + - name: Test SSH Connection + run: | + echo "๐Ÿ” Testing SSH connection to ${{ secrets.RPI5_TAILSCALE_NAME }}..." + if ! 
ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes -o ConnectTimeout=10 \ + -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + 'echo "SSH connection successful"'; then + echo "::error::SSH connection failed to ${{ secrets.RPI5_TAILSCALE_NAME }}" + exit 1 + fi + echo "::notice::โœ… SSH connection verified" + + # Prepare database schema before running application migrations + - name: Prepare Postgres Schema + if: inputs.repo_type == 'backend' + run: | + PR_NUMBER="${{ needs.build-arm64-image.outputs.pr_number }}" + BACKEND_IMAGE="${{ needs.build-arm64-image.outputs.backend_image }}" + PROJECT_NAME="${{ steps.ports.outputs.project_name }}" + + # Transfer Docker Compose file to deployment target + echo "๐Ÿ“ฆ Transferring compose file to RPi5 for schema preparation..." + scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + backend-compose/docker-compose.pr-preview.yaml \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}-compose.yaml + + # Create environment file with all configuration + cat > /tmp/pr-${PR_NUMBER}.env << EOF + PR_NUMBER=${PR_NUMBER} + BACKEND_IMAGE=${BACKEND_IMAGE} + FRONTEND_IMAGE=${{ needs.build-arm64-image.outputs.frontend_image }} + PROJECT_NAME=${PROJECT_NAME} + PR_POSTGRES_PORT=${{ steps.ports.outputs.postgres_port }} + PR_BACKEND_PORT=${{ steps.ports.outputs.backend_port }} + PR_BACKEND_CONTAINER_PORT=${{ steps.ports.outputs.backend_container_port }} + PR_FRONTEND_PORT=${{ steps.ports.outputs.frontend_port }} + PR_FRONTEND_CONTAINER_PORT=${{ steps.ports.outputs.frontend_container_port }} + POSTGRES_USER=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_USER }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_PASSWORD=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_PASSWORD }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_DB=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_DB }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_SCHEMA=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_SCHEMA }}' | tr -d '\n\r' | tr -d ' ') + RUST_ENV=staging + RUST_BACKTRACE=1 + BACKEND_INTERFACE=0.0.0.0 + BACKEND_ALLOWED_ORIGINS=* + BACKEND_LOG_FILTER_LEVEL=INFO + BACKEND_SESSION_EXPIRY_SECONDS=86400 + TIPTAP_APP_ID=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_APP_ID }}' | tr -d '\n\r') + TIPTAP_URL=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_URL }}' | tr -d '\n\r') + TIPTAP_AUTH_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_AUTH_KEY }}' | tr -d '\n\r') + TIPTAP_JWT_SIGNING_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY }}' | tr -d '\n\r') + MAILERSEND_API_KEY=$(echo '${{ secrets.PR_PREVIEW_MAILERSEND_API_KEY }}' | tr -d '\n\r') + WELCOME_EMAIL_TEMPLATE_ID=$(echo '${{ secrets.PR_PREVIEW_WELCOME_EMAIL_TEMPLATE_ID }}' | tr -d '\n\r') + GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + GITHUB_ACTOR=${{ github.actor }} + RPI5_USERNAME=${{ secrets.RPI5_USERNAME }} + SERVICE_STARTUP_WAIT_SECONDS=10 + EOF + + # Transfer environment configuration to deployment target + echo "๐Ÿ“ฆ Transferring environment configuration to RPi5..." 
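+          # Illustrative sketch (not part of the original step): before shipping the
+          # env file, a fail-fast check could confirm that the sanitized Postgres
+          # secrets did not resolve to empty strings, e.g.:
+          #   for key in POSTGRES_USER POSTGRES_PASSWORD POSTGRES_DB POSTGRES_SCHEMA; do
+          #     grep -Eq "^${key}=.+" /tmp/pr-${PR_NUMBER}.env \
+          #       || { echo "::error::${key} is empty in generated env file"; exit 1; }
+          #   done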
+ scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + /tmp/pr-${PR_NUMBER}.env \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}.env + + # Execute schema preparation on remote server + ssh -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} << 'PREP_SCRIPT' + set -eo pipefail + + # Load environment configuration + ENV_FILE=$(ls -t ~/pr-*.env 2>/dev/null | head -1) + if [[ -f "$ENV_FILE" ]]; then + echo "๐Ÿ“ฅ Found environment file for schema prep: $ENV_FILE" + set -a + source "$ENV_FILE" + set +a + else + echo "โŒ Environment file not found during schema preparation!" + exit 1 + fi + + # Safety check: ensure we're running on target server + if [[ "$(hostname)" == *"runner"* ]] || [[ "$(pwd)" == *"runner"* ]]; then + echo "โŒ Schema preparation running on GitHub runner instead of target server!" + exit 1 + fi + + cd /home/${RPI5_USERNAME} + + # Clean slate: remove any previous deployment state + echo "๐Ÿงน Resetting previous deployment (including database volume)..." + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml down -v 2>/dev/null || true + + # Start database for schema setup + echo "๐Ÿ˜ Starting postgres for schema preparation..." + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml --env-file "$ENV_FILE" up -d postgres + + # Wait for database to be ready for connections + echo "โณ Waiting for postgres to become ready..." + READY="" + for attempt in {1..30}; do + if docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml exec -T postgres \ + env PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" >/dev/null 2>&1; then + READY="yes" + break + fi + sleep 2 + done + + # Abort if database never becomes ready + if [[ -z "$READY" ]]; then + echo "โŒ Postgres did not become ready in time" + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml logs postgres || true + exit 1 + fi + + # Create schema and set permissions for application + echo "๐Ÿ›  Ensuring schema ${POSTGRES_SCHEMA} exists and permissions are set..." 
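+            # Minimal sketch of what "ensuring the schema exists" typically means in
+            # SQL (assumption, for illustration only; the actual statements are
+            # supplied to psql below):
+            #   CREATE SCHEMA IF NOT EXISTS "${POSTGRES_SCHEMA}";
+            #   GRANT ALL ON SCHEMA "${POSTGRES_SCHEMA}" TO "${POSTGRES_USER}";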
+ docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml exec -T postgres \ + env PGPASSWORD="${POSTGRES_PASSWORD}" psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" < /tmp/pr-${PR_NUMBER}.env << EOF + PR_NUMBER=${PR_NUMBER} + BACKEND_IMAGE=${BACKEND_IMAGE} + FRONTEND_IMAGE=${FRONTEND_IMAGE} + PROJECT_NAME=${PROJECT_NAME} + PR_POSTGRES_PORT=${{ steps.ports.outputs.postgres_port }} + PR_BACKEND_PORT=${{ steps.ports.outputs.backend_port }} + PR_BACKEND_CONTAINER_PORT=${{ steps.ports.outputs.backend_container_port }} + PR_FRONTEND_PORT=${{ steps.ports.outputs.frontend_port }} + PR_FRONTEND_CONTAINER_PORT=${{ steps.ports.outputs.frontend_container_port }} + POSTGRES_USER=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_USER }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_PASSWORD=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_PASSWORD }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_DB=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_DB }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_SCHEMA=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_SCHEMA }}' | tr -d '\n\r' | tr -d ' ') + RUST_ENV=staging + RUST_BACKTRACE=1 + BACKEND_INTERFACE=0.0.0.0 + BACKEND_ALLOWED_ORIGINS=* + BACKEND_LOG_FILTER_LEVEL=INFO + BACKEND_SESSION_EXPIRY_SECONDS=86400 + TIPTAP_AUTH_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_AUTH_KEY }}' | tr -d '\n\r') + TIPTAP_JWT_SIGNING_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY }}' | tr -d '\n\r') + TIPTAP_APP_ID=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_APP_ID }}' | tr -d '\n\r') + TIPTAP_URL=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_URL }}' | tr -d '\n\r') + MAILERSEND_API_KEY=$(echo '${{ secrets.PR_PREVIEW_MAILERSEND_API_KEY }}' | tr -d '\n\r') + WELCOME_EMAIL_TEMPLATE_ID=$(echo '${{ secrets.PR_PREVIEW_WELCOME_EMAIL_TEMPLATE_ID }}' | tr -d '\n\r') + GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + GITHUB_ACTOR=${{ github.actor }} + RPI5_USERNAME=${{ secrets.RPI5_USERNAME }} + SERVICE_STARTUP_WAIT_SECONDS=10 + EOF + + # Transfer deployment configuration to target server + scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + /tmp/pr-${PR_NUMBER}.env \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}.env + + # Execute main deployment script on remote server + ssh -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} << 'DEPLOY_SCRIPT' + set -eo pipefail + + # Load deployment environment configuration + ENV_FILE=$(ls -t ~/pr-*.env 2>/dev/null | head -1) + if [[ -f "$ENV_FILE" ]]; then + echo "๐Ÿ“ฅ Found environment file: $ENV_FILE" + set -a + source "$ENV_FILE" + set +a + else + echo "โŒ Environment file not found!" + exit 1 + fi + + # Safety check: ensure we're running on target server + if [[ "$(hostname)" == *"runner"* ]] || [[ "$(pwd)" == *"runner"* ]]; then + echo "โŒ Script running on GitHub runner instead of target server!" + exit 1 + fi + + cd /home/${RPI5_USERNAME} + + # Authenticate with container registry + echo "๐Ÿ“ฆ Logging into GHCR..." + echo "${GITHUB_TOKEN}" | docker login ghcr.io -u ${GITHUB_ACTOR} --password-stdin + + # Pull latest backend image + echo "๐Ÿ“ฅ Pulling backend image: ${BACKEND_IMAGE}..." + docker pull ${BACKEND_IMAGE} + + # Pull frontend image if configured + if [[ -n "${FRONTEND_IMAGE}" && "${FRONTEND_IMAGE}" != "null" ]]; then + echo "๐Ÿ“ฅ Pulling frontend image: ${FRONTEND_IMAGE}..." + docker pull ${FRONTEND_IMAGE} + fi + + # Start complete application stack + echo "๐Ÿš€ Starting PR preview environment..." 
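+            # For example, with PR_NUMBER=42 (hypothetical) the command below expands
+            # to roughly:
+            #   docker compose -p pr-42 -f pr-42-compose.yaml --env-file ~/pr-42.env up -d
+            # with host ports backend 4042, frontend 3042 and postgres 5474, following
+            # the base-port + PR-number formula computed earlier in the workflow.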
+ docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml --env-file "$ENV_FILE" up -d + + # Allow services time to start up + echo "โณ Waiting ${SERVICE_STARTUP_WAIT_SECONDS} seconds for services..." + sleep ${SERVICE_STARTUP_WAIT_SECONDS} + + # Display deployment status + echo "๐Ÿฉบ Deployment status:" + docker compose -p ${PROJECT_NAME} ps + + # Verify database migrations completed successfully + echo "๐Ÿ“œ Checking migration status..." + MIGRATOR_EXIT_CODE=$(docker inspect ${PROJECT_NAME}-migrator-1 --format='{{.State.ExitCode}}' 2>/dev/null || echo "255") + docker logs ${PROJECT_NAME}-migrator-1 2>&1 | tail -20 + + if [[ "${MIGRATOR_EXIT_CODE}" != "0" ]]; then + echo "โŒ Migration failed with exit code: ${MIGRATOR_EXIT_CODE}" + echo "๐Ÿ“œ Full migration logs:" + docker logs ${PROJECT_NAME}-migrator-1 2>&1 + exit 1 + fi + echo "โœ… Migrations completed successfully" + + # Verify backend service is healthy + echo "๐Ÿ“œ Checking backend status..." + BACKEND_STATUS=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.Status}}' 2>/dev/null || echo "missing") + docker logs ${PROJECT_NAME}-backend-1 2>&1 | tail -20 + + if [[ "${BACKEND_STATUS}" != "running" ]]; then + echo "โŒ Backend is not running (status: ${BACKEND_STATUS})" + echo "๐Ÿ“œ Full backend logs:" + docker logs ${PROJECT_NAME}-backend-1 2>&1 + exit 1 + fi + + # Check for crash loop (repeated restarts) + BACKEND_RESTART_COUNT=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.RestartCount}}' 2>/dev/null || echo "0") + if [[ "${BACKEND_RESTART_COUNT}" -gt "0" ]]; then + echo "โš ๏ธ Backend has restarted ${BACKEND_RESTART_COUNT} time(s) - checking for crash loop" + sleep 5 + BACKEND_STATUS_RECHECK=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.Status}}' 2>/dev/null || echo "missing") + if [[ "${BACKEND_STATUS_RECHECK}" != "running" ]]; then + echo "โŒ Backend is crash looping" + echo "๐Ÿ“œ Full backend logs:" + docker logs ${PROJECT_NAME}-backend-1 2>&1 + exit 1 + fi + fi + + echo "โœ… Backend is running successfully" + echo "โœ… Deployment complete!" + + # Clean up transferred environment file + rm -f "$ENV_FILE" + DEPLOY_SCRIPT + + # Post deployment status and access URLs to PR + - name: Comment on PR with Preview URLs + uses: actions/github-script@v7 + if: github.event_name == 'pull_request' || (github.event_name == 'workflow_call' && github.event.pull_request) + with: + script: | + const prNumber = ${{ needs.build-arm64-image.outputs.pr_number }}; + const backendPort = ${{ steps.ports.outputs.backend_port }}; + const postgresPort = ${{ steps.ports.outputs.postgres_port }}; + const frontendPort = ${{ steps.ports.outputs.frontend_port }}; + const backendBranch = '${{ needs.build-arm64-image.outputs.backend_branch }}'; + const frontendBranch = '${{ needs.build-arm64-image.outputs.frontend_branch }}'; + const backendImage = '${{ needs.build-arm64-image.outputs.backend_image }}'; + const frontendImage = '${{ needs.build-arm64-image.outputs.frontend_image }}'; + const repoType = '${{ inputs.repo_type }}'; + const isNativeArm64 = '${{ needs.build-arm64-image.outputs.is_native_arm64 }}' === 'true'; + + const backendUrl = `http://${{ secrets.RPI5_TAILSCALE_NAME }}:${backendPort}`; + const frontendUrl = `http://${{ secrets.RPI5_TAILSCALE_NAME }}:${frontendPort}`; + + const comment = `## ๐Ÿš€ PR Preview Environment Deployed! 
+ + ### ๐Ÿ”— Access URLs + | Service | URL | + |---------|-----| + | **Frontend** | [${frontendUrl}](${frontendUrl}) | + | **Backend API** | [${backendUrl}](${backendUrl}) | + | **Health Check** | [${backendUrl}/health](${backendUrl}/health) | + + ### ๐Ÿ“Š Environment Details + - **PR Number:** #${prNumber} + - **Repository Type:** ${repoType} + - **Backend Branch:** \`${backendBranch}\` โ†’ [${backendImage}](https://github.com/${{ github.repository_owner }}?tab=packages) + - **Frontend Branch:** \`${frontendBranch}\` โ†’ [${frontendImage}](https://github.com/${{ github.repository_owner }}?tab=packages) + - **Commit:** \`${{ github.sha }}\` + - **Ports:** Frontend: ${frontendPort} | Backend: ${backendPort} | Postgres: ${postgresPort} + - **Build Type:** ${isNativeArm64 ? '๐Ÿš€ Native ARM64' : 'โš ๏ธ ARM64 Emulation'} + + ### ๐Ÿ” Access Requirements + 1. **Connect to Tailscale** (required) + 2. Access frontend: ${frontendUrl} + 3. Access backend: ${backendUrl} + + ### ๐Ÿงช Testing + \`\`\`bash + # Health check + curl ${backendUrl}/health + + # API test + curl ${backendUrl}/api/v1/users + \`\`\` + + ### ๐Ÿงน Cleanup + _Environment auto-cleaned when PR closes/merges_ + + --- + *Deployed: ${new Date().toISOString()}* + *Architecture: Native ARM64 build on Neo + Multi-tier caching*`; + + // Clean up any existing preview comments + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + const botComment = comments.find(c => + c.user.type === 'Bot' && c.body.includes('PR Preview Environment') + ); + + if (botComment) { + await github.rest.issues.deleteComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + }); + } + + // Post fresh deployment status comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: comment, + }); diff --git a/.github/workflows/cleanup-pr-preview-backend.yml b/.github/workflows/cleanup-pr-preview-backend.yml new file mode 100644 index 00000000..0fdbb09b --- /dev/null +++ b/.github/workflows/cleanup-pr-preview-backend.yml @@ -0,0 +1,40 @@ +# ============================================================================= +# Backend PR Preview Cleanup Overlay Workflow +# ============================================================================= +# Purpose: Trigger cleanup when backend PRs are closed/merged +# Calls: cleanup-pr-preview.yml (reusable workflow) +# ============================================================================= + +name: Cleanup PR Preview (Backend) + +on: + pull_request: + # Trigger on PR close (includes merge) + types: [closed] + +permissions: + contents: read + packages: write + pull-requests: write + +jobs: + # =========================================================================== + # JOB: Call reusable cleanup workflow + # =========================================================================== + cleanup-backend-pr: + name: Cleanup Backend PR Preview + # Call the reusable workflow located in this repository + uses: ./.github/workflows/cleanup-pr-preview.yml + with: + # This is a backend PR cleanup + repo_type: 'backend' + # PR number to cleanup + pr_number: ${{ github.event.pull_request.number }} + # Branch name for image identification + branch_name: ${{ github.head_ref }} + secrets: inherit + # ========================================================================= + # NO SECRETS NEEDED! 
+ # ========================================================================= + # The reusable workflow uses the pr-preview environment from this repo + # which contains all necessary secrets for cleanup. diff --git a/.github/workflows/cleanup-pr-preview.yml b/.github/workflows/cleanup-pr-preview.yml new file mode 100644 index 00000000..c562670e --- /dev/null +++ b/.github/workflows/cleanup-pr-preview.yml @@ -0,0 +1,565 @@ +# ============================================================================= +# PR Preview Cleanup Workflow +# ============================================================================= +# Purpose: Cleans up PR preview environments when PRs are closed/merged +# Features: Selective cleanup, volume retention policy, SSH cleanup on RPi5 +# Target: Raspberry Pi 5 (ARM64) via Tailscale SSH +# ============================================================================= + +name: Cleanup PR Preview Environment + +# Trigger when PR is closed (includes both close and merge events) +on: + workflow_call: + inputs: + repo_type: + description: "Repository type requesting cleanup" + required: false + type: string + pr_number: + description: "PR number to clean up" + required: true + type: string + branch_name: + description: "Branch name associated with the PR" + required: false + type: string + pull_request: + types: [closed] + branches: + - main + + # Manual trigger for cleanup of specific PR numbers + workflow_dispatch: + inputs: + pr_number: + description: "PR number to clean up" + required: true + type: string + +# Permissions needed for cleanup and building main-arm64 image +permissions: + contents: read + pull-requests: write + packages: write + attestations: write + id-token: write + +# Environment variables shared across all jobs +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + cleanup-preview: + name: Cleanup PR Preview on RPi5 + runs-on: [self-hosted, Linux, ARM64, neo] + environment: pr-preview + + outputs: + pr_number: ${{ steps.context.outputs.pr_number }} + is_merged: ${{ steps.context.outputs.is_merged }} + cleanup_reason: ${{ steps.context.outputs.cleanup_reason }} + + steps: + # Calculate cleanup context and determine volume retention policy + - name: Set Cleanup Context + id: context + run: | + # Extract PR metadata + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + PR_NUM="${{ github.event.pull_request.number }}" + IS_MERGED="${{ github.event.pull_request.merged }}" + else + PR_NUM="${{ inputs.pr_number }}" + IS_MERGED="false" + fi + + # Calculate ports for logging/verification (same formula as deployment) + BACKEND_CONTAINER_PORT=${{ vars.BACKEND_PORT_BASE }} + BACKEND_EXTERNAL_PORT=$((${{ vars.BACKEND_PORT_BASE }} + PR_NUM)) + POSTGRES_EXTERNAL_PORT=$((${{ vars.POSTGRES_PORT_BASE }} + PR_NUM)) + FRONTEND_EXTERNAL_PORT=$((${{ vars.FRONTEND_PORT_BASE }} + PR_NUM)) + + # Store context for subsequent steps + echo "pr_number=${PR_NUM}" >> $GITHUB_OUTPUT + echo "is_merged=${IS_MERGED}" >> $GITHUB_OUTPUT + echo "backend_container_port=${BACKEND_CONTAINER_PORT}" >> $GITHUB_OUTPUT + echo "backend_port=${BACKEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "postgres_port=${POSTGRES_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "frontend_port=${FRONTEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "project_name=pr-${PR_NUM}" >> $GITHUB_OUTPUT + + # Cleanup strategy: + # - PR-specific images removed from RPi5 to prevent accumulation + # - Shared images (postgres:17) retained on RPi5 for reuse + # - Volumes always removed on RPi5 to free disk 
space + # - PR images in GHCR deleted after main-arm64 build (if merged) + if [[ "${IS_MERGED}" == "true" ]]; then + echo "cleanup_reason=merged" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ”€ PR #${PR_NUM} was merged - will build main-arm64 image" + else + echo "cleanup_reason=closed" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿšซ PR #${PR_NUM} was closed without merge" + fi + + echo "::notice::๐Ÿ—‘๏ธ PR-specific images and volumes will be removed from RPi5" + + # Verify we can reach the RPi5 through Tailscale VPN + - name: Verify Tailscale Connection + run: | + # Tailscale is pre-installed and already connected on the self-hosted runner + # Just verify the connection status + echo "๐Ÿ” Checking Tailscale connection status..." + tailscale status || echo "โš ๏ธ Tailscale status check failed, but continuing..." + echo "โœ… Tailscale verification complete" + + # Set up SSH key and known hosts to connect securely to RPi5 + - name: Setup SSH Configuration + run: | + mkdir -p ~/.ssh + chmod 700 ~/.ssh + echo "${{ secrets.RPI5_SSH_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + echo "${{ secrets.RPI5_HOST_KEY }}" >> ~/.ssh/known_hosts + chmod 644 ~/.ssh/known_hosts + + # Test SSH connection to RPi5 before attempting cleanup + - name: Test SSH Connection + run: | + echo "๐Ÿ” Testing SSH connection to ${{ secrets.RPI5_TAILSCALE_NAME }}..." + if ! ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes -o ConnectTimeout=10 \ + -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + 'echo "SSH connection successful"'; then + echo "::error::SSH connection failed to ${{ secrets.RPI5_TAILSCALE_NAME }}" + exit 1 + fi + echo "::notice::โœ… SSH connection verified" + + # Execute cleanup commands on RPi5 via SSH + - name: Cleanup Deployment on RPi5 + run: | + PR_NUMBER="${{ steps.context.outputs.pr_number }}" + PROJECT_NAME="${{ steps.context.outputs.project_name }}" + + echo "๐Ÿงน Starting cleanup for PR #${PR_NUMBER}..." + + # Execute cleanup script on RPi5 with proper error handling + cat << 'CLEANUP_SCRIPT' | ssh -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + /bin/bash + set -eo pipefail + + # Variables passed from GitHub Actions + PR_NUMBER="${{ steps.context.outputs.pr_number }}" + PROJECT_NAME="${{ steps.context.outputs.project_name }}" + RPI5_USERNAME="${{ secrets.RPI5_USERNAME }}" + + # Guard against accidentally running on the GitHub runner + if [[ "$(hostname)" == *"runner"* ]] || [[ "$(pwd)" == *"runner"* ]]; then + echo "โŒ Cleanup running on GitHub runner instead of target server!" + exit 1 + fi + + cd /home/${RPI5_USERNAME} + + echo "๐Ÿ›‘ Stopping and removing containers for ${PROJECT_NAME}..." + if docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml down 2>/dev/null; then + echo "โœ… Containers stopped and removed" + else + echo "โš ๏ธ No running containers found (already cleaned up?)" + fi + + echo "๐Ÿ“ Removing compose file..." + if rm -f pr-${PR_NUMBER}-compose.yaml; then + echo "โœ… Compose file removed" + else + echo "โš ๏ธ Compose file not found" + fi + + echo "๐Ÿ“ Removing environment file..." + if rm -f pr-${PR_NUMBER}.env; then + echo "โœ… Environment file removed" + else + echo "โš ๏ธ Environment file not found" + fi + + # Volume cleanup - always remove when PR is closed or merged + echo "๐Ÿ—‘๏ธ Removing database volume..." 
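+            # Volume names follow the compose project convention <project>_<volume>,
+            # e.g. pr-42_postgres_data for a hypothetical PR 42. Any stragglers can be
+            # listed by hand with something like:
+            #   docker volume ls --filter "name=pr-" --format '{{.Name}}'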
+ if docker volume rm ${PROJECT_NAME}_postgres_data 2>/dev/null; then + echo "โœ… Volume removed" + else + echo "โš ๏ธ Volume not found (may have been cleaned up already)" + fi + + # Remove PR-specific Docker images (keep shared postgres:17 image) + echo "" + echo "๐Ÿ—‘๏ธ Removing PR-specific Docker images..." + PR_IMAGES=$(docker images --format '{{.Repository}}:{{.Tag}}' | grep "pr-${PR_NUMBER}" || true) + if [[ -n "$PR_IMAGES" ]]; then + echo "$PR_IMAGES" | while read -r image; do + echo " Removing: $image" + docker rmi -f "$image" 2>/dev/null || echo " โš ๏ธ Failed to remove $image" + done + echo "โœ… PR-specific images removed" + else + echo "โš ๏ธ No PR-specific images found (may have been cleaned up already)" + fi + echo "๐Ÿ“ฆ Shared images retained: postgres:17 (used by all PRs)" + + echo "" + echo "๐Ÿ“Š Remaining PR environments on RPi5:" + REMAINING=$(docker ps --filter 'name=pr-' --format '{{.Names}}' 2>/dev/null | wc -l) + if [[ $REMAINING -gt 0 ]]; then + echo "Active PR environments: $REMAINING" + docker ps --filter 'name=pr-' --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}' 2>/dev/null | head -6 + else + echo "No PR environments currently running โœจ" + fi + + echo "" + echo "โœ… Cleanup complete for PR #${PR_NUMBER}!" + CLEANUP_SCRIPT + + # Post cleanup status to PR as comment for developer visibility + - name: Update PR Comment with Cleanup Status + if: github.event_name == 'pull_request' + uses: actions/github-script@v7 + with: + script: | + // Extract context from previous steps + const prNumber = ${{ steps.context.outputs.pr_number }}; + const isMerged = '${{ steps.context.outputs.is_merged }}' === 'true'; + const cleanupReason = isMerged ? 'merged into main' : 'closed without merging'; + const volumeStatus = isMerged + ? '๐Ÿ“… Retained for 7 days (auto-cleanup scheduled)' + : '๐Ÿ—‘๏ธ Removed immediately'; + const backendPort = ${{ steps.context.outputs.backend_port }}; + const postgresPort = ${{ steps.context.outputs.postgres_port }}; + + // Create comprehensive cleanup status comment + const comment = `## ๐Ÿงน PR Preview Environment Cleaned Up! + + ### ๐Ÿ“Š Cleanup Summary + | Resource | Status | + |----------|--------| + | **Containers** | โœ… Stopped and removed | + | **PR-Specific Images** | โœ… Removed from RPi5 | + | **Shared Images** | ๐Ÿ“ฆ Retained (postgres:17) | + | **Network** | โœ… Removed | + | **Compose File** | โœ… Deleted | + | **Environment File** | โœ… Deleted | + | **Database Volume** | ${volumeStatus} | + + ### ๐Ÿ“ Details + - **PR Number:** #${prNumber} + - **Reason:** ${cleanupReason} + - **Backend Port:** ${backendPort} (now available) + - **Postgres Port:** ${postgresPort} (now available) + - **Project Name:** \`pr-${prNumber}\` + + ### ๐Ÿ“ฆ Image Cleanup Policy + - **PR-specific images removed** from RPi5 to prevent accumulation + - **Shared images retained** (postgres:17 used by all PRs) + - Images remain in GHCR for auditability and future deployments + - Frees disk space on RPi5 while maintaining deployment history + + ### โฐ Volume Retention Policy + ${isMerged + ? 
'- **Merged PRs:** Database volume retained for 7 days\n- Allows post-merge investigation if needed\n- Volume: `pr-' + prNumber + '_postgres_data`\n- Auto-cleanup: ' + new Date(Date.now() + 7*24*60*60*1000).toISOString().split('T')[0] + : '- **Closed PRs:** Database volume removed immediately\n- Frees up disk space on RPi5\n- No data retention for abandoned PRs'} + + --- + *Cleaned up: ${new Date().toISOString()}* + *Workflow: [\`cleanup-pr-preview.yml\`](https://github.com/${{ github.repository }}/actions/workflows/cleanup-pr-preview.yml)*`; + + // Find and delete existing deployment comment, then post cleanup as new comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + // Look for original deployment comment from bot + const botComment = comments.find(c => + c.user.type === 'Bot' && c.body.includes('PR Preview Environment Deployed') + ); + + if (botComment) { + // Delete the deployment comment since environment is cleaned up + await github.rest.issues.deleteComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + }); + console.log('โœ… Deleted deployment comment (environment cleaned up)'); + } + + // Post fresh cleanup comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: comment, + }); + console.log('โœ… Posted cleanup status comment'); + + # Log final cleanup summary to workflow output + - name: Cleanup Summary + run: | + echo "::notice::โœ… Cleanup complete for PR #${{ steps.context.outputs.pr_number }}" + echo "::notice::๐Ÿ—‘๏ธ Resources removed: containers, PR-specific images, volumes, network, compose file, env file" + echo "::notice::๐Ÿ“ฆ Shared images retained: postgres:17 (used by all PRs)" + echo "::notice::๐ŸŽ‰ RPi5 disk space freed for other PR previews" + + # =========================================================================== + # JOB 2: Build main-arm64 Image (only when PR is merged) + # =========================================================================== + build-main-arm64: + name: Build main-arm64 Image + runs-on: [self-hosted, Linux, ARM64, neo] + needs: cleanup-preview + if: needs.cleanup-preview.outputs.is_merged == 'true' + environment: pr-preview + + outputs: + main_image_tag: ${{ steps.outputs.outputs.main_image_tag }} + main_image_digest: ${{ steps.outputs.outputs.main_image_digest }} + + steps: + # Get the latest main branch code + - name: Checkout Main Branch + uses: actions/checkout@v4 + with: + ref: main + + # Authenticate with GitHub Container Registry to push/pull images + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Set up Docker BuildKit for advanced caching + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: | + image=moby/buildkit:latest + network=host + + # Calculate image tags for main + - name: Calculate Main Image Tags + id: tags + run: | + IMAGE_BASE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}" + MAIN_TAG="${IMAGE_BASE}:main-arm64" + MAIN_SHA_TAG="${IMAGE_BASE}:main-arm64-${{ github.sha }}" + echo "main_tag=${MAIN_TAG}" >> $GITHUB_OUTPUT + echo "main_sha_tag=${MAIN_SHA_TAG}" >> $GITHUB_OUTPUT + echo "pr_tag=${IMAGE_BASE}:pr-${{ needs.cleanup-preview.outputs.pr_number }}" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ“ฆ Main 
image: ${MAIN_TAG}" + + # Build main-arm64 image using PR image as cache source + - name: Build and Push main-arm64 Image + id: build + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile + platforms: linux/arm64 + push: true + tags: | + ${{ steps.tags.outputs.main_tag }} + ${{ steps.tags.outputs.main_sha_tag }} + cache-from: | + type=registry,ref=${{ steps.tags.outputs.pr_tag }} + type=registry,ref=${{ steps.tags.outputs.main_tag }} + type=gha + cache-to: type=gha,mode=max + labels: | + org.opencontainers.image.title=Refactor Platform Backend (main-arm64) + org.opencontainers.image.description=Main branch ARM64 image for layer caching + org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} + build-args: | + BUILDKIT_INLINE_CACHE=1 + CARGO_INCREMENTAL=${{ vars.CARGO_INCREMENTAL }} + RUSTC_WRAPPER=${{ vars.RUSTC_WRAPPER }} + provenance: true + sbom: false + + # Store outputs for the next job + - name: Set Build Outputs + id: outputs + run: | + echo "main_image_tag=${{ steps.tags.outputs.main_tag }}" >> $GITHUB_OUTPUT + echo "main_image_digest=${{ steps.build.outputs.digest }}" >> $GITHUB_OUTPUT + + # Create cryptographic proof of how the main image was built + - name: Attest Build Provenance + continue-on-error: true + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + subject-digest: ${{ steps.build.outputs.digest }} + push-to-registry: true + + # =========================================================================== + # JOB 3: Delete PR Image from GHCR (only when PR is merged) + # =========================================================================== + delete-pr-image: + name: Delete PR Image from GHCR + runs-on: ubuntu-24.04 + needs: [cleanup-preview, build-main-arm64] + if: needs.cleanup-preview.outputs.is_merged == 'true' + + steps: + # Delete the PR-specific image from GHCR now that main-arm64 is built + - name: Delete PR Image from GHCR + run: | + PR_NUMBER="${{ needs.cleanup-preview.outputs.pr_number }}" + IMAGE_NAME="${{ env.IMAGE_NAME }}" + + # Get the package version ID for the PR image + echo "๐Ÿ” Finding PR image package in GHCR..." + + # Use GitHub API to find and delete the PR image + PACKAGE_VERSIONS=$(gh api \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "/orgs/${{ github.repository_owner }}/packages/container/${IMAGE_NAME##*/}/versions" \ + --jq ".[] | select(.metadata.container.tags[] | contains(\"pr-${PR_NUMBER}\")) | .id" || echo "") + + if [[ -n "$PACKAGE_VERSIONS" ]]; then + echo "๐Ÿ—‘๏ธ Deleting PR image versions from GHCR..." 
+ for VERSION_ID in $PACKAGE_VERSIONS; do + echo " Deleting version ID: $VERSION_ID" + gh api \ + --method DELETE \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "/orgs/${{ github.repository_owner }}/packages/container/${IMAGE_NAME##*/}/versions/${VERSION_ID}" || echo " โš ๏ธ Failed to delete version $VERSION_ID" + done + echo "โœ… PR image deleted from GHCR" + else + echo "โš ๏ธ No PR image found in GHCR (may have been deleted already)" + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # =========================================================================== + # JOB 4: Update PR Comment with Final Status + # =========================================================================== + update-pr-comment: + name: Update PR Comment + runs-on: ubuntu-24.04 + needs: [cleanup-preview, build-main-arm64, delete-pr-image] + if: | + always() && + needs.cleanup-preview.outputs.is_merged == 'true' && + github.event_name == 'pull_request' + + steps: + # Update the PR comment with all cleanup and build details + - name: Update PR Comment with Full Status + uses: actions/github-script@v7 + with: + script: | + const prNumber = ${{ needs.cleanup-preview.outputs.pr_number }}; + const mainImageTag = '${{ needs.build-main-arm64.outputs.main_image_tag }}'; + const mainImageDigest = '${{ needs.build-main-arm64.outputs.main_image_digest }}'; + const buildSuccess = '${{ needs.build-main-arm64.result }}' === 'success'; + const deleteSuccess = '${{ needs.delete-pr-image.result }}' === 'success'; + + // Build the status table + let statusTable = `| Resource | Status | + |----------|--------| + | **Containers** | โœ… Stopped and removed | + | **PR-Specific Images (RPi5)** | โœ… Removed | + | **Database Volume (RPi5)** | โœ… Removed | + | **Network** | โœ… Removed | + | **Compose File** | โœ… Deleted | + | **Environment File** | โœ… Deleted |`; + + if (buildSuccess) { + statusTable += `\n| **main-arm64 Image** | โœ… Built and pushed |`; + } else { + statusTable += `\n| **main-arm64 Image** | โŒ Build failed |`; + } + + if (deleteSuccess) { + statusTable += `\n| **PR Image (GHCR)** | โœ… Deleted |`; + } else { + statusTable += `\n| **PR Image (GHCR)** | โš ๏ธ Deletion skipped or failed |`; + } + + // Build provenance section + let provenanceSection = ''; + if (buildSuccess && mainImageDigest) { + const shortDigest = mainImageDigest.substring(0, 19); + const attestationUrl = `https://github.com/${{ github.repository }}/attestations/${mainImageDigest}`; + provenanceSection = ` + ### ๐Ÿ” Security & Provenance + - **Image Tag:** \`${mainImageTag}\` + - **Digest:** \`${shortDigest}...\` + - **Attestation:** [View provenance](${attestationUrl}) + - **Built from:** main branch @ \`${{ github.sha }}\` + - **Registry:** [ghcr.io](https://github.com/${{ github.repository_owner }}?tab=packages&repo_name=${{ github.event.repository.name }}) + `; + } + + const comment = `## ๐Ÿงน PR Preview Environment Cleaned Up! 
+ + ### ๐Ÿ“Š Cleanup Summary + ${statusTable} + + ### ๐Ÿ“ Details + - **PR Number:** #${prNumber} + - **Status:** Merged into main + - **Resources:** All PR-specific resources removed from RPi5 + - **GHCR:** PR image deleted, main-arm64 image updated + ${provenanceSection} + ### ๐Ÿ’ก Layer Caching Strategy + - **main-arm64 image** now available for faster PR builds + - Future PR builds will use main-arm64 layers as cache + - Reduces build times and GHCR image accumulation + - Single source of truth: main-arm64 image + + --- + *Cleaned up: ${new Date().toISOString()}* + *Workflow: [\`cleanup-pr-preview.yml\`](https://github.com/${{ github.repository }}/actions/workflows/cleanup-pr-preview.yml)*`; + + // Find and update or create comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + const botComment = comments.find(c => + c.user.type === 'Bot' && c.body.includes('PR Preview Environment Cleaned Up') + ); + + if (botComment) { + // Update existing cleanup comment + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + body: comment, + }); + console.log('โœ… Updated cleanup status comment'); + } else { + // Create new cleanup comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: comment, + }); + console.log('โœ… Posted cleanup status comment'); + } diff --git a/.github/workflows/deploy-pr-preview.yml b/.github/workflows/deploy-pr-preview.yml new file mode 100644 index 00000000..08af5ffe --- /dev/null +++ b/.github/workflows/deploy-pr-preview.yml @@ -0,0 +1,713 @@ +# ============================================================================= +# PR Preview Deployment Workflow (DISABLED - Use pr-preview-backend.yml instead) +# ============================================================================= +# Purpose: Deploys isolated PR preview environments to RPi5 via Tailscale +# Features: ARM64 native builds, multi-tier caching, secure VPN deployment +# Target: Raspberry Pi 5 (ARM64) with Docker Compose via Tailscale SSH +# ============================================================================= +# TEMPORARILY DISABLED - Uncommented the pull_request trigger +# This workflow is temporarily disabled to prevent automatic runs +# Use pr-preview-backend.yml for PR preview deployments +# ============================================================================= + +name: Deploy PR Preview to RPi5 + +# Define when this workflow should run automatically or manually +on: + # TEMPORARILY DISABLED - PR trigger commented out + # pull_request: + # types: [opened, synchronize, reopened] + # branches: + # - main + workflow_dispatch: + inputs: + backend_branch: + description: "Backend branch to deploy" + required: true + default: "main" + type: string + pr_number: + description: "PR number (auto-detected for PR triggers)" + required: false + type: string + force_rebuild: + description: "Force rebuild without cache" + required: false + default: false + type: boolean + +# Prevent multiple deployments for the same PR from running simultaneously +concurrency: + group: preview-deploy-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: true + +# Define what GitHub resources this workflow can access +permissions: + contents: read + packages: write + pull-requests: write + attestations: write + id-token: write + 
+# Set environment variables that apply to all jobs in this workflow +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + # =========================================================================== + # JOB 1: Lint & Format Check + # =========================================================================== + lint: + name: Lint & Format + runs-on: ubuntu-24.04 + environment: pr-preview + + env: + CARGO_TERM_COLOR: ${{ vars.CARGO_TERM_COLOR }} + CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} + RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} + + steps: + # Get the source code for this PR/branch + - name: Checkout + uses: actions/checkout@v4 + + # Install Rust with clippy and rustfmt tools for code quality checks + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + components: clippy, rustfmt + + # Speed up builds by using cached Rust dependencies + - name: Use cached dependencies + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview" + key: "lint" + cache-all-crates: true + + # Check code quality and common mistakes with clippy + - name: Run clippy + run: cargo clippy --all-targets + + # Check if code follows Rust formatting standards + - name: Run format check + run: cargo fmt --all -- --check || echo "::warning::Code formatting issues found. Run 'cargo fmt --all' locally to fix." + continue-on-error: true + + # =========================================================================== + # JOB 2: Build & Test + # =========================================================================== + test: + name: Build & Test + runs-on: ubuntu-24.04 + environment: pr-preview + + env: + CARGO_TERM_COLOR: ${{ vars.CARGO_TERM_COLOR }} + CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} + RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} + + steps: + # Get the source code for this PR/branch + - name: Checkout + uses: actions/checkout@v4 + + # Install Rust compiler for x86_64 Linux (GitHub runner architecture) + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + targets: x86_64-unknown-linux-gnu + + # Configure OpenSSL paths for compilation on Ubuntu + - name: Set OpenSSL Paths + run: | + echo "OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + echo "OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> $GITHUB_ENV + + # Speed up builds by using cached Rust dependencies + - name: Use cached dependencies + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview" + key: "test" + cache-all-crates: true + save-if: ${{ github.ref == 'refs/heads/main' }} + + # Compile all Rust code to check for compilation errors + - name: Build + run: cargo build --all-targets + + # Run the test suite to ensure code works correctly + - name: Run tests + run: cargo test + + # =========================================================================== + # JOB 3: Native ARM64 Image Build On Neo (aka "The One") + # =========================================================================== + build-arm64-image: + name: Build ARM64 Backend Image + runs-on: [self-hosted, Linux, ARM64, neo] + environment: pr-preview + needs: [lint, test] + + outputs: + pr_number: ${{ steps.context.outputs.pr_number }} + image_tag_pr: ${{ steps.context.outputs.image_tag_pr }} + image_tag_sha: ${{ steps.context.outputs.image_tag_sha }} + backend_branch: ${{ steps.context.outputs.backend_branch }} + is_native_arm64: ${{ steps.context.outputs.is_native_arm64 }} + + steps: + # Figure out PR number, branch, and image tags 
for this deployment + - name: Set Deployment Context + id: context + run: | + if [[ "$(uname -m)" == "aarch64" ]]; then + echo "is_native_arm64=true" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿš€ Running on native ARM64 runner (Neo)" + else + echo "::error::Not running on ARM64 architecture - check runner configuration" + exit 1 + fi + + if [[ "${{ github.event_name }}" == "pull_request" ]]; then + PR_NUM="${{ github.event.pull_request.number }}" + BACKEND_BRANCH="${{ github.head_ref }}" + else + PR_NUM="${{ inputs.pr_number }}" + if [[ -z "$PR_NUM" ]]; then + PR_NUM=$((9000 + ${{ github.run_number }})) + fi + BACKEND_BRANCH="${{ inputs.backend_branch }}" + fi + + echo "pr_number=${PR_NUM}" >> $GITHUB_OUTPUT + echo "backend_branch=${BACKEND_BRANCH}" >> $GITHUB_OUTPUT + IMAGE_BASE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}" + IMAGE_TAG_PR="${IMAGE_BASE}:pr-${PR_NUM}" + IMAGE_TAG_SHA="${IMAGE_BASE}:pr-${PR_NUM}-${{ github.sha }}" + echo "image_tag_pr=${IMAGE_TAG_PR}" >> $GITHUB_OUTPUT + echo "image_tag_sha=${IMAGE_TAG_SHA}" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿš€ Building ARM64 PR #${PR_NUM} from branch '${BACKEND_BRANCH}'" + echo "::notice::๐Ÿ“ฆ Image: ${IMAGE_TAG_PR}" + + # Get the source code for the specific branch we're building + - name: Checkout Repository + uses: actions/checkout@v4 + with: + ref: ${{ steps.context.outputs.backend_branch }} + + # Speed up Rust compilation with cached dependencies + - name: Setup Rust Cache + uses: Swatinem/rust-cache@v2 + with: + shared-key: "pr-preview-arm64" + key: "arm64-${{ steps.context.outputs.backend_branch }}" + cache-all-crates: true + save-if: ${{ github.ref == 'refs/heads/main' || github.event_name == 'pull_request' }} + + # Authenticate with GitHub Container Registry to push Docker images + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Set up Docker with BuildKit for advanced caching and multi-platform builds + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: | + image=moby/buildkit:latest + network=host + + # Check if we already built an image for this exact commit to avoid duplicate work + - name: Check for Existing Image + id: check_image + run: | + if docker manifest inspect ${{ steps.context.outputs.image_tag_sha }} >/dev/null 2>&1; then + echo "image_exists=true" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ“ฆ Image already exists for SHA ${{ github.sha }}" + else + echo "image_exists=false" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ”จ Building new ARM64 image for SHA ${{ github.sha }}" + fi + + # Build the Docker image natively on ARM64 for best performance on RPi5 + # Uses multi-tier caching: PR image (if exists) โ†’ main-arm64 โ†’ GHA cache + - name: Build and Push ARM64 Backend Image + id: build_push + if: steps.check_image.outputs.image_exists != 'true' || inputs.force_rebuild == true + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ./Dockerfile + platforms: linux/arm64 + push: true + tags: | + ${{ steps.context.outputs.image_tag_pr }} + ${{ steps.context.outputs.image_tag_sha }} + cache-from: | + type=registry,ref=${{ steps.context.outputs.image_tag_pr }} + type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:main-arm64 + type=gha + cache-to: type=gha,mode=max + labels: | + org.opencontainers.image.title=Refactor Platform Backend PR-${{ steps.context.outputs.pr_number }} + org.opencontainers.image.description=PR preview for branch ${{ steps.context.outputs.backend_branch }} + org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.created=${{ github.event.head_commit.timestamp }} + pr.number=${{ steps.context.outputs.pr_number }} + pr.branch=${{ steps.context.outputs.backend_branch }} + build-args: | + BUILDKIT_INLINE_CACHE=1 + CARGO_INCREMENTAL=${{ vars.CARGO_INCREMENTAL }} + RUSTC_WRAPPER=${{ vars.RUSTC_WRAPPER }} + provenance: true + sbom: false + + # If image already exists, just tag it with the PR tag to avoid rebuilding + - name: Tag Existing Image + if: steps.check_image.outputs.image_exists == 'true' && inputs.force_rebuild != true + run: | + docker buildx imagetools create \ + --tag ${{ steps.context.outputs.image_tag_pr }} \ + ${{ steps.context.outputs.image_tag_sha }} + + # Show compilation cache statistics for debugging build performance + - name: Display sccache Statistics + if: always() + run: | + echo "::group::sccache final stats" + if command -v sccache >/dev/null 2>&1; then + sccache --show-stats + else + echo "sccache not available" + fi + echo "::endgroup::" + + # Create cryptographic proof of how this image was built for security + - name: Attest Build Provenance + if: steps.build_push.conclusion == 'success' + continue-on-error: true + uses: actions/attest-build-provenance@v2 + with: + subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + subject-digest: ${{ steps.build_push.outputs.digest }} + push-to-registry: true + + # =========================================================================== + # JOB 4: Deploy to RPi5 via Tailscale VPN + # =========================================================================== + deploy-to-rpi5: + name: Deploy to RPi5 via Tailscale + runs-on: [self-hosted, Linux, ARM64, neo] + needs: build-arm64-image + environment: pr-preview + + steps: + # Calculate unique port numbers for this PR to avoid conflicts + - name: Calculate Deployment Ports + id: ports + run: | + PR_NUM="${{ needs.build-arm64-image.outputs.pr_number }}" + BACKEND_CONTAINER_PORT=${{ vars.BACKEND_PORT_BASE }} + BACKEND_EXTERNAL_PORT=$((${{ vars.BACKEND_PORT_BASE }} + PR_NUM)) + POSTGRES_EXTERNAL_PORT=$((${{ vars.POSTGRES_PORT_BASE }} + PR_NUM)) + FRONTEND_EXTERNAL_PORT=$((${{ vars.FRONTEND_PORT_BASE }} + PR_NUM)) + echo "backend_container_port=${BACKEND_CONTAINER_PORT}" >> $GITHUB_OUTPUT + echo "backend_port=${BACKEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "postgres_port=${POSTGRES_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "frontend_port=${FRONTEND_EXTERNAL_PORT}" >> $GITHUB_OUTPUT + echo "project_name=pr-${PR_NUM}" >> $GITHUB_OUTPUT + echo "::notice::๐Ÿ”Œ Postgres: ${POSTGRES_EXTERNAL_PORT} | Backend: ${BACKEND_EXTERNAL_PORT} | Frontend: ${FRONTEND_EXTERNAL_PORT}" + + # Get the Docker Compose file for PR preview deployment + - name: Checkout Repository + uses: actions/checkout@v4 + with: + ref: ${{ needs.build-arm64-image.outputs.backend_branch }} + + # Verify we can 
reach the RPi5 through Tailscale VPN + - name: Verify Tailscale Connection + run: | + # Tailscale is pre-installed and already connected on the self-hosted runner + # Just verify the connection status + echo "๐Ÿ” Checking Tailscale connection status..." + tailscale status || echo "โš ๏ธ Tailscale status check failed, but continuing..." + echo "โœ… Tailscale verification complete" + + # Set up SSH key and known hosts to connect securely to RPi5 + - name: Setup SSH Configuration + run: | + mkdir -p ~/.ssh + chmod 700 ~/.ssh + echo "${{ secrets.RPI5_SSH_KEY }}" > ~/.ssh/id_ed25519 + chmod 600 ~/.ssh/id_ed25519 + echo "${{ secrets.RPI5_HOST_KEY }}" >> ~/.ssh/known_hosts + chmod 644 ~/.ssh/known_hosts + + # Test SSH connection to RPi5 before attempting deployment + - name: Test SSH Connection + run: | + echo "๐Ÿ” Testing SSH connection to ${{ secrets.RPI5_TAILSCALE_NAME }}..." + if ! ssh -o StrictHostKeyChecking=accept-new -o BatchMode=yes -o ConnectTimeout=10 \ + -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + 'echo "SSH connection successful"'; then + echo "::error::SSH connection failed to ${{ secrets.RPI5_TAILSCALE_NAME }}" + exit 1 + fi + echo "::notice::โœ… SSH connection verified" + + # Ensure the Postgres schema exists before running migrations + - name: Prepare Postgres Schema + run: | + PR_NUMBER="${{ needs.build-arm64-image.outputs.pr_number }}" + BACKEND_IMAGE="${{ needs.build-arm64-image.outputs.image_tag_pr }}" + PROJECT_NAME="${{ steps.ports.outputs.project_name }}" + + echo "๐Ÿ“ฆ Transferring compose file to RPi5 for schema preparation..." + scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + docker-compose.pr-preview.yaml \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}-compose.yaml + + # Assemble environment configuration for the remote compose commands + cat > /tmp/pr-${PR_NUMBER}.env << EOF + PR_NUMBER=${PR_NUMBER} + BACKEND_IMAGE=${BACKEND_IMAGE} + PROJECT_NAME=${PROJECT_NAME} + PR_POSTGRES_PORT=${{ steps.ports.outputs.postgres_port }} + PR_BACKEND_PORT=${{ steps.ports.outputs.backend_port }} + PR_BACKEND_CONTAINER_PORT=${{ steps.ports.outputs.backend_container_port }} + PR_FRONTEND_PORT=${{ steps.ports.outputs.frontend_port }} + POSTGRES_USER=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_USER }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_PASSWORD=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_PASSWORD }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_DB=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_DB }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_SCHEMA=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_SCHEMA }}' | tr -d '\n\r' | tr -d ' ') + RUST_ENV=${{ vars.RUST_ENV }} + RUST_BACKTRACE=${{ vars.RUST_BACKTRACE }} + BACKEND_INTERFACE=${{ vars.BACKEND_INTERFACE }} + BACKEND_ALLOWED_ORIGINS=${{ vars.BACKEND_ALLOWED_ORIGINS }} + BACKEND_LOG_FILTER_LEVEL=${{ vars.BACKEND_LOG_FILTER_LEVEL }} + BACKEND_SESSION_EXPIRY_SECONDS=${{ vars.BACKEND_SESSION_EXPIRY_SECONDS }} + TIPTAP_APP_ID=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_APP_ID }}' | tr -d '\n\r') + TIPTAP_URL=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_URL }}' | tr -d '\n\r') + TIPTAP_AUTH_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_AUTH_KEY }}' | tr -d '\n\r') + TIPTAP_JWT_SIGNING_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY }}' | tr -d '\n\r') + MAILERSEND_API_KEY=$(echo '${{ secrets.PR_PREVIEW_MAILERSEND_API_KEY }}' | tr -d '\n\r') + WELCOME_EMAIL_TEMPLATE_ID=$(echo '${{ secrets.PR_PREVIEW_WELCOME_EMAIL_TEMPLATE_ID }}' | tr -d 
'\n\r') + GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + GITHUB_ACTOR=${{ github.actor }} + RPI5_USERNAME=${{ secrets.RPI5_USERNAME }} + SERVICE_STARTUP_WAIT_SECONDS=${{ vars.SERVICE_STARTUP_WAIT_SECONDS }} + EOF + + echo "๐Ÿ“ฆ Transferring environment configuration to RPi5..." + scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + /tmp/pr-${PR_NUMBER}.env \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}.env + + cat << 'PREP_SCRIPT' | ssh -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + /bin/bash + set -eo pipefail + + ENV_FILE=$(ls -t ~/pr-*.env 2>/dev/null | head -1) + if [[ -f "$ENV_FILE" ]]; then + echo "๐Ÿ“ฅ Found environment file for schema prep: $ENV_FILE" + # Load environment configuration for compose commands + set -a + source "$ENV_FILE" + set +a + else + echo "โŒ Environment file not found during schema preparation!" + exit 1 + fi + + # Guard against accidentally running on the GitHub runner + if [[ "$(hostname)" == *"runner"* ]] || [[ "$(pwd)" == *"runner"* ]]; then + echo "โŒ Schema preparation running on GitHub runner instead of target server!" + exit 1 + fi + + cd /home/${RPI5_USERNAME} + + # Fully reset prior deployment state and drop any persisted volumes + echo "๐Ÿงน Resetting previous deployment (including database volume)..." + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml down -v 2>/dev/null || true + + # Start only Postgres so the schema can be provisioned cleanly + echo "๐Ÿ˜ Starting postgres for schema preparation..." + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml --env-file "$ENV_FILE" up -d postgres + + echo "โณ Waiting for postgres to become ready..." + READY="" + for attempt in {1..30}; do + if docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml exec -T postgres \ + env PGPASSWORD="${POSTGRES_PASSWORD}" pg_isready -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" >/dev/null 2>&1; then + READY="yes" + break + fi + sleep 2 + done + + if [[ -z "$READY" ]]; then + echo "โŒ Postgres did not become ready in time" + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml logs postgres || true + exit 1 + fi + + # Create the schema, grant privileges, and set search_path every run + echo "๐Ÿ›  Ensuring schema ${POSTGRES_SCHEMA} exists and permissions are set..." 
+ docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml exec -T postgres \ + env PGPASSWORD="${POSTGRES_PASSWORD}" psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" < /tmp/pr-${PR_NUMBER}.env << EOF + PR_NUMBER=${PR_NUMBER} + BACKEND_IMAGE=${BACKEND_IMAGE} + PROJECT_NAME=${PROJECT_NAME} + PR_POSTGRES_PORT=${{ steps.ports.outputs.postgres_port }} + PR_BACKEND_PORT=${{ steps.ports.outputs.backend_port }} + PR_BACKEND_CONTAINER_PORT=${{ steps.ports.outputs.backend_container_port }} + PR_FRONTEND_PORT=${{ steps.ports.outputs.frontend_port }} + POSTGRES_USER=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_USER }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_PASSWORD=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_PASSWORD }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_DB=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_DB }}' | tr -d '\n\r' | tr -d ' ') + POSTGRES_SCHEMA=$(echo '${{ secrets.PR_PREVIEW_POSTGRES_SCHEMA }}' | tr -d '\n\r' | tr -d ' ') + RUST_ENV=${{ vars.RUST_ENV }} + RUST_BACKTRACE=${{ vars.RUST_BACKTRACE }} + BACKEND_INTERFACE=${{ vars.BACKEND_INTERFACE }} + BACKEND_ALLOWED_ORIGINS=${{ vars.BACKEND_ALLOWED_ORIGINS }} + TIPTAP_AUTH_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_AUTH_KEY }}' | tr -d '\n\r') + TIPTAP_JWT_SIGNING_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY }}' | tr -d '\n\r') + TIPTAP_APP_ID=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_APP_ID }}' | tr -d '\n\r') + TIPTAP_URL=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_URL }}' | tr -d '\n\r') + TIPTAP_AUTH_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_AUTH_KEY }}' | tr -d '\n\r') + TIPTAP_JWT_SIGNING_KEY=$(echo '${{ secrets.PR_PREVIEW_TIPTAP_JWT_SIGNING_KEY }}' | tr -d '\n\r') + MAILERSEND_API_KEY=$(echo '${{ secrets.PR_PREVIEW_MAILERSEND_API_KEY }}' | tr -d '\n\r') + WELCOME_EMAIL_TEMPLATE_ID=$(echo '${{ secrets.PR_PREVIEW_WELCOME_EMAIL_TEMPLATE_ID }}' | tr -d '\n\r') + GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} + GITHUB_ACTOR=${{ github.actor }} + RPI5_USERNAME=${{ secrets.RPI5_USERNAME }} + SERVICE_STARTUP_WAIT_SECONDS=${{ vars.SERVICE_STARTUP_WAIT_SECONDS }} + EOF + + # Copy environment file to RPi5 + scp -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + /tmp/pr-${PR_NUMBER}.env \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }}:/home/${{ secrets.RPI5_USERNAME }}/pr-${PR_NUMBER}.env + + # Execute the actual deployment commands on RPi5 + cat << 'DEPLOY_SCRIPT' | ssh -o StrictHostKeyChecking=accept-new -i ~/.ssh/id_ed25519 \ + ${{ secrets.RPI5_USERNAME }}@${{ secrets.RPI5_TAILSCALE_NAME }} \ + /bin/bash + set -eo pipefail + + # Load all environment variables from the file we just transferred + ENV_FILE=$(ls -t ~/pr-*.env 2>/dev/null | head -1) + if [[ -f "$ENV_FILE" ]]; then + echo "๐Ÿ“ฅ Found environment file: $ENV_FILE" + # Export variables so compose and helper commands share configuration + set -a + source "$ENV_FILE" + set +a + else + echo "โŒ Environment file not found!" + exit 1 + fi + + # Guard against accidentally running on the GitHub runner + if [[ "$(hostname)" == *"runner"* ]] || [[ "$(pwd)" == *"runner"* ]]; then + echo "โŒ Script running on GitHub runner instead of target server!" + exit 1 + fi + + cd /home/${RPI5_USERNAME} + + # Authenticate with GHCR so the newest image pulls successfully + echo "๐Ÿ“ฆ Logging into GHCR..." + echo "${GITHUB_TOKEN}" | docker login ghcr.io -u ${GITHUB_ACTOR} --password-stdin + + # Pull the PR-specific backend image prior to compose startup + echo "๐Ÿ“ฅ Pulling image: ${BACKEND_IMAGE}..." 
+ docker pull ${BACKEND_IMAGE} + + # Launch the full stack using the prepared environment file + echo "๐Ÿš€ Starting PR preview environment..." + docker compose -p ${PROJECT_NAME} -f pr-${PR_NUMBER}-compose.yaml --env-file "$ENV_FILE" up -d + + # Give services a brief warm-up period before validation checks + echo "โณ Waiting ${SERVICE_STARTUP_WAIT_SECONDS} seconds for services..." + sleep ${SERVICE_STARTUP_WAIT_SECONDS} + + # Show container state for observability + echo "๐Ÿฉบ Deployment status:" + docker compose -p ${PROJECT_NAME} ps + + # Confirm migrations completed successfully and dump recent logs + echo "๐Ÿ“œ Checking migration status..." + MIGRATOR_EXIT_CODE=$(docker inspect ${PROJECT_NAME}-migrator-1 --format='{{.State.ExitCode}}' 2>/dev/null || echo "255") + docker logs ${PROJECT_NAME}-migrator-1 2>&1 | tail -20 + + if [[ "${MIGRATOR_EXIT_CODE}" != "0" ]]; then + echo "โŒ Migration failed with exit code: ${MIGRATOR_EXIT_CODE}" + echo "๐Ÿ“œ Full migration logs:" + docker logs ${PROJECT_NAME}-migrator-1 2>&1 + exit 1 + fi + echo "โœ… Migrations completed successfully" + + # Validate that the backend service is healthy and running + echo "๐Ÿ“œ Checking backend status..." + BACKEND_STATUS=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.Status}}' 2>/dev/null || echo "missing") + docker logs ${PROJECT_NAME}-backend-1 2>&1 | tail -20 + + if [[ "${BACKEND_STATUS}" != "running" ]]; then + echo "โŒ Backend is not running (status: ${BACKEND_STATUS})" + echo "๐Ÿ“œ Full backend logs:" + docker logs ${PROJECT_NAME}-backend-1 2>&1 + exit 1 + fi + + # Check if backend is in a crash loop (restarting repeatedly) + BACKEND_RESTART_COUNT=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.RestartCount}}' 2>/dev/null || echo "0") + if [[ "${BACKEND_RESTART_COUNT}" -gt "0" ]]; then + echo "โš ๏ธ Backend has restarted ${BACKEND_RESTART_COUNT} time(s) - checking for crash loop" + sleep 5 + BACKEND_STATUS_RECHECK=$(docker inspect ${PROJECT_NAME}-backend-1 --format='{{.State.Status}}' 2>/dev/null || echo "missing") + if [[ "${BACKEND_STATUS_RECHECK}" != "running" ]]; then + echo "โŒ Backend is crash looping" + echo "๐Ÿ“œ Full backend logs:" + docker logs ${PROJECT_NAME}-backend-1 2>&1 + exit 1 + fi + fi + + echo "โœ… Backend is running successfully" + echo "โœ… Deployment complete!" + + # Remove the copied env file from disk now that deployment finished + rm -f "$ENV_FILE" + DEPLOY_SCRIPT + + # Post a comment on the PR with links to access the preview environment + - name: Comment on PR with Preview URLs + if: github.event_name == 'pull_request' + uses: actions/github-script@v7 + with: + script: | + const prNumber = ${{ needs.build-arm64-image.outputs.pr_number }}; + const backendPort = ${{ steps.ports.outputs.backend_port }}; + const postgresPort = ${{ steps.ports.outputs.postgres_port }}; + const frontendPort = ${{ steps.ports.outputs.frontend_port }}; + const backendBranch = '${{ needs.build-arm64-image.outputs.backend_branch }}'; + const imageTag = '${{ needs.build-arm64-image.outputs.image_tag_pr }}'; + const isNativeArm64 = '${{ needs.build-arm64-image.outputs.is_native_arm64 }}' === 'true'; + const backendUrl = `http://${{ secrets.RPI5_TAILSCALE_NAME }}:${backendPort}`; + const frontendUrl = `http://${{ secrets.RPI5_TAILSCALE_NAME }}:${frontendPort}`; + + const comment = `## ๐Ÿš€ PR Preview Environment Deployed! 
+ + ### ๐Ÿ”— Access URLs + | Service | URL | + |---------|-----| + | **Frontend** | [${frontendUrl}](${frontendUrl}) | + | **Backend API** | [${backendUrl}](${backendUrl}) | + | **Health Check** | [${backendUrl}/health](${backendUrl}/health) | + + ### ๐Ÿ“Š Environment Details + - **PR Number:** #${prNumber} + - **Backend Branch:** \`${backendBranch}\` + - **Commit:** \`${{ github.sha }}\` + - **Image:** \`${imageTag}\` + - **Ports:** Frontend: ${frontendPort} | Backend: ${backendPort} | Postgres: ${postgresPort} + - **Build Type:** ${isNativeArm64 ? '๐Ÿš€ Native ARM64' : 'โš ๏ธ ARM64 Emulation'} + + ### ๐Ÿ” Access Requirements + 1. **Connect to Tailscale** (required) + 2. Access frontend: ${frontendUrl} + 3. Access backend: ${backendUrl} + + ### ๐Ÿงช Testing + \`\`\`bash + # Health check + curl ${backendUrl}/health + + # API test + curl ${backendUrl}/api/v1/... + \`\`\` + + ### ๐Ÿงน Cleanup + _Environment auto-cleaned when PR closes/merges_ + + --- + *Deployed: ${new Date().toISOString()}* + *Optimizations: Native ARM64 build on Neo + sccache + Rust cache + Docker BuildKit*`; + + // Find and delete any existing preview comments to keep the UI clean + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + const botComment = comments.find(c => + c.user.type === 'Bot' && c.body.includes('PR Preview Environment') + ); + + if (botComment) { + await github.rest.issues.deleteComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + }); + } + + // Post a fresh comment which will appear below the most recent commit + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: comment, + }); + + # Show deployment summary for manual workflow runs + - name: Display Deployment Summary + if: github.event_name == 'workflow_dispatch' + run: | + echo "::notice::โœ… Deployment complete!" 
+ echo "::notice::๐ŸŒ Frontend: http://${{ secrets.RPI5_TAILSCALE_NAME }}:${{ steps.ports.outputs.frontend_port }}" + echo "::notice::๐ŸŒ Backend: http://${{ secrets.RPI5_TAILSCALE_NAME }}:${{ steps.ports.outputs.backend_port }}" + echo "::notice::๐Ÿ—„๏ธ Postgres: ${{ secrets.RPI5_TAILSCALE_NAME }}:${{ steps.ports.outputs.postgres_port }}" + echo "::notice::๐Ÿ“ฆ Image: ${{ needs.build-arm64-image.outputs.image_tag_pr }}" + echo "::notice::๐Ÿ—๏ธ Build: Native ARM64 on Neo" diff --git a/.github/workflows/pr-preview-backend.yml b/.github/workflows/pr-preview-backend.yml new file mode 100644 index 00000000..de990bdb --- /dev/null +++ b/.github/workflows/pr-preview-backend.yml @@ -0,0 +1,62 @@ +# ============================================================================= +# Backend PR Preview Overlay Workflow +# ============================================================================= +# Purpose: Trigger PR preview deployments when backend PRs are opened/updated +# Strategy: Build backend from PR branch, use main-arm64 frontend image +# Calls: ci-deploy-pr-preview.yml (reusable workflow) +# ============================================================================= + +name: PR Preview (Backend) + +on: + pull_request: + # Trigger on PR lifecycle events + types: [opened, synchronize, reopened] + # Only run for backend code changes + paths-ignore: + - '**.md' + - 'docs/**' + - '.github/**' + - '!.github/workflows/pr-preview-backend.yml' + - '!.github/workflows/ci-deploy-pr-preview.yml' + +# Prevent multiple deployments for the same PR +concurrency: + group: pr-preview-backend-${{ github.event.pull_request.number }} + cancel-in-progress: true + +permissions: + contents: read + packages: write + pull-requests: write + attestations: write + id-token: write + +jobs: + # =========================================================================== + # JOB: Call reusable workflow with backend-specific configuration + # =========================================================================== + deploy-backend-pr: + name: Deploy Backend PR Preview + # Call the reusable workflow located in this repository + uses: ./.github/workflows/ci-deploy-pr-preview.yml + with: + # This is a backend PR deployment + repo_type: 'backend' + # Use the PR number for port allocation and naming + pr_number: ${{ github.event.pull_request.number }} + # Build backend from this PR's branch + branch_name: ${{ github.head_ref }} + # Use main branch for frontend (will use main-arm64 image) + frontend_branch: 'main' + # Optional: override with specific image tags if needed + # backend_image: '' # Leave empty to build from PR branch + # frontend_image: '' # Leave empty to use main-arm64 + # Optional: force complete rebuild + force_rebuild: false + # ========================================================================= + # SECRETS - Inherit all secrets from calling workflow's environment + # ========================================================================= + # The reusable workflow declares required secrets + # We must pass them through using 'secrets: inherit' + secrets: inherit diff --git a/.vscode/settings.json b/.vscode/settings.json index 4d9636b5..b74909b1 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,4 @@ { - "rust-analyzer.showUnlinkedFileNotification": false + "rust-analyzer.showUnlinkedFileNotification": false, + "chatgpt.commentCodeLensEnabled": false } \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index f5864f15..a422e089 100644 --- a/Dockerfile +++ 
b/Dockerfile @@ -29,8 +29,8 @@ RUN echo "LIST OF CONTENTS" && ls -lahR /usr/src/app # Stage 3: Minimal runtime image FROM --platform=${BUILDPLATFORM} debian:bullseye-slim -# Install runtime dependencies -RUN apt-get update && apt-get install -y bash && rm -rf /var/lib/apt/lists/* +# Install runtime dependencies including postgresql-client for schema setup +RUN apt-get update && apt-get install -y bash postgresql-client && rm -rf /var/lib/apt/lists/* # Create non-root user with 1001 UID and /bin/bash shell RUN useradd -m -u 1001 -s /bin/bash appuser diff --git a/README.md b/README.md index f95e0184..16a1e7db 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,7 @@ The platform uses MailerSend for transactional emails. To configure email functi - `--welcome-email-template-id`: The template ID for welcome emails Example: + ```bash export MAILERSEND_API_KEY="your-api-key" export WELCOME_EMAIL_TEMPLATE_ID="your-template-id" @@ -228,3 +229,19 @@ Note that to generate a new Entity using the CLI you must ignore all other table ```bash DATABASE_URL=postgres://refactor:password@localhost:5432/refactor sea-orm-cli generate entity -s refactor_platform -o entity/src -v --with-serde both --serde-skip-deserializing-primary-key --ignore-tables {table to ignore} --ignore-tables {other table to ignore} ``` + +--- + +## PR Preview Environments + +This repository automatically deploys **isolated preview environments** for each pull request. When you open a PR, a complete stack (backend + frontend + database) deploys to a dedicated server on our Tailnet for testing before merge. + +**What happens automatically:** + +- โœ… PR opened โ†’ Environment deploys +- โœ… New commits โ†’ Environment updates +- โœ… PR closed/merged โ†’ Environment cleans up + +**Access:** Requires Tailscale VPN connection. Access URLs are posted as a comment on your PR in the GitHub Web UI. + +For detailed information, see the [PR Preview Environments Runbook](docs/runbooks/pr-preview-environments.md). 
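For a quick sanity check once you are connected to the Tailnet, a couple of `curl` calls against the URLs from the PR comment will confirm the preview is alive. This is only a sketch: the hostname and ports below are placeholders borrowed from the runbook's PR #42 example, so substitute the values posted on your own PR.

```bash
# Verify the Tailscale connection first (previews are only reachable over the VPN)
tailscale status

# Placeholder URLs for PR #42 (frontend 3042, backend 4042) -- use the URLs from your PR comment
curl http://rpi5-hostname:4042/health   # expected: {"status":"ok"}
curl -I http://rpi5-hostname:3042       # frontend should answer with an HTTP 200
```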
diff --git a/docker-compose.pr-preview.yaml b/docker-compose.pr-preview.yaml new file mode 100644 index 00000000..d220641b --- /dev/null +++ b/docker-compose.pr-preview.yaml @@ -0,0 +1,126 @@ +################################################################### +# Docker Compose Config for PR preview environments +# Uses Docker Compose projects (-p flag) for automatic namespacing +# All variables provided by GitHub Actions - no defaults needed +################################################################### + +services: + # PostgreSQL database service for PR environment + postgres: + image: postgres:17 # Use stable PostgreSQL 17 + environment: + # Database configuration - all values from GitHub Actions + POSTGRES_USER: ${POSTGRES_USER} # Database username + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} # Database password + POSTGRES_DB: ${POSTGRES_DB} # Database name + ports: + # Map dynamic external port to standard internal port 5432 + - "${PR_POSTGRES_PORT}:5432" + volumes: + # Persist database data - Docker Compose project creates unique volume automatically + - postgres_data:/var/lib/postgresql/data + networks: + # Use default network - Docker Compose project creates unique network automatically + - default + healthcheck: + # Verify database is ready before dependent services start + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] + interval: 5s # Check every 5 seconds + timeout: 5s # Timeout after 5 seconds + retries: 5 # Try 5 times before marking unhealthy + restart: unless-stopped # Restart automatically unless manually stopped + + # Database migration service - runs once to setup schema + migrator: + image: ${BACKEND_IMAGE} # Use same image as backend + platform: linux/arm64/v8 # Explicit ARM64 platform for RPi5 + environment: + # Application role configuration + ROLE: migrator # Tell app to run migrations + RUST_ENV: ${RUST_ENV} # Environment (staging/dev/prod) + RUST_BACKTRACE: ${RUST_BACKTRACE:-1} # Enable backtraces for debugging + # Database connection string for migrations + DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + DATABASE_SCHEMA: ${POSTGRES_SCHEMA} # Database schema name + depends_on: + postgres: + condition: service_healthy # Wait for postgres to be healthy + networks: + # Use default network - Docker Compose project creates unique network automatically + - default + restart: "no" # Run once and exit (don't restart) + + # Main backend application service + backend: + image: ${BACKEND_IMAGE} # PR-specific backend image + platform: linux/arm64/v8 # Explicit ARM64 platform for RPi5 + environment: + # Application role and environment + ROLE: app # Tell app to run as web server + RUST_ENV: ${RUST_ENV} # Environment configuration + RUST_BACKTRACE: ${RUST_BACKTRACE:-1} # Enable backtraces for debugging + + # Database connection configuration + DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + POSTGRES_SCHEMA: ${POSTGRES_SCHEMA} + + # Backend server configuration - use container port for internal binding + BACKEND_PORT: ${PR_BACKEND_CONTAINER_PORT} # Port app binds to inside container + BACKEND_INTERFACE: ${BACKEND_INTERFACE} # Network interface to bind to + BACKEND_ALLOWED_ORIGINS: ${BACKEND_ALLOWED_ORIGINS} # CORS configuration + BACKEND_LOG_FILTER_LEVEL: ${BACKEND_LOG_FILTER_LEVEL} # Logging level + BACKEND_SESSION_EXPIRY_SECONDS: ${BACKEND_SESSION_EXPIRY_SECONDS} # Session timeout + + # Optional third-party service credentials (set to 'UNUSED' if not needed) 
+ TIPTAP_APP_ID: ${TIPTAP_APP_ID} + TIPTAP_URL: ${TIPTAP_URL} + TIPTAP_AUTH_KEY: ${TIPTAP_AUTH_KEY} + TIPTAP_JWT_SIGNING_KEY: ${TIPTAP_JWT_SIGNING_KEY} + MAILERSEND_API_KEY: ${MAILERSEND_API_KEY} + WELCOME_EMAIL_TEMPLATE_ID: ${WELCOME_EMAIL_TEMPLATE_ID} + ports: + # Map dynamic external port to container internal port + - "${PR_BACKEND_PORT}:${PR_BACKEND_CONTAINER_PORT}" + depends_on: + - migrator # Start after migrations complete + networks: + # Use default network - Docker Compose project creates unique network automatically + - default + restart: unless-stopped # Restart automatically unless manually stopped + + # Frontend application service + frontend: + image: ${FRONTEND_IMAGE} # PR-specific frontend image + platform: linux/arm64/v8 # Explicit ARM64 platform for RPi5 + environment: + # Next.js production environment + NODE_ENV: production + # Frontend server configuration + HOSTNAME: 0.0.0.0 # Network interface to bind to + PORT: ${PR_FRONTEND_CONTAINER_PORT} # Port app binds to inside container + # Backend connection configuration (build-time values baked into image) + NEXT_PUBLIC_BACKEND_SERVICE_PROTOCOL: ${NEXT_PUBLIC_BACKEND_SERVICE_PROTOCOL:-http} + NEXT_PUBLIC_BACKEND_SERVICE_HOST: ${NEXT_PUBLIC_BACKEND_SERVICE_HOST:-localhost} + NEXT_PUBLIC_BACKEND_SERVICE_PORT: ${PR_BACKEND_PORT} + NEXT_PUBLIC_BACKEND_SERVICE_API_PATH: ${NEXT_PUBLIC_BACKEND_SERVICE_API_PATH:-api} + NEXT_PUBLIC_BACKEND_API_VERSION: ${NEXT_PUBLIC_BACKEND_API_VERSION:-v1} + NEXT_PUBLIC_TIPTAP_APP_ID: ${TIPTAP_APP_ID} + ports: + # Map dynamic external port to container internal port + - "${PR_FRONTEND_PORT}:${PR_FRONTEND_CONTAINER_PORT}" + depends_on: + - backend # Start after backend is running + networks: + # Use default network - Docker Compose project creates unique network automatically + - default + restart: unless-stopped # Restart automatically unless manually stopped + +# Docker Compose project (-p flag) automatically creates: +# - Unique network: {project_name}_default +# - Unique volume: {project_name}_postgres_data +# - Container names: {project_name}-{service_name}-1 +# This eliminates need for manual PR-specific naming in compose file + +volumes: + # Volume automatically namespaced by Docker Compose project + postgres_data: diff --git a/docs/runbooks/pr-preview-environments.md b/docs/runbooks/pr-preview-environments.md new file mode 100644 index 00000000..64bcfdd6 --- /dev/null +++ b/docs/runbooks/pr-preview-environments.md @@ -0,0 +1,329 @@ +# PR Preview Environments - Developer Guide + +## ๐Ÿš€ Quick Start + +**Want to test your changes in a live environment?** Just open a PR! A preview environment will be automatically deployed. + +### What You Get + +Every PR automatically gets: +- โœ… **Isolated full-stack environment** (Postgres + Backend + Frontend) +- โœ… **Unique ports** based on your PR number +- โœ… **Live database** with migrations applied +- โœ… **Access via Tailscale VPN** +- โœ… **Automatic cleanup** when PR closes + +### How to Access Your Preview + +1. **Open a PR** in either `refactor-platform-rs` or `refactor-platform-fe` +2. **Wait for deployment** (~5-10 minutes for first build) +3. **Check PR comment** for your unique URLs +4. **Connect to Tailscale** VPN (required for access) +5. **Visit your preview** at the URLs provided + +**Example PR Comment:** +``` +๐Ÿš€ PR Preview Environment Deployed! 
+ +Frontend: http://rpi5-hostname:3042 +Backend: http://rpi5-hostname:4042 +Health: http://rpi5-hostname:4042/health + +Ports: Frontend: 3042 | Backend: 4042 | Postgres: 5474 +``` + +--- + +## ๐Ÿ—๏ธ How It Works + +### Port Allocation + +Each PR gets unique ports calculated from the PR number: + +| Service | Formula | Example (PR #42) | +|---------|---------|------------------| +| Frontend | 3000 + PR# | 3042 | +| Backend | 4000 + PR# | 4042 | +| Postgres | 5432 + PR# | 5474 | + +### Deployment Flow + +**Backend PR:** +1. PR opened โ†’ Workflow triggers +2. Backend: Builds from **your PR branch** ๐Ÿ“ฆ +3. Frontend: Uses **main-arm64** image (or builds if missing) +4. Deploys: Full stack with your backend changes + +**Frontend PR:** +1. PR opened โ†’ Workflow triggers +2. Frontend: Builds from **your PR branch** ๐Ÿ“ฆ +3. Backend: Uses **main-arm64** image (or builds if missing) +4. Deploys: Full stack with your frontend changes + +### Architecture + +``` +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ GitHub Actions Workflow โ”‚ +โ”‚ โ”œโ”€ Lint & Test โ”‚ +โ”‚ โ”œโ”€ Build ARM64 Images (on Neo runner) โ”‚ +โ”‚ โ””โ”€ Deploy to RPi5 via Tailscale SSH โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ + โ†“ +โ”Œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” +โ”‚ RPi5 (ARM64) - Preview Environment โ”‚ +โ”‚ โ”œโ”€ Postgres (port: 5432 + PR#) โ”‚ +โ”‚ โ”œโ”€ Backend (port: 4000 + PR#) โ”‚ +โ”‚ โ””โ”€ Frontend (port: 3000 + PR#) โ”‚ +โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ +``` + +--- + +## ๐Ÿ”ง Configuration + +### Secrets & Variables + +**All secrets are managed in ONE place:** Backend repo's `pr-preview` environment. 
+ +This means: +- โœ… Frontend repo needs **zero** PR preview secrets +- โœ… No secret duplication across repos +- โœ… Single source of truth for configuration + +**Backend `pr-preview` Environment Contains:** +- RPi5 SSH connection details +- Database credentials +- TipTap API keys +- MailerSend API keys +- Frontend build configuration + +### Workflow Files + +**Backend Repository:** +- `.github/workflows/ci-deploy-pr-preview.yml` - Reusable workflow (does the heavy lifting) +- `.github/workflows/pr-preview-backend.yml` - Overlay for backend PRs + +**Frontend Repository:** +- `.github/workflows/pr-preview-frontend.yml` - Overlay for frontend PRs (calls backend reusable workflow) + +--- + +## ๐Ÿงช Testing Your Preview + +### Health Check + +```bash +# Check backend health +curl http://rpi5-hostname:4042/health + +# Expected response +{"status":"ok"} +``` + +### API Testing + +```bash +# List users endpoint +curl http://rpi5-hostname:4042/api/v1/users + +# Create a test user (if endpoint exists) +curl -X POST http://rpi5-hostname:4042/api/v1/users \ + -H "Content-Type: application/json" \ + -d '{"email":"test@example.com","name":"Test User"}' +``` + +### Database Access + +Connect to your PR's database: + +```bash +# SSH tunnel to Postgres +ssh -L 5432:localhost:5474 user@rpi5-hostname + +# Then connect locally +psql -h localhost -p 5432 -U refactor -d refactor +``` + +### Frontend Testing + +Visit `http://rpi5-hostname:3042` in your browser (Tailscale required). + +--- + +## ๐Ÿ” Troubleshooting + +### Deployment Failed + +1. **Check workflow logs:** + - Go to PR โ†’ "Checks" tab โ†’ Click on failed workflow + - Review error messages in logs + +2. **Common issues:** + - **Linting errors:** Fix code formatting issues + - **Test failures:** Ensure all tests pass locally first + - **Build errors:** Check Dockerfile and dependencies + - **Migration errors:** Verify database migrations are valid + +### Preview Not Accessible + +1. **Verify Tailscale connection:** + ```bash + tailscale status + # Should show you're connected to the network + ``` + +2. **Check service status:** + - View PR comment for deployment status + - Check workflow logs for errors + +3. **Verify ports:** + - Ensure you're using the correct port from PR comment + - Ports are unique per PR (3000+PR#, 4000+PR#) + +### Environment Not Updating + +- **Push new commits:** Workflow triggers on new commits +- **Re-run workflow:** Go to Actions โ†’ Re-run failed jobs +- **Check branch:** Ensure you're pushing to the PR branch + +--- + +## ๐Ÿงน Cleanup + +### Automatic Cleanup + +Preview environments are **automatically cleaned up** when: +- PR is closed +- PR is merged + +The cleanup workflow removes: +- Docker containers +- Database volumes +- Temporary files + +### Manual Cleanup (if needed) + +If you need to manually clean up a preview: + +```bash +# SSH into RPi5 +ssh user@rpi5-hostname + +# Stop and remove PR environment +docker compose -p pr-42 down -v + +# Remove compose file +rm ~/pr-42-compose.yaml ~/pr-42.env +``` + +--- + +## ๐ŸŽฏ Advanced Usage + +### Force Rebuild + +Trigger a complete rebuild (ignoring caches): + +1. Go to Actions โ†’ CI Deploy PR Preview +2. Click "Run workflow" +3. Select your branch +4. Set `force_rebuild: true` + +### Use Specific Image + +Override backend or frontend image: + +1. Edit overlay workflow (`.github/workflows/pr-preview-*.yml`) +2. Set `backend_image` or `frontend_image` input +3. 
Example: `backend_image: 'ghcr.io/refactor-group/refactor-platform-rs:main-arm64'` + +### Test Different Branch Combinations + +**Frontend PR using different backend branch:** + +1. Edit `.github/workflows/pr-preview-frontend.yml` +2. Change `backend_branch: 'main'` to desired branch +3. Commit and push + +**Backend PR using different frontend branch:** + +1. Edit `.github/workflows/pr-preview-backend.yml` +2. Change `frontend_branch: 'main'` to desired branch +3. Commit and push + +--- + +## ๐Ÿ“Š Monitoring + +### View Logs + +**Real-time logs during deployment:** +```bash +# SSH into RPi5 +ssh user@rpi5-hostname + +# View backend logs +docker logs pr-42-backend-1 -f + +# View frontend logs +docker logs pr-42-frontend-1 -f + +# View postgres logs +docker logs pr-42-postgres-1 -f + +# View migration logs +docker logs pr-42-migrator-1 +``` + +### Check Container Status + +```bash +# SSH into RPi5 +ssh user@rpi5-hostname + +# List all containers for your PR +docker compose -p pr-42 ps + +# View resource usage +docker stats pr-42-backend-1 pr-42-frontend-1 pr-42-postgres-1 +``` + +--- + +## ๐Ÿ” Security Notes + +- **Tailscale VPN Required:** Previews are not publicly accessible +- **Shared Environment:** All PRs deploy to same RPi5 (isolated by Docker Compose projects) +- **Temporary Data:** Database resets when environment is cleaned up +- **Do Not:** Store sensitive production data in preview environments + +--- + +## ๐Ÿค Contributing to PR Preview System + +Want to improve the PR preview system? + +**Key files to modify:** +- `ci-deploy-pr-preview.yml` - Main deployment logic +- `docker-compose.pr-preview.yaml` - Service definitions +- `pr-preview-backend.yml` / `pr-preview-frontend.yml` - Trigger configurations + +**After changes:** +1. Test in a PR first +2. Document changes in this runbook +3. Update PR template if user-facing changes + +--- + +## ๐Ÿ“š Additional Resources + +- [GitHub Actions Workflow Syntax](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) +- [Docker Compose Documentation](https://docs.docker.com/compose/) +- [Tailscale Setup Guide](https://tailscale.com/kb/start/) + +--- + +**Questions?** Ask in #engineering Slack channel or open an issue. + +**Happy Testing! 
๐Ÿš€** diff --git a/entity/src/status.rs b/entity/src/status.rs index 9d7a5a19..91d3a31f 100644 --- a/entity/src/status.rs +++ b/entity/src/status.rs @@ -1,12 +1,13 @@ use sea_orm::entity::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Eq, PartialEq, EnumIter, Deserialize, Serialize, DeriveActiveEnum)] +#[derive(Debug, Clone, Eq, PartialEq, EnumIter, Deserialize, Serialize, DeriveActiveEnum, Default)] #[sea_orm(rs_type = "String", db_type = "Enum", enum_name = "status")] pub enum Status { #[sea_orm(string_value = "not_started")] NotStarted, #[sea_orm(string_value = "in_progress")] + #[default] InProgress, #[sea_orm(string_value = "completed")] Completed, @@ -14,12 +15,6 @@ pub enum Status { WontDo, } -impl std::default::Default for Status { - fn default() -> Self { - Self::InProgress - } -} - impl From<&str> for Status { fn from(value: &str) -> Self { match value { diff --git a/entrypoint.sh b/entrypoint.sh index c8c690a5..7b75291f 100644 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -69,6 +69,52 @@ main() { log_info "Running in $RUST_ENV environment" log_info "Using schema $DATABASE_SCHEMA to apply the migrations in" + # Ensure schema exists before running migrations + # This makes the migrator idempotent and independent of external setup + log_info "Ensuring schema '$DATABASE_SCHEMA' exists..." + + # Extract connection parameters from DATABASE_URL + # Format: postgres://user:password@host:port/database + DB_HOST=$(echo "$DATABASE_URL" | sed -E 's|postgres://[^@]+@([^:/]+).*|\1|') + DB_PORT=$(echo "$DATABASE_URL" | sed -E 's|postgres://[^@]+@[^:]+:([0-9]+)/.*|\1|') + DB_NAME=$(echo "$DATABASE_URL" | sed -E 's|postgres://[^@]+@[^/]+/([^?]+).*|\1|') + DB_USER=$(echo "$DATABASE_URL" | sed -E 's|postgres://([^:]+):.*|\1|') + DB_PASS=$(echo "$DATABASE_URL" | sed -E 's|postgres://[^:]+:([^@]+)@.*|\1|') + + # Wait for PostgreSQL to be ready + log_info "Waiting for PostgreSQL to be ready..." + for i in $(seq 1 30); do + if PGPASSWORD="$DB_PASS" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1" >/dev/null 2>&1; then + log_success "PostgreSQL is ready" + break + fi + if [ "$i" -eq 30 ]; then + log_error "PostgreSQL did not become ready in time" + exit 1 + fi + sleep 1 + done + + # Create schema if it doesn't exist + log_info "Creating schema '$DATABASE_SCHEMA' if it doesn't exist..." + if ! PGPASSWORD="$DB_PASS" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "CREATE SCHEMA IF NOT EXISTS \"$DATABASE_SCHEMA\";" >/dev/null 2>&1; then + log_error "Failed to create schema '$DATABASE_SCHEMA'" + exit 1 + fi + + log_success "Schema '$DATABASE_SCHEMA' is ready" + + # Set search_path in DATABASE_URL so all connections use the correct schema + # Append options parameter to DATABASE_URL if not already present + if echo "$DATABASE_URL" | grep -q '?'; then + # URL already has query parameters + export DATABASE_URL="${DATABASE_URL}&options=-csearch_path%3D${DATABASE_SCHEMA}" + else + # No query parameters yet + export DATABASE_URL="${DATABASE_URL}?options=-csearch_path%3D${DATABASE_SCHEMA}" + fi + + log_info "Set search_path to '$DATABASE_SCHEMA' in DATABASE_URL" log_success "Running SeaORM migrations..." 
            exec /app/migrationctl up -s $DATABASE_SCHEMA
+
            ;;
diff --git a/web/src/lib.rs b/web/src/lib.rs
index 4698166e..fac9316c 100644
--- a/web/src/lib.rs
+++ b/web/src/lib.rs
@@ -17,7 +17,7 @@ use std::net::SocketAddr;
 use std::str::FromStr;
 use time::Duration;
 use tokio::net::TcpListener;
-use tower_http::cors::CorsLayer;
+use tower_http::cors::{AllowOrigin, CorsLayer};
 
 mod controller;
 mod error;
@@ -78,14 +78,31 @@ pub async fn init_server(app_state: AppState) -> Result<()> {
     let listen_addr = SocketAddr::from_str(&server_url).unwrap();
     let listener = TcpListener::bind(listen_addr).await.unwrap();
 
-    // Convert the type of the allow_origins Vec into a HeaderValue that the CorsLayer accepts
-    let allowed_origins = app_state
+
+    // Handle CORS origin configuration
+    // If wildcard (*) is present, mirror request origin; otherwise use explicit list
+    let has_wildcard = app_state
         .config
         .allowed_origins
         .iter()
-        .filter_map(|origin| origin.parse().ok())
-        .collect::<Vec<HeaderValue>>();
-    info!("allowed_origins: {allowed_origins:#?}");
+        .any(|origin| origin == "*");
+
+    info!("allowed_origins: {:#?}", app_state.config.allowed_origins);
+
+    // Mirror the request origin when wildcard "*" is configured to keep credentials enabled
+    let allow_origin = if has_wildcard {
+        info!("Using mirrored CORS origin (allows all origins with credentials)");
+        AllowOrigin::mirror_request()
+    } else {
+        let allowed_origins = app_state
+            .config
+            .allowed_origins
+            .iter()
+            .filter_map(|origin| origin.parse().ok())
+            .collect::<Vec<HeaderValue>>();
+        info!("Using specific CORS origins: {allowed_origins:#?}");
+        AllowOrigin::list(allowed_origins)
+    };
 
     let cors_layer = CorsLayer::new()
         .allow_methods([
@@ -110,7 +127,7 @@
         ])
         .expose_headers([ApiVersion::field_name().parse::<HeaderName>().unwrap()])
         .allow_private_network(true)
-        .allow_origin(allowed_origins);
+        .allow_origin(allow_origin);
 
     axum::serve(
         listener,