diff --git a/pkgs/client/project.json b/pkgs/client/project.json
index 5457f929c..9d0247d4a 100644
--- a/pkgs/client/project.json
+++ b/pkgs/client/project.json
@@ -66,7 +66,7 @@
       "options": {
         "cwd": "{projectRoot}",
         "commands": [
-          "../../scripts/supabase-start-locked.sh ."
+          "../../scripts/supabase-start-locked.sh $(pwd)"
        ],
        "parallel": false
      }
@@ -77,7 +77,7 @@
      "cache": false,
      "options": {
        "cwd": "{projectRoot}",
-        "commands": ["../../scripts/supabase-start-locked.sh ."],
+        "commands": ["../../scripts/supabase-start-locked.sh $(pwd)"],
        "parallel": false
      }
    },
@@ -132,7 +132,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "supabase db reset"
        ],
        "parallel": false
@@ -146,7 +146,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "psql 'postgresql://postgres:postgres@localhost:50522/postgres' -c 'SELECT pgflow_tests.reset_db()'",
          "vitest run __tests__/integration/"
        ],
@@ -171,7 +171,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "psql 'postgresql://postgres:postgres@localhost:50522/postgres' -c 'SELECT pgflow_tests.reset_db()'",
          "vitest run __tests__/"
        ],
@@ -191,7 +191,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "psql 'postgresql://postgres:postgres@localhost:50522/postgres' -c 'SELECT pgflow_tests.reset_db()'",
          "node scripts/performance-benchmark.mjs"
        ],
diff --git a/pkgs/core/project.json b/pkgs/core/project.json
index 8366c9f65..fe65a42b1 100644
--- a/pkgs/core/project.json
+++ b/pkgs/core/project.json
@@ -31,7 +31,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "scripts/atlas-dump-realtime-schema"
        ],
        "parallel": false
@@ -50,7 +50,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "mkdir -p .nx-inputs",
          "scripts/atlas-verify-schemas-synced > .nx-inputs/verify-schemas-synced.txt 2>&1 || (cat .nx-inputs/verify-schemas-synced.txt && exit 1)"
        ],
@@ -66,7 +66,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh .",
+          "../../scripts/supabase-start-locked.sh $(pwd)",
          "mkdir -p .nx-inputs",
          "supabase db reset > .nx-inputs/verify-migrations.txt 2>&1 || (cat .nx-inputs/verify-migrations.txt && exit 1)"
        ],
@@ -144,7 +144,7 @@
      "options": {
        "cwd": "{projectRoot}",
        "commands": [
-          "../../scripts/supabase-start-locked.sh ."
+ "../../scripts/supabase-start-locked.sh $(pwd)" ], "parallel": false } @@ -155,7 +155,7 @@ "cache": false, "options": { "cwd": "{projectRoot}", - "commands": ["../../scripts/supabase-start-locked.sh ."], + "commands": ["../../scripts/supabase-start-locked.sh $(pwd)"], "parallel": false } }, @@ -196,7 +196,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "supabase db reset" ], "parallel": false @@ -216,7 +216,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "scripts/run-test-with-colors" ], "parallel": false @@ -240,7 +240,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "echo 'Generating database types...'", "supabase gen types --local --schema pgflow --schema pgmq > src/database-types.ts", "echo 'Verifying generated types...'", @@ -259,7 +259,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "mkdir -p .nx-inputs", "echo 'Verifying database types are up-to-date...'", "cp src/database-types.ts .nx-inputs/database-types.ts.backup", diff --git a/pkgs/edge-worker/project.json b/pkgs/edge-worker/project.json index ab8970df9..54912500e 100644 --- a/pkgs/edge-worker/project.json +++ b/pkgs/edge-worker/project.json @@ -60,7 +60,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh ." + "../../scripts/supabase-start-locked.sh $(pwd)" ], "parallel": false } @@ -71,7 +71,7 @@ "cache": false, "options": { "cwd": "{projectRoot}", - "commands": ["../../scripts/supabase-start-locked.sh ."], + "commands": ["../../scripts/supabase-start-locked.sh $(pwd)"], "parallel": false } }, @@ -116,7 +116,7 @@ "mkdir -p supabase/migrations/", "rm -f supabase/migrations/*.sql", "cp ../core/supabase/migrations/*.sql supabase/migrations/", - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "supabase db reset" ], "parallel": false @@ -129,7 +129,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "supabase functions serve --env-file supabase/functions/.env --no-verify-jwt" ], "parallel": false @@ -194,7 +194,7 @@ "mkdir -p supabase/migrations/", "rm -f supabase/migrations/*.sql", "cp ../core/supabase/migrations/*.sql supabase/migrations/", - "../../scripts/supabase-start-locked.sh ." + "../../scripts/supabase-start-locked.sh $(pwd)" ], "parallel": false } diff --git a/pkgs/website/project.json b/pkgs/website/project.json index 6dd76b636..107505f16 100644 --- a/pkgs/website/project.json +++ b/pkgs/website/project.json @@ -109,7 +109,7 @@ "options": { "cwd": "{projectRoot}", "commands": [ - "../../scripts/supabase-start-locked.sh .", + "../../scripts/supabase-start-locked.sh $(pwd)", "astro dev" ] } diff --git a/scripts/LOCKING.md b/scripts/LOCKING.md new file mode 100644 index 000000000..f51fd6e17 --- /dev/null +++ b/scripts/LOCKING.md @@ -0,0 +1,208 @@ +# Supabase Locking Mechanism + +## Overview + +Each package in the monorepo has its own Supabase instance with separate ports and locks. 
+The locking mechanism prevents parallel starts of the same instance, but does not block different packages from starting their own instances simultaneously.
+
+## How It Works
+
+### Lock Scope
+
+- **Per-package locks**: Each package (core, edge-worker, client, website) gets its own lock based on its absolute directory path
+- **Independent instances**: Different packages can start their Supabase instances in parallel without blocking each other
+- **Serialized starts**: Multiple Nx targets in the same package that need Supabase will wait for each other
+
+### Lock Files
+
+- **Location**: `/tmp/supabase-start-<hash>.lock`
+- **Hash**: MD5 of the absolute project directory path
+- **Port range**: 40000-49999 (derived from first 8 hex chars of hash)
+- **Lock mechanism**: TCP port binding via netcat (NFS-safe alternative to flock)
+
+### Port Binding Approach
+
+We use TCP port binding instead of file locking (flock) because:
+
+1. **NFS compatibility**: GitHub Actions uses NFS storage where flock is unreliable
+2. **OS-managed cleanup**: The kernel automatically releases ports on process death (no orphaned locks)
+3. **Platform independence**: Works consistently across different filesystems
+
+## Package Ports
+
+Each package uses a distinct set of ports to avoid conflicts:
+
+| Package     | DB Port | API Port | Shadow Port | Pooler Port | Other Ports      |
+| ----------- | ------- | -------- | ----------- | ----------- | ---------------- |
+| core        | 50422   | 50421*   | 50420       | -           | 50323 (studio*)  |
+| edge-worker | 50322   | 50321    | 50320       | 50329       | 8083 (inspector) |
+| client      | 50522   | 50521    | 50520       | 50529       | -                |
+| website     | 55322   | 55321    | 55320       | -           | 55323, 55324     |
+
+_* = disabled in config_
+
+**Lock port range**: 40000-49999 (well separated from Supabase service ports)
+
+## Usage in Nx Targets
+
+### Cacheable Targets
+
+Targets that need Supabase can call `supabase-start-locked.sh` directly and still be cacheable:
+
+```json
+{
+  "verify-migrations": {
+    "executor": "nx:run-commands",
+    "cache": true,
+    "options": {
+      "cwd": "{projectRoot}",
+      "commands": [
+        "../../scripts/supabase-start-locked.sh $(pwd)",
+        "supabase db reset > .nx-inputs/verify-migrations.txt"
+      ]
+    }
+  }
+}
+```
+
+### Why This Works
+
+1. **Short-lived processes**: Targets run, use the database, produce output, and exit
+2. **Idempotent starts**: `supabase-start.sh` checks if already running (fast path)
+3. **Lock serialization**: Only one target per package can start Supabase at a time
+4. **Cache flexibility**:
+   - If first target starts Supabase → second target finds it running
+   - If first target loads from cache → second target starts Supabase fresh
+
+### Manual Operations
+
+The following targets are **intentionally not locked** as they're for manual use only:
+
+- `supabase:stop`
+- `supabase:restart`
+- `supabase:status`
+
+These targets are not part of automated workflows via `dependsOn`, so they don't need lock protection.
+
+## Scripts
+
+### supabase-start-locked.sh (Wrapper)
+
+**Purpose**: Provides lock-protected access to the Supabase start worker script.
+
+**Responsibilities**:
+
+- Normalizes the project directory to an absolute path
+- Computes a unique lock file path based on the directory hash
+- Delegates to `port-lock.sh` for lock acquisition
+- Passes control to `supabase-start.sh` when the lock is acquired
+
+**Usage**:
+
+```bash
+../../scripts/supabase-start-locked.sh $(pwd)
+```
+
+### port-lock.sh (Lock Manager)
+
+**Purpose**: NFS-safe locking mechanism using TCP port binding.
+
+**How it works** (step 1 is sketched below):
+
+1. Derives a unique port (40000-49999) from the lockfile path via MD5 hash
+2. Attempts to bind to that port with netcat
+3. If bind succeeds: holds lock and runs command
+4. If bind fails: waits with timeout (180s) and retries
+5. OS automatically releases the port on process exit
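+
+For illustration, the path-to-port derivation in step 1 boils down to a few lines of shell. This sketch mirrors `generate_lock_port` in `port-lock.sh`; the lock path is a made-up example:
+
+```bash
+# Sketch only - the real logic lives in generate_lock_port in port-lock.sh
+lockfile="/tmp/supabase-start-0123abcd4567ef89.lock"  # hypothetical lock path
+hash=$(echo "$lockfile" | md5sum | cut -d' ' -f1)
+port=$((40000 + 0x${hash:0:8} % 10000))               # first 8 hex chars -> 40000-49999
+echo "$port"                                          # same path always maps to the same port
+```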
+
+**Features**:
+
+- 8-hex-char hash (2^32 hash values folded onto 10,000 ports, so collisions between a handful of per-package locks are very unlikely)
+- 180-second timeout with progress indicators
+- Automatic cleanup on process death
+
+### supabase-start.sh (Worker)
+
+**Purpose**: Idempotent Supabase startup with health verification.
+
+**Behavior** (the fast path is sketched below):
+
+1. **Fast path**: Checks if already running → exits immediately if yes
+2. **Cleanup**: Stops stale containers and releases ports if needed
+3. **Start**: Launches Supabase with `pnpm exec supabase start`
+4. **Health check**: Verifies readiness with a 30-second retry loop
+5. **Progress**: Shows status every 5 seconds during the health check
+
+**Exit codes**:
+
+- `0`: Success (Supabase is running and healthy)
+- `1`: Failure (could not start or verify Supabase)
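+
+The fast path in step 1 is what keeps repeated locked starts cheap. A minimal sketch of that check, assuming (as the real script does) that `supabase status` exits non-zero while the stack is down:
+
+```bash
+# Sketch of the fast path - see supabase-start.sh for the full logic
+if pnpm exec supabase status >/dev/null 2>&1; then
+  echo "Supabase already running - fast exit"
+  exit 0
+fi
+# ...otherwise fall through to cleanup and `pnpm exec supabase start`
+```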
+
+## Troubleshooting
+
+### Port Conflicts
+
+If you see "Port X is still in use" errors:
+
+1. Check what's using the port: `ss -lpn | grep :PORT`
+2. Stop the process or wait for automatic cleanup
+3. If persistent, manually kill: `kill -9 PID`
+
+### Lock Timeout
+
+If you see "Timeout waiting for lock after 180s":
+
+1. Check if another process is stuck: `ss -lpn | grep -E ':4[0-9]{4}'` (shows listeners in the 40000-49999 lock range)
+2. Wait for the timeout or kill the stuck process
+3. First-time Supabase starts may take longer (downloading images)
+
+### Stale Containers
+
+If containers don't stop gracefully:
+
+1. The script will force cleanup after 30s
+2. Manual cleanup: `docker ps -a | grep supabase | awk '{print $1}' | xargs docker rm -f`
+
+### Health Check Failures
+
+If "Supabase started but not responding to status check":
+
+1. Check the Docker daemon is running: `docker info`
+2. Check available disk space: `df -h`
+3. Check Docker logs: `docker logs <container-name>`
+4. Try a manual restart: `pnpm nx run <project>:supabase:restart`
+
+## Implementation Notes
+
+### Why Not Shared Locks?
+
+Q: Why doesn't each package share the same lock?
+A: Each package runs its own Supabase instance on different ports. They can (and should) start independently without blocking each other.
+
+### Why Not Lock Stop/Restart?
+
+Q: Why aren't `supabase:stop` and `supabase:restart` locked?
+A: These are manual convenience targets, not used in automated workflows via `dependsOn`. Users invoke them directly, so lock protection isn't necessary.
+
+### Caching Strategy
+
+Nx caching works because:
+
+1. **Inputs are deterministic**: Migrations, schemas, tests don't change randomly
+2. **Outputs are reproducible**: Same inputs → same outputs
+3. **Supabase state is transient**: DB state doesn't affect cached outputs
+4. **Lock prevents races**: Only one target per package starts Supabase at a time
+
+### Migration Safety
+
+Q: What prevents race conditions when copying migrations (e.g., edge-worker copying from core)?
+A: Migration copy happens in the `supabase:prepare` target, which:
+
+- Depends on `^verify-migrations` (ensures core migrations are stable)
+- Runs before `supabase-start-locked.sh` is called
+- Uses a simple file copy (no concurrent writes)
+
+## Future Improvements
+
+- [ ] Consider making the timeout configurable via an environment variable
+- [ ] Add telemetry for lock wait times in CI
+- [ ] Investigate whether the `serve:functions:e2e` continuous target needs special handling
diff --git a/scripts/port-lock.sh b/scripts/port-lock.sh
new file mode 100755
index 000000000..bc36c0f38
--- /dev/null
+++ b/scripts/port-lock.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+set -e
+
+# ============================================================================
+# Port-Based Locking Script - Drop-in Replacement for flock
+# ============================================================================
+# This script provides file-lock semantics using TCP port binding instead of
+# file locking, solving NFS compatibility issues in CI environments.
+#
+# PURPOSE:
+#   Replace flock for environments where file locking is unreliable (NFS, etc.)
+#   Uses TCP port binding as a mutex - the OS guarantees only one process can bind.
+#
+# USAGE:
+#   port-lock.sh <lockfile> <command> [args...]
+#
+#   Same interface as flock - the lockfile path determines the port number.
+#   Runs the command when the lock is acquired, releases on exit.
+#
+# HOW IT WORKS:
+#   1. Derives unique port (40000-49999) from lockfile path via md5 hash
+#   2. Attempts to bind to that port with nc (netcat) in listen mode
+#   3. If bind succeeds: runs command and holds lock until exit
+#   4. If bind fails: another process holds lock, retry with timeout
+#   5. OS automatically releases port on process exit (even crashes)
+#
+# CRITICAL: Must use 'nc -l PORT' without an address - passing an address
+# makes nc act as a client and exit immediately, breaking the lock!
+#
+# KEY ADVANTAGE OVER FILE LOCKS:
+#   Port binding is kernel-managed, not filesystem-dependent.
+#   The OS always releases ports on process death - no orphaned locks.
+#
+# PORT RANGE:
+#   40000-49999 (10,000 ports available)
+#   - Below Supabase ports (50000+, 54000+, 55000+)
+#   - Above system/registered ports (0-32767)
+#   - Non-privileged (no root needed)
+#
+# PLATFORM:
+#   Linux with GNU or OpenBSD netcat (both syntaxes are handled below)
+#   Tested on: Manjaro, Ubuntu (GitHub Actions)
+# ============================================================================
+
+# Configuration
+LOCK_PORT_BASE=40000   # Start of safe port range
+LOCK_PORT_RANGE=10000  # 40000-49999
+MAX_WAIT_SECONDS=180   # Timeout for lock acquisition (3 minutes for CI)
+PROGRESS_INTERVAL=5    # Show progress every N seconds
+
+# Global state
+NC_PID=""              # Background nc process ID
+
+# ============================================================================
+# Usage and Validation
+# ============================================================================
+
+usage() {
+  echo "Usage: $0 <lockfile> <command> [args...]" >&2
+  echo "" >&2
+  echo "Drop-in replacement for flock using port-based locking." >&2
>&2 + echo "" >&2 + echo "Arguments:" >&2 + echo " lockfile - Path used to derive unique port number" >&2 + echo " command - Command to run while holding lock" >&2 + echo " args - Optional arguments passed to command" >&2 + exit 1 +} + +if [ $# -lt 2 ]; then + echo "Error: Insufficient arguments" >&2 + usage +fi + +LOCKFILE="$1" +shift +# Note: We keep the remaining arguments in $@ to preserve quoting +# DO NOT capture in COMMAND variable - it would lose quote boundaries + +# ============================================================================ +# Port Derivation +# ============================================================================ + +# Generate unique port from lockfile path (same approach as flock version) +# Returns port number in range 40000-49999 +generate_lock_port() { + local lockfile="$1" + + # Normalize to absolute path for consistent hashing + local lockfile_abs + if [ -e "$lockfile" ]; then + lockfile_abs=$(realpath "$lockfile" 2>/dev/null || echo "$lockfile") + else + # For non-existent files, use the path as-is + lockfile_abs="$lockfile" + fi + + # Hash the path and convert to port number + local hash=$(echo "$lockfile_abs" | md5sum | cut -d' ' -f1) + + # Convert first 8 hex chars to decimal, mod range, add to base + local offset=$((0x${hash:0:8} % LOCK_PORT_RANGE)) + echo $((LOCK_PORT_BASE + offset)) +} + +# ============================================================================ +# Lock Management +# ============================================================================ + +# Release lock by killing nc process +release_lock() { + if [ -n "$NC_PID" ]; then + kill "$NC_PID" 2>/dev/null || true + wait "$NC_PID" 2>/dev/null || true + NC_PID="" + fi + trap - EXIT INT TERM +} + +# Try to acquire lock by binding to port +# Returns 0 on success, 1 on failure (port already bound) +acquire_lock() { + local port="$1" + + # Detect netcat version and use appropriate syntax + # DO NOT pass address after port - that makes nc act as client! + if nc -h 2>&1 | grep -q "OpenBSD"; then + # OpenBSD netcat (common on Ubuntu/Debian) + nc -l "$port" /dev/null 2>&1 & + else + # GNU netcat (traditional) + nc -l -p "$port" /dev/null 2>&1 & + fi + NC_PID=$! + + # Give nc a moment to bind to the port + sleep 0.1 + + # Verify the process is still alive (bind succeeded) + # If nc failed to bind (port in use), it exits immediately + if ! kill -0 "$NC_PID" 2>/dev/null; then + NC_PID="" + return 1 # Lock acquisition failed + fi + + # Set up cleanup trap - ensures lock release on exit + trap 'release_lock' EXIT INT TERM + return 0 # Lock acquired successfully +} + +# Wait for lock with retry logic and timeout +# Returns 0 if lock acquired, 1 on timeout +wait_for_lock() { + local port="$1" + local attempt=0 + + while [ $attempt -lt $MAX_WAIT_SECONDS ]; do + if acquire_lock "$port"; then + return 0 # Lock acquired + fi + + # Lock held by another process - wait and retry + sleep 1 + attempt=$((attempt + 1)) + + # Progress indicator every N seconds + if [ $((attempt % PROGRESS_INTERVAL)) -eq 0 ]; then + echo "port-lock: Waiting for lock on port $port (${attempt}s elapsed)..." 
>&2 + fi + done + + echo "port-lock: ERROR: Timeout waiting for lock after ${MAX_WAIT_SECONDS}s" >&2 + echo "port-lock: Port $port may be held by stalled process or Supabase service" >&2 + return 1 # Timeout +} + +# ============================================================================ +# Main Execution +# ============================================================================ + +# Derive port from lockfile path +LOCK_PORT=$(generate_lock_port "$LOCKFILE") + +# Attempt to acquire lock +if ! wait_for_lock "$LOCK_PORT"; then + exit 1 +fi + +# Lock acquired - run the command +# Use "$@" directly to preserve argument boundaries and quoting +"$@" +EXIT_CODE=$? + +# Lock automatically released via trap on exit +exit $EXIT_CODE diff --git a/scripts/supabase-start-locked.sh b/scripts/supabase-start-locked.sh index 02ea7e3ea..e47437e3a 100755 --- a/scripts/supabase-start-locked.sh +++ b/scripts/supabase-start-locked.sh @@ -4,7 +4,7 @@ set -e # ============================================================================ # Supabase Start Locked Wrapper Script # ============================================================================ -# This script wraps supabase-start.sh with file-based locking using flock(1). +# This script wraps supabase-start.sh with port-based locking using port-lock.sh. # # PURPOSE: # When multiple Nx targets run in parallel and all need Supabase running, @@ -13,7 +13,7 @@ set -e # # HOW IT WORKS: # 1. Computes a lock file path based on the project directory -# 2. Uses flock to acquire an exclusive lock on that file +# 2. Uses port-lock.sh to acquire an exclusive lock (via TCP port binding) # 3. Runs the worker script (supabase-start.sh) while holding the lock # 4. Lock is automatically released when this script exits # @@ -29,16 +29,16 @@ set -e # - Acquires lock, runs worker → checks status → already running → fast exit # - Releases lock # -# WHY FORM 1 OF FLOCK: -# We use: flock -# This is simpler than file descriptor manipulation (Form 3) and provides -# exactly what we need: serialize execution of the worker script per-project. +# WHY PORT-BASED LOCKING: +# We use: port-lock.sh +# Port-based locking solves NFS reliability issues with flock in CI environments. +# Provides same interface as flock - lockfile path determines lock (via port number). 
 #
 # LOCK FILE LOCATION:
 #   /tmp/supabase-start-<hash>.lock where <hash> is the md5sum of the absolute project path
 #   - Unique per project (core, client, edge-worker have separate locks)
-#   - Standard /tmp location (cleaned on reboot)
-#   - Linux-specific (fine for our use case)
+#   - Lock file path is only used to derive the port number (40000-49999 range)
+#   - Avoids Supabase service ports (50000+, 54000+, 55000+)
 #
 # Usage: supabase-start-locked.sh <project-directory>
 # ============================================================================
@@ -71,6 +71,7 @@ LOCK_FILE="/tmp/supabase-start-${PROJECT_LOCK_NAME}.lock"
 SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
 WORKER_SCRIPT="$SCRIPT_DIR/supabase-start.sh"

-# Use flock (Form 1) to serialize access to the worker script
-# By default, flock blocks until the lock is available, then runs the command
-flock "$LOCK_FILE" "$WORKER_SCRIPT" "$PROJECT_DIR_ABS"
+# Use port-lock.sh to serialize access to the worker script
+# port-lock.sh provides the same interface as flock but uses port binding
+# This solves NFS reliability issues in CI environments
+"$SCRIPT_DIR/port-lock.sh" "$LOCK_FILE" "$WORKER_SCRIPT" "$PROJECT_DIR_ABS"
diff --git a/scripts/supabase-start.sh b/scripts/supabase-start.sh
index 3e67d5464..c9351439b 100755
--- a/scripts/supabase-start.sh
+++ b/scripts/supabase-start.sh
@@ -16,6 +16,8 @@ set -e
 # 1. Checks if Supabase is already running (fast path)
 # 2. If running: exits immediately
 # 3. If not running: cleans up stale containers and starts fresh
+# 4. Waits for containers to actually stop (fixes a Supabase CLI bug)
+# 5. Verifies ports are free before starting
 #
 # Exit codes:
 # 0 - Success (Supabase is running)
@@ -28,6 +30,121 @@ YELLOW='\033[1;33m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color

+# ============================================================================
+# Helper Functions
+# ============================================================================
+
+# Extract project_id from config.toml
+get_project_id() {
+  local config_file="$1/supabase/config.toml"
+  if [ ! -f "$config_file" ]; then
+    echo -e "${RED}Error: config.toml not found at $config_file${NC}" >&2
+    return 1
+  fi
+  grep "^project_id" "$config_file" | cut -d'"' -f2
+}
+
+# Extract all ports from config.toml
+get_project_ports() {
+  local config_file="$1/supabase/config.toml"
+  # Extract port value between = and any comment (#), trim spaces
+  grep -E "^\s*port\s*=" "$config_file" | sed 's/.*=\s*\([0-9]*\).*/\1/' | sort -u
+}
+
+# Check if any Supabase containers are running for this project
+containers_running() {
+  local project_id="$1"
+  docker ps --filter "name=supabase.*${project_id}" --format "{{.Names}}" 2>/dev/null | grep -q .
+}
+
+# Wait for containers to stop with timeout
+wait_for_containers_to_stop() {
+  local project_id="$1"
+  local max_wait="${2:-30}"  # Default 30 seconds
+
+  echo -e "${YELLOW}Waiting for containers to fully stop...${NC}"
+  local waited=0
+
+  while [ $waited -lt $max_wait ]; do
+    local running_containers=$(docker ps --filter "name=supabase.*${project_id}" --format "{{.Names}}" 2>/dev/null)
+    if [ -z "$running_containers" ]; then
+      echo -e "${GREEN}✓ All containers stopped${NC}"
+      return 0
+    fi
+
+    sleep 1
+    waited=$((waited + 1))
+
+    # Progress indicator every 5 seconds
+    if [ $((waited % 5)) -eq 0 ]; then
+      echo -e "${YELLOW}Still waiting... (${waited}s)${NC}"
+    fi
+  done
+
+  # Timeout reached
+  echo -e "${YELLOW}Containers didn't stop gracefully after ${max_wait}s${NC}"
+  return 1
+}
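+
+# Editorial note: the explicit wait above exists because `supabase stop` can
+# return while containers are still shutting down (the CLI bug mentioned in
+# the header), so the caller must confirm the containers are really gone.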
(${waited}s)${NC}" + fi + done + + # Timeout reached + echo -e "${YELLOW}Containers didn't stop gracefully after ${max_wait}s${NC}" + return 1 +} + +# Force kill containers and clean up +force_cleanup_containers() { + local project_id="$1" + + echo -e "${YELLOW}Forcing container removal for project: ${project_id}${NC}" + + # Kill running containers + docker ps --filter "name=supabase.*${project_id}" -q | xargs -r docker kill 2>/dev/null || true + + # Remove stopped containers + docker ps -a --filter "name=supabase.*${project_id}" -q | xargs -r docker rm 2>/dev/null || true +} + +# Clean up any processes holding our ports +cleanup_port_processes() { + local project_dir="$1" + local ports=$(get_project_ports "$project_dir") + + for port in $ports; do + # Check if port is in use + if nc -z localhost "$port" 2>/dev/null; then + # Find process holding the port using ss + local pid=$(ss -lpn 2>/dev/null | grep ":$port " | grep -oE "pid=[0-9]+" | cut -d= -f2 | head -1) + if [ -n "$pid" ]; then + echo -e "${YELLOW}Killing process holding port $port (PID: $pid)${NC}" + kill -9 "$pid" 2>/dev/null || true + fi + fi + done + + # Give OS time to release ports + sleep 2 +} + +# Verify all required ports are free +verify_ports_free() { + local project_dir="$1" + local ports=$(get_project_ports "$project_dir") + local all_free=true + + for port in $ports; do + if nc -z localhost "$port" 2>/dev/null; then + echo -e "${RED}Error: Port $port is still in use${NC}" >&2 + all_free=false + fi + done + + if [ "$all_free" = true ]; then + echo -e "${GREEN}✓ All required ports are free${NC}" + return 0 + else + return 1 + fi +} + +# ============================================================================ +# Main Script +# ============================================================================ + # Validate project directory argument if [ -z "$1" ]; then echo -e "${RED}Error: Project directory argument is required${NC}" >&2 @@ -46,7 +163,15 @@ fi # Change to project directory (Supabase CLI uses current directory) cd "$PROJECT_DIR" -echo -e "${YELLOW}Checking Supabase status in: $PROJECT_DIR${NC}" +# Get project ID from config +PROJECT_ID=$(get_project_id ".") +if [ -z "$PROJECT_ID" ]; then + echo -e "${RED}Error: Could not extract project_id from config.toml${NC}" >&2 + exit 1 +fi + +echo -e "${YELLOW}Checking Supabase status for project: ${PROJECT_ID}${NC}" +echo -e "${YELLOW}Start time: $(date '+%H:%M:%S')${NC}" # Fast path: Check if Supabase is already running # This makes repeated calls very fast @@ -59,16 +184,56 @@ fi echo -e "${YELLOW}Supabase is not running. Starting...${NC}" # Clean up any stale containers first -# This prevents errors from previous incomplete shutdowns echo -e "${YELLOW}Cleaning up any stale containers...${NC}" pnpm exec supabase stop --no-backup 2>/dev/null || true +# Wait for containers to actually stop +if ! wait_for_containers_to_stop "$PROJECT_ID" 30; then + # Force cleanup if graceful stop failed + force_cleanup_containers "$PROJECT_ID" + + # Extra wait after force cleanup + sleep 2 +fi + +# Clean up any lingering port processes +cleanup_port_processes "." + +# Final verification that ports are free +if ! verify_ports_free "."; then + echo -e "${RED}Error: Unable to free all required ports${NC}" >&2 + echo -e "${YELLOW}You may need to manually check what's using these ports${NC}" >&2 + exit 1 +fi + # Start Supabase with all configured services -echo -e "${YELLOW}Starting Supabase...${NC}" +echo -e "${YELLOW}Starting Supabase... 
+echo -e "${YELLOW}Time: $(date '+%H:%M:%S')${NC}"
 if pnpm exec supabase start; then
-  echo -e "${GREEN}✓ Supabase started successfully${NC}"
-  exit 0
+  # Verify Supabase is actually ready with retries
+  echo -e "${YELLOW}Verifying Supabase is ready...${NC}"
+  MAX_HEALTH_CHECKS=30
+
+  for i in $(seq 1 $MAX_HEALTH_CHECKS); do
+    if pnpm exec supabase status > /dev/null 2>&1; then
+      echo -e "${GREEN}✓ Supabase started successfully and is ready${NC}"
+      echo -e "${YELLOW}End time: $(date '+%H:%M:%S')${NC}"
+      exit 0
+    fi
+
+    if [ $i -eq $MAX_HEALTH_CHECKS ]; then
+      echo -e "${RED}✗ Supabase started but not responding to status check after ${MAX_HEALTH_CHECKS}s${NC}" >&2
+      exit 1
+    fi
+
+    # Show progress every 5 checks
+    if [ $((i % 5)) -eq 0 ]; then
+      echo -e "${YELLOW}Waiting for Supabase to be ready... (${i}s)${NC}"
+    fi
+
+    sleep 1
+  done
 else
   echo -e "${RED}✗ Failed to start Supabase${NC}" >&2
   exit 1
-fi
+fi
\ No newline at end of file
diff --git a/scripts/tests/port-lock.test.sh b/scripts/tests/port-lock.test.sh
new file mode 100755
index 000000000..fa1902cee
--- /dev/null
+++ b/scripts/tests/port-lock.test.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -e
+
+# ============================================================================
+# Port Lock Test Script
+# ============================================================================
+# Simple test to verify port-lock.sh works correctly
+# ============================================================================
+
+SCRIPT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
+PORT_LOCK="$SCRIPT_DIR/port-lock.sh"
+TEST_OUTPUT="/tmp/port-lock-test-output.txt"
+LOCK_FILE="/tmp/port-lock-test.lock"
+
+# Colors for output
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+pass() {
+  echo -e "${GREEN}✓${NC} $1"
+}
+
+fail() {
+  echo -e "${RED}✗${NC} $1"
+  exit 1
+}
+
+# Cleanup
+cleanup() {
+  rm -f "$TEST_OUTPUT" "$LOCK_FILE"
+}
+trap cleanup EXIT
+
+echo "Testing port-lock.sh..."
+echo ""
+
+# Test 1: Basic execution
+echo "Test 1: Basic command execution"
+if "$PORT_LOCK" "$LOCK_FILE" echo "test" > "$TEST_OUTPUT" 2>&1; then
+  if grep -q "test" "$TEST_OUTPUT"; then
+    pass "Command executed successfully"
+  else
+    fail "Command output not found"
+  fi
+else
+  fail "Command execution failed"
+fi
+
+# Test 2: Exit code preservation
+echo ""
+echo "Test 2: Exit code preservation"
+"$PORT_LOCK" "$LOCK_FILE" bash -c 'exit 42' && EXIT_CODE=0 || EXIT_CODE=$?
+if [ "$EXIT_CODE" -eq 42 ]; then
+  pass "Exit code preserved correctly"
+else
+  fail "Exit code not preserved (expected 42, got $EXIT_CODE)"
+fi
+
+# Test 3: Parallel execution (serialization)
+echo ""
+echo "Test 3: Parallel execution serialization"
+> "$TEST_OUTPUT"
+
+"$PORT_LOCK" "$LOCK_FILE" bash -c 'echo "A START"; sleep 0.3; echo "A END"' >> "$TEST_OUTPUT" 2>&1 &
+PID1=$!
+"$PORT_LOCK" "$LOCK_FILE" bash -c 'echo "B START"; sleep 0.3; echo "B END"' >> "$TEST_OUTPUT" 2>&1 &
+PID2=$!
+"$PORT_LOCK" "$LOCK_FILE" bash -c 'echo "C START"; sleep 0.3; echo "C END"' >> "$TEST_OUTPUT" 2>&1 &
+PID3=$!
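+
+# Editorial note: all three jobs contend for the same lock, so they should run
+# one at a time; expect total wall time well above a single 0.3s sleep, since
+# waiting processes retry the port bind roughly once per second.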
+
+wait $PID1 $PID2 $PID3
+
+# Verify all processes completed
+if grep -q "A START" "$TEST_OUTPUT" && grep -q "A END" "$TEST_OUTPUT" && \
+   grep -q "B START" "$TEST_OUTPUT" && grep -q "B END" "$TEST_OUTPUT" && \
+   grep -q "C START" "$TEST_OUTPUT" && grep -q "C END" "$TEST_OUTPUT"; then
+  pass "All parallel processes completed"
+else
+  fail "Some processes did not complete"
+fi
+
+# Sanity check: all 6 process lines were written intact (this checks output
+# integrity, not strict ordering - a full serialization check would assert
+# that no START/END pairs interleave)
+LINE_COUNT=$(grep -c "^[ABC]" "$TEST_OUTPUT")
+if [ "$LINE_COUNT" -eq 6 ]; then
+  pass "Expected number of output lines (6)"
+else
+  fail "Unexpected line count: $LINE_COUNT (expected 6)"
+fi
+
+# Test 4: Complex command with arguments
+echo ""
+echo "Test 4: Complex command with arguments and redirection"
+"$PORT_LOCK" "$LOCK_FILE" bash -c 'echo "line1"; echo "line2"' > "$TEST_OUTPUT" 2>&1
+if [ "$(wc -l < "$TEST_OUTPUT")" -eq 2 ]; then
+  pass "Complex command with multiple outputs works"
+else
+  fail "Complex command failed"
+fi
+
+echo ""
+echo "================================"
+echo -e "${GREEN}All tests passed!${NC}"
+echo "================================"