-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdeploy
More file actions
executable file
·305 lines (260 loc) · 11.5 KB
/
deploy
File metadata and controls
executable file
·305 lines (260 loc) · 11.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
#!/bin/bash
# Portable Docker Deployment System
# Usage: deploy [deployment-dir] [flags]
#
# Deploys LLM containers with auto-detected hardware routing,
# GPU teardown, and compose template management.
#
# Flags:
# --shared-tenancy Skip GPU teardown, start alongside existing containers
# --gpu=N Override GPU device index (default: from .env or 0)
# --port=N Override host port
# --target=MACHINE Override auto-detected target machine
# --dry-run Show what would happen without executing
# --logs Tail container logs after start
# --build Force Docker image rebuild
# --generate Generate docker-compose.yml + .env from spec.yaml
# --down Stop and remove the deployment
set -e
# NOTE: the --help handler prints this header comment block verbatim from
# the file itself — keep the flag list above accurate and complete.
# Absolute directory containing this script; used as the repo root for
# resolving relative deployment paths and the shared library dir.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LIB_DIR="$SCRIPT_DIR/infrastructure/lib"
# Source shared functions. Presumed provenance (confirm in infrastructure/lib):
#   resolve-target.sh   — resolve_machine, resolve_platform, is_local,
#                         get_ssh_alias, get_gpu_indices, compute_port
#   teardown.sh         — teardown_gpu, verify_gpu_free, clear_page_cache
#   compose-generate.sh — generate_deployment
source "$LIB_DIR/resolve-target.sh"
source "$LIB_DIR/teardown.sh"
source "$LIB_DIR/compose-generate.sh"
# ── Parse arguments ──────────────────────────────────────────────
# Flags may appear in any order; the single non-flag argument is the
# deployment directory. Unknown flags abort with an error.
DEPLOY_DIR=""
SHARED_TENANCY=false   # --shared-tenancy: skip GPU teardown
GPU_OVERRIDE=""        # --gpu=N: GPU device index override
PORT_OVERRIDE=""       # --port=N: host port override
TARGET_OVERRIDE=""     # --target=MACHINE: skip auto-detection
DRY_RUN=false          # --dry-run: print actions, execute nothing
TAIL_LOGS=false        # --logs: follow container logs after start
FORCE_BUILD=false      # --build: force Docker image rebuild
DO_GENERATE=false      # --generate: emit docker-compose.yml/.env, then exit
DO_DOWN=false          # --down: stop and remove the deployment, then exit
for arg in "$@"; do
  case "$arg" in
    --shared-tenancy) SHARED_TENANCY=true ;;
    --gpu=*)    GPU_OVERRIDE="${arg#*=}" ;;
    --port=*)   PORT_OVERRIDE="${arg#*=}" ;;
    --target=*) TARGET_OVERRIDE="${arg#*=}" ;;
    --dry-run)  DRY_RUN=true ;;
    --logs)     TAIL_LOGS=true ;;
    --build)    FORCE_BUILD=true ;;
    --generate) DO_GENERATE=true ;;
    --down)     DO_DOWN=true ;;
    --help|-h)
      # Print the header comment block (every '#' line after the shebang,
      # stopping at the first non-comment line). The previous
      # `head -17 | tail -16` hard-coded the header length and would print
      # the wrong lines if the header ever grew or shrank.
      awk 'NR == 1 { next } /^#/ { print; next } { exit }' "$0"
      exit 0
      ;;
    -*)
      echo "Unknown flag: $arg" >&2
      exit 1
      ;;
    *)
      DEPLOY_DIR="$arg"
      ;;
  esac
done
# Sourced helpers and child processes honor DRY_RUN too.
export DRY_RUN
# A deployment directory is mandatory for every mode.
if [ -z "$DEPLOY_DIR" ]; then
  echo "Usage: deploy <deployment-dir> [flags]"
  echo " e.g.: deploy SM120.RtxPro6000/MoE/vllm.nvidia.Llama-3.3-70B-Instruct-NVFP4.benchmarked"
  echo "Run 'deploy --help' for all flags."
  exit 1
fi
# Resolve deployment directory (relative to repo root)
if [ ! -d "$DEPLOY_DIR" ]; then
  # Not found as given — fall back to interpreting it relative to the
  # script's own directory before giving up.
  if [ ! -d "$SCRIPT_DIR/$DEPLOY_DIR" ]; then
    echo "ERROR: Deployment directory not found: $DEPLOY_DIR" >&2
    exit 1
  fi
  DEPLOY_DIR="$SCRIPT_DIR/$DEPLOY_DIR"
fi
# Canonicalize to an absolute path for all later use.
DEPLOY_DIR="$(cd "$DEPLOY_DIR" && pwd)"
# ── Resolve target ───────────────────────────────────────────────
# Extract SM tier from path (find the SM* component, e.g. "SM120").
# Quote the stripped prefix so glob characters in SCRIPT_DIR are literal.
REL_PATH="${DEPLOY_DIR#"$SCRIPT_DIR"/}"
# -E instead of -P: \d / PCRE is a GNU extension and fails on BSD/macOS grep;
# the ERE class [0-9] is equivalent and portable.
SM_TIER=$(echo "$REL_PATH" | grep -oE 'SM[0-9]+' | head -1)
if [ -z "$SM_TIER" ]; then
  echo "ERROR: Cannot determine SM tier from path: $REL_PATH" >&2
  echo " Expected path like SM120.RtxPro6000/MoE/vllm.model.benchmarked" >&2
  exit 1
fi
# --target wins; otherwise map the SM tier to a machine via the sourced lib.
TARGET_MACHINE="${TARGET_OVERRIDE:-$(resolve_machine "$SM_TIER")}"
if [ -z "$TARGET_MACHINE" ]; then
  echo "ERROR: No machine found for SM tier: $SM_TIER" >&2
  exit 1
fi
PLATFORM=$(resolve_platform "$DEPLOY_DIR")
# IS_LOCAL gates docker-vs-ssh execution for every later step.
IS_LOCAL=false
if is_local "$TARGET_MACHINE"; then
  IS_LOCAL=true
fi
# ── Deployment banner: summarize what was resolved before acting ──
banner_rule="════════════════════════════════════════════════"
if [ "$IS_LOCAL" = true ]; then
  locality="local"
else
  locality="remote"
fi
printf '%s\n' "$banner_rule"
printf '%s\n' " deploy: $(basename "$DEPLOY_DIR")"
printf '%s\n' "$banner_rule"
printf '%s\n' " Path: $REL_PATH"
printf '%s\n' " SM tier: $SM_TIER"
printf '%s\n' " Target: $TARGET_MACHINE ($locality)"
printf '%s\n' " Platform: $PLATFORM"
printf '%s\n' "$banner_rule"
printf '\n'
# ── Generate mode ────────────────────────────────────────────────
# Emit docker-compose.yml + .env from spec.yaml, report, and exit.
if [ "$DO_GENERATE" = true ]; then
  # GPU index: explicit --gpu wins; otherwise take the first index the
  # target machine advertises for this SM tier; fall back to 0.
  GPU_INDEX="${GPU_OVERRIDE:-0}"
  if [ -z "$GPU_OVERRIDE" ]; then
    tier_indices=$(get_gpu_indices "$TARGET_MACHINE" "$SM_TIER")
    if [ -n "$tier_indices" ]; then
      GPU_INDEX=$(cut -d',' -f1 <<<"$tier_indices")
    fi
  fi
  generate_deployment "$DEPLOY_DIR" "$SM_TIER" "$GPU_INDEX" "$PORT_OVERRIDE"
  echo ""
  echo "Generated files:"
  [ -f "$DEPLOY_DIR/.env" ] && echo " .env: $(wc -l < "$DEPLOY_DIR/.env") lines"
  [ -f "$DEPLOY_DIR/docker-compose.yml" ] && echo " docker-compose.yml: $(wc -l < "$DEPLOY_DIR/docker-compose.yml") lines"
  exit 0
fi
# ── Validate compose file exists ─────────────────────────────────
[ -f "$DEPLOY_DIR/docker-compose.yml" ] || {
  echo "ERROR: No docker-compose.yml in $DEPLOY_DIR" >&2
  echo " Run: deploy --generate $REL_PATH" >&2
  exit 1
}
# ── Load .env if present ─────────────────────────────────────────
# set -a exports every assignment the .env makes, so docker compose and
# other child processes inherit them.
if [ -f "$DEPLOY_DIR/.env" ]; then
  set -a
  . "$DEPLOY_DIR/.env"
  set +a
fi
# Apply overrides: CLI flag beats .env value beats computed default.
# compute_port is only invoked when neither override nor .env set a port.
GPU_INDEX="${GPU_OVERRIDE:-${GPU_INDEX:-0}}"
HOST_PORT="${PORT_OVERRIDE:-${HOST_PORT:-$(compute_port "$PLATFORM" "$GPU_INDEX")}}"
CONTAINER_NAME="${CONTAINER_NAME:-$(basename "$DEPLOY_DIR")}"
echo " Container: $CONTAINER_NAME"
echo " GPU: $GPU_INDEX"
echo " Port: $HOST_PORT"
echo ""
# ── Down mode ────────────────────────────────────────────────────
# Stop and remove the deployment, then exit. All teardown here is
# deliberately best-effort (errors suppressed) so --down is idempotent.
if [ "$DO_DOWN" = true ]; then
  echo "[deploy] Stopping deployment..."
  if [ "$DRY_RUN" = true ]; then
    echo "[deploy] DRY RUN — would run: docker compose -f $DEPLOY_DIR/docker-compose.yml down"
  elif [ "$IS_LOCAL" = true ]; then
    # Prefer compose down; fall back to stopping the container directly if
    # compose fails (e.g. stale or missing compose state). The unconditional
    # rm afterwards is a no-op when compose already removed the container.
    docker compose -f "$DEPLOY_DIR/docker-compose.yml" --env-file "$DEPLOY_DIR/.env" down 2>/dev/null || \
    docker stop "$CONTAINER_NAME" 2>/dev/null || true
    docker rm "$CONTAINER_NAME" 2>/dev/null || true
  else
    # Remote target: the deployment lives under ~/llm-deploy/<name> on the
    # target machine (the same path the deploy step syncs to — keep the two
    # in agreement). Ignore errors if nothing was ever deployed there.
    ssh_alias=""
    ssh_alias=$(get_ssh_alias "$TARGET_MACHINE")
    ssh "$ssh_alias" "cd ~/llm-deploy/$(basename "$DEPLOY_DIR") && docker compose down" 2>/dev/null || true
  fi
  echo "[deploy] Stopped."
  exit 0
fi
# ── Step 1: Teardown ─────────────────────────────────────────────
# Free the target GPU(s) before starting, unless sharing was requested.
if [ "$SHARED_TENANCY" = true ]; then
  echo "[1/5] Skipping GPU teardown (--shared-tenancy)"
else
  echo "[1/5] GPU teardown..."
  # TP_SIZE presumably comes from the sourced .env. The 2>/dev/null silences
  # the test's error when TP_SIZE is non-numeric, in which case the
  # comparison is false and the single-GPU path runs.
  if [ "${TP_SIZE:-1}" -gt 1 ] 2>/dev/null; then
    # NOTE(review): this frees only GPU_INDEX and GPU_INDEX+1. If
    # teardown_gpu takes a list/range of indices and TP_SIZE > 2 is ever
    # used, GPUs beyond GPU_INDEX+1 are left untouched — confirm
    # teardown_gpu's signature and the supported TP sizes.
    teardown_gpu "$TARGET_MACHINE" "$GPU_INDEX" "$((GPU_INDEX + 1))"
  else
    teardown_gpu "$TARGET_MACHINE" "$GPU_INDEX"
  fi
fi
# ── Step 1b: Verify GPU is clear ──────────────────────────────────
# Non-fatal check: a busy GPU only warns, the deploy still proceeds.
if [ "$SHARED_TENANCY" != true ]; then
  if ! verify_gpu_free "${GPU_INDEX:-0}" "$TARGET_MACHINE"; then
    echo "WARNING: GPU not fully clear. Deploy may fail."
    echo "To force cleanup: ssh <admin> 'sudo fuser -k /dev/nvidia*'"
  fi
fi
echo ""
# ── Step 2: Clear page cache (DGX Spark) ─────────────────────────
echo "[2/5] Page cache..."
if [ "$IS_LOCAL" != true ]; then
  # Remote targets get their page cache dropped before loading the model.
  clear_page_cache "$TARGET_MACHINE"
else
  # Local machine (adeliant) — page cache clearing is skipped (not UMA).
  echo "[2/5] Skipped (local machine, not UMA)"
fi
echo ""
# ── Step 3: Deploy ───────────────────────────────────────────────
# Local: docker compose directly. Remote: rsync the deployment dir to the
# target machine and compose up over ssh. --dry-run only prints the plan.
echo "[3/5] Deploying..."
if [ "$DRY_RUN" = true ]; then
  echo "[deploy] DRY RUN — would run:"
  if [ "$IS_LOCAL" = true ]; then
    echo " cd $DEPLOY_DIR"
    echo " GPU_INDEX=$GPU_INDEX HOST_PORT=$HOST_PORT docker compose up -d"
  else
    ssh_alias=""
    ssh_alias=$(get_ssh_alias "$TARGET_MACHINE")
    echo " rsync -az $DEPLOY_DIR/ $ssh_alias:~/llm-deploy/$(basename "$DEPLOY_DIR")/"
    echo " ssh $ssh_alias 'cd ~/llm-deploy/$(basename "$DEPLOY_DIR") && docker compose up -d'"
  fi
else
  # Build image if --build specified
  if [ "$FORCE_BUILD" = true ] && [ -f "$DEPLOY_DIR/Dockerfile" ]; then
    echo "[deploy] Building Docker image..."
    if [ "$IS_LOCAL" = true ]; then
      # Tag the build with the image name declared in the compose file;
      # envsubst expands any ${VAR} references in that name so the tag
      # matches what compose will run. NOTE: this grep/sed parse assumes a
      # single-line `image:` entry in the compose file.
      docker build -t "$(grep 'image:' "$DEPLOY_DIR/docker-compose.yml" | head -1 | sed 's/.*image: *//' | envsubst)" "$DEPLOY_DIR"
    else
      # Fix: --build used to be silently ignored for remote targets; make
      # the limitation explicit so the user knows no rebuild happened.
      echo "[deploy] WARNING: --build is only supported for local targets; skipping build" >&2
    fi
  fi
  if [ "$IS_LOCAL" = true ]; then
    # Remove old container if exists
    docker rm -f "$CONTAINER_NAME" 2>/dev/null || true
    # Export overrides for compose
    export GPU_INDEX HOST_PORT CONTAINER_NAME
    docker compose -f "$DEPLOY_DIR/docker-compose.yml" --env-file "$DEPLOY_DIR/.env" up -d 2>&1
  else
    # Remote deployment: sync the whole deployment dir, then start it.
    ssh_alias=""
    ssh_alias=$(get_ssh_alias "$TARGET_MACHINE")
    remote_dir="~/llm-deploy/$(basename "$DEPLOY_DIR")"
    echo "[deploy] Syncing to $TARGET_MACHINE:$remote_dir..."
    ssh "$ssh_alias" "mkdir -p $remote_dir"
    rsync -az --exclude='.archive' --exclude='__pycache__' \
      "$DEPLOY_DIR/" "$ssh_alias:$remote_dir/"
    echo "[deploy] Starting on $TARGET_MACHINE..."
    ssh "$ssh_alias" "cd $remote_dir && docker compose up -d"
  fi
fi
echo ""
echo ""
# ── Step 4: Health check ─────────────────────────────────────────
echo "[4/5] Verifying..."
if [ "$DRY_RUN" = true ]; then
echo "[deploy] DRY RUN — would check: curl -f http://localhost:$HOST_PORT/health"
echo ""
echo "════════════════════════════════════════════════"
echo " DRY RUN complete"
echo "════════════════════════════════════════════════"
exit 0
fi
# Wait briefly for container to start
sleep 3
# Check container is running
if docker ps --format "{{.Names}}" 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
echo "[deploy] Container $CONTAINER_NAME is running"
else
echo "[deploy] WARNING: Container $CONTAINER_NAME not found in docker ps" >&2
echo "[deploy] Check logs: docker logs $CONTAINER_NAME" >&2
fi
echo ""
echo "════════════════════════════════════════════════"
echo " Deployment: $CONTAINER_NAME"
# NOTE(review): the URLs below assume a local deployment; for a remote
# target the endpoint lives on $TARGET_MACHINE, not localhost (unless a
# port-forward/tunnel is in place — confirm intended usage).
echo " Endpoint: http://localhost:$HOST_PORT/v1"
echo " Health: curl http://localhost:$HOST_PORT/health"
echo " Logs: docker logs -f $CONTAINER_NAME"
echo "════════════════════════════════════════════════"
if [ "$TAIL_LOGS" = true ]; then
  echo ""
  echo "Tailing logs (Ctrl+C to stop)..."
  # NOTE(review): tails the *local* docker daemon — for a remote target this
  # will not find the container; confirm whether --logs is local-only.
  docker logs -f "$CONTAINER_NAME"
fi