Skip to content
8 changes: 5 additions & 3 deletions .github/configs/nvidia-master.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1790,7 +1790,7 @@ qwen3.5-fp8-b200-sglang:
- { tp: 4, ep: 4, conc-start: 16, conc-end: 128 }

qwen3.5-fp4-b200-sglang:
image: lmsysorg/sglang:nightly-dev-20260402-d7256eb6
image: lmsysorg/sglang:v0.5.10.post1-cu130
model: nvidia/Qwen3.5-397B-A17B-NVFP4
model-prefix: qwen3.5
runner: b200
Expand All @@ -1801,11 +1801,13 @@ qwen3.5-fp4-b200-sglang:
- isl: 1024
osl: 1024
search-space:
- { tp: 4, ep: 1, conc-start: 4, conc-end: 128 }
- { tp: 4, ep: 1, conc-start: 4, conc-end: 4 }
- { tp: 2, ep: 2, conc-start: 4, conc-end: 128 }
- isl: 8192
osl: 1024
search-space:
- { tp: 4, ep: 1, conc-start: 4, conc-end: 128 }
- { tp: 4, ep: 1, conc-start: 4, conc-end: 4 }
- { tp: 2, ep: 2, conc-start: 4, conc-end: 128 }

glm5-fp8-b200-sglang:
image: lmsysorg/sglang:nightly-dev-cu13-20260317-1eea7448
Expand Down
49 changes: 16 additions & 33 deletions benchmarks/single_node/qwen3.5_fp4_b200.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,56 +20,39 @@ nvidia-smi

hf download "$MODEL"

export NCCL_NVLS_ENABLE=1
export SGL_ENABLE_JIT_DEEPGEMM=false
export SGLANG_ENABLE_FLASHINFER_GEMM=true
export PYTHONUNBUFFERED=1

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
if [[ $CONC -ge 16 ]]; then
SCHEDULER_RECV_INTERVAL=30
else
SCHEDULER_RECV_INTERVAL=10
fi

MEM_FRAC_STATIC=0.85
CHUNKED_PREFILL_SIZE=32768
MAX_PREFILL_TOKENS=32768
CUDA_GRAPH_MAX_BATCH_SIZE=$CONC
MAX_RUNNING_REQUESTS=128
CONTEXT_LENGTH=$((ISL + OSL + 20))
if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
CONTEXT_LENGTH="$EVAL_MAX_MODEL_LEN"
fi

if [[ $TP -eq 8 ]]; then
EXTRA_ARGS="--enable-flashinfer-allreduce-fusion"
else
EXTRA_ARGS=""
fi

echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"

# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

set -x
PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
--trust-remote-code \
--tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \
--quantization modelopt_fp4 --fp4-gemm-backend flashinfer_cutlass \
--tensor-parallel-size=$TP --data-parallel-size=1 --expert-parallel-size=$EP_SIZE \
--enable-symm-mem \
--disable-radix-cache \
--quantization modelopt_fp4 \
--kv-cache-dtype fp8_e4m3 \
--mamba-ssm-dtype bfloat16 \
--cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
--mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
--context-length $CONTEXT_LENGTH --disable-radix-cache \
--attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \
$EXTRA_ARGS --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \
--tokenizer-worker-num 6 --stream-interval 30 > $SERVER_LOG 2>&1 &
--attention-backend trtllm_mha \
--moe-runner-backend flashinfer_trtllm \
--cuda-graph-max-bs $CONC \
--max-running-requests $CONC \
--max-prefill-tokens 81920 \
--chunked-prefill-size 81920 \
--mem-fraction-static 0.8 \
--stream-interval 50 \
--scheduler-recv-interval 10 \
--tokenizer-worker-num 6 \
--tokenizer-path $MODEL \
--context-length $CONTEXT_LENGTH > $SERVER_LOG 2>&1 &

SERVER_PID=$!

Expand Down
10 changes: 8 additions & 2 deletions perf-changelog.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1346,15 +1346,21 @@
description:
- "Bump GLM-5 FP8 B200 SGLang concurrency from 128 to 256"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1012

- config-keys:
- qwen3.5-fp4-mi355x-sglang
description:
- "TP2/TP4 search space exploration for Qwen3.5 fp4 on SGL"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1022

- config-keys:
- qwen3.5-fp8-h200-sglang-mtp
description:
- "Enable SGLANG_ENABLE_SPEC_V2=1 for Qwen3.5 FP8 H200 SGLang MTP"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1017

- config-keys:
- qwen3.5-fp4-b200-sglang
description:
- "Update SGLang image to `v0.5.10.post1-cu130`"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1018
Loading