Commit e302f9e

[CI] Add matrix strategy for backend support in build-and-run workflow
- Introduce `fail-fast: false` and a matrix strategy for the `opencl` and `ptx` backends.
- Modify the build and execution steps to dynamically use the selected backend.
1 parent f2e9c75 commit e302f9e
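
The added `strategy` block (first hunk in the diff below) makes GitHub Actions run the affected job once per `backend` entry, and `fail-fast: false` keeps one backend's job running even if the other fails. A minimal sketch of how the matrix value flows into a step, with the step name invented here purely for illustration:

    strategy:
      fail-fast: false            # one backend failing does not cancel the other job
      matrix:
        backend:
          - name: opencl
          - name: ptx
    steps:
      - name: Build for the selected backend     # illustrative step name, not from the workflow
        run: make BACKEND=${{ matrix.backend.name }}   # resolves to opencl in one job, ptx in the other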


.github/workflows/build-and-run.yml

Lines changed: 18 additions & 11 deletions
@@ -33,6 +33,13 @@ jobs:
     needs: code-quality
     timeout-minutes: 30
 
+    strategy:
+      fail-fast: false
+      matrix:
+        backend:
+          - name: opencl
+          - name: ptx
+
     steps:
       - name: Checkout GPULlama3
         uses: actions/checkout@v4
@@ -54,7 +61,7 @@
           source venv/bin/activate
           echo "=== Building TornadoVM ==="
 
-          make
+          make BACKEND=${{ matrix.backend.name }}
 
           echo "=== Searching for TornadoVM SDK directory ==="
           SDK_DIR=$(find dist -type d -maxdepth 3 -path "*/tornadovm-*-opencl" | head -n 1)
@@ -88,69 +95,69 @@
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
             --model $MODELS_DIR/Llama-3.2-1B-Instruct-F16.gguf \
             --prompt "Say hello"
       - name: FP16 - Run Qwen3-4B-f16.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
             --model $MODELS_DIR/Qwen3-4B-f16.gguf \
             --prompt "Say hello"
       - name: FP16 - Run Mistral-7B-Instruct-v0.3.fp16.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.fp16.gguf \
            --prompt "Say hello"
       - name: FP16 - Run Qwen2.5-1.5b-instruct-fp16.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/qwen2.5-1.5b-instruct-fp16.gguf \
            --prompt "Say hello"
       - name: FP16 - Run Phi-3-mini-4k-instruct-fp16.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model /$MODELS_DIR/Phi-3-mini-4k-instruct-fp16.gguf \
            --prompt "Say hello"
       - name: Q8 - Run Llama-3.2-1B-Instruct-Q8_0.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/Llama-3.2-1B-Instruct-Q8_0.gguf \
            --prompt "Say hello"
       - name: Q8 - Run Qwen3-0.6B-Q8_0.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/Qwen3-0.6B-Q8_0.gguf \
            --prompt "Say hello"
       - name: Q8 - Run Phi-3-mini-4k-instruct-Q8_0.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/Phi-3-mini-4k-instruct-Q8_0.gguf \
            --prompt "Say hello"
       - name: Q8 - Run Qwen2.5-1.5b-instruct-q8_0.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/qwen2.5-1.5b-instruct-q8_0.gguf \
            --prompt "Say hello"
       - name: Q8 - Mistral-7B-Instruct-v0.3.Q8_0.gguf
         run: |
           cd ${{ github.workspace }}
           export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH"
-          ./llama-tornado --gpu --opencl \
+          ./llama-tornado --gpu --${{ matrix.backend.name }} \
            --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.Q8_0.gguf \
            --prompt "Say hello"
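
Because every build and model-run step now interpolates `${{ matrix.backend.name }}` instead of hard-coding `--opencl`, covering a further backend would only require a new matrix entry rather than edits to each step. A sketch of that pattern, using a hypothetical `spirv` entry purely as an illustration:

    matrix:
      backend:
        - name: opencl
        - name: ptx
        - name: spirv    # hypothetical extra backend, not part of this commit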
