|
12 | 12 | TORNADO_ROOT: ${{ github.workspace }}/GPULlama3.java/external/tornadovm |
13 | 13 | LLAMA_ROOT: ${{ github.workspace }} |
14 | 14 | GRAAL_JARS: /opt/graalJars |
| 15 | + MODELS_DIR: /opt/models |
15 | 16 |
|
16 | 17 | jobs: |
17 | 18 | code-quality: |
@@ -88,68 +89,68 @@ jobs: |
88 | 89 | cd ${{ github.workspace }} |
89 | 90 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
90 | 91 | ./llama-tornado --gpu --opencl \ |
91 | | - --model /home/michalis/models/Llama-3.2-1B-Instruct-F16.gguf \ |
| 92 | + --model $MODELS_DIR/Llama-3.2-1B-Instruct-F16.gguf \ |
92 | 93 | --prompt "Say hello" |
93 | 94 | - name: FP16 - Run Qwen3-4B-f16.gguf |
94 | 95 | run: | |
95 | 96 | cd ${{ github.workspace }} |
96 | 97 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
97 | 98 | ./llama-tornado --gpu --opencl \ |
98 | | - --model /opt/models/Qwen3-4B-f16.gguf \ |
| 99 | + --model $MODELS_DIR/Qwen3-4B-f16.gguf \ |
99 | 100 | --prompt "Say hello" |
100 | 101 | - name: FP16 - Run Mistral-7B-Instruct-v0.3.fp16.gguf |
101 | 102 | run: | |
102 | 103 | cd ${{ github.workspace }} |
103 | 104 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
104 | 105 | ./llama-tornado --gpu --opencl \ |
105 | | - --model /opt/models/Mistral-7B-Instruct-v0.3.fp16.gguf \ |
| 106 | + --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.fp16.gguf \ |
106 | 107 | --prompt "Say hello" |
107 | 108 | - name: FP16 - Run Qwen2.5-1.5b-instruct-fp16.gguf |
108 | 109 | run: | |
109 | 110 | cd ${{ github.workspace }} |
110 | 111 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
111 | 112 | ./llama-tornado --gpu --opencl \ |
112 | | - --model /opt/models/qwen2.5-1.5b-instruct-fp16.gguf \ |
| 113 | + --model $MODELS_DIR/qwen2.5-1.5b-instruct-fp16.gguf \ |
113 | 114 | --prompt "Say hello" |
114 | 115 | - name: FP16 - Run Phi-3-mini-4k-instruct-fp16.gguf |
115 | 116 | run: | |
116 | 117 | cd ${{ github.workspace }} |
117 | 118 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
118 | 119 | ./llama-tornado --gpu --opencl \ |
119 | | - --model /opt/models/Phi-3-mini-4k-instruct-fp16.gguf \ |
 | 120 | + --model $MODELS_DIR/Phi-3-mini-4k-instruct-fp16.gguf \
120 | 121 | --prompt "Say hello" |
121 | 122 | - name: Q8 - Run Llama-3.2-1B-Instruct-Q8_0.gguf |
122 | 123 | run: | |
123 | 124 | cd ${{ github.workspace }} |
124 | 125 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
125 | 126 | ./llama-tornado --gpu --opencl \ |
126 | | - --model /opt/models/Llama-3.2-1B-Instruct-Q8_0.gguf \ |
 | 127 | + --model $MODELS_DIR/Llama-3.2-1B-Instruct-Q8_0.gguf \
127 | 128 | --prompt "Say hello" |
128 | 129 | - name: Q8 - Run Qwen3-0.6B-Q8_0.gguf |
129 | 130 | run: | |
130 | 131 | cd ${{ github.workspace }} |
131 | 132 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
132 | 133 | ./llama-tornado --gpu --opencl \ |
133 | | - --model /opt/models/Qwen3-0.6B-Q8_0.gguf \ |
| 134 | + --model $MODELS_DIR/Qwen3-0.6B-Q8_0.gguf \ |
134 | 135 | --prompt "Say hello" |
135 | 136 | - name: Q8 - Run Phi-3-mini-4k-instruct-Q8_0.gguf |
136 | 137 | run: | |
137 | 138 | cd ${{ github.workspace }} |
138 | 139 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
139 | 140 | ./llama-tornado --gpu --opencl \ |
140 | | - --model /opt/models/Phi-3-mini-4k-instruct-Q8_0.gguf \ |
| 141 | + --model $MODELS_DIR/Phi-3-mini-4k-instruct-Q8_0.gguf \ |
141 | 142 | --prompt "Say hello" |
142 | 143 | - name: Q8 - Run Qwen2.5-1.5b-instruct-q8_0.gguf |
143 | 144 | run: | |
144 | 145 | cd ${{ github.workspace }} |
145 | 146 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
146 | 147 | ./llama-tornado --gpu --opencl \ |
147 | | - --model /opt/models/qwen2.5-1.5b-instruct-q8_0.gguf \ |
| 148 | + --model $MODELS_DIR/qwen2.5-1.5b-instruct-q8_0.gguf \ |
148 | 149 | --prompt "Say hello" |
149 | 150 | - name: Q8 - Mistral-7B-Instruct-v0.3.Q8_0.gguf |
150 | 151 | run: | |
151 | 152 | cd ${{ github.workspace }} |
152 | 153 | export PATH="$TORNADO_SDK/bin:$JAVA_HOME/bin:$PATH" |
153 | 154 | ./llama-tornado --gpu --opencl \ |
154 | | - --model /opt/models/Mistral-7B-Instruct-v0.3.Q8_0.gguf \ |
| 155 | + --model $MODELS_DIR/Mistral-7B-Instruct-v0.3.Q8_0.gguf \ |
155 | 156 | --prompt "Say hello" |
0 commit comments