Commit cb67ddc

use bigendian models when testing bigendian arches

Signed-off-by: Mike Bonnet <[email protected]>

1 parent: 128911e

File tree (7 files changed: +66 −7 lines)

  shortnames/shortnames.conf
  test/system/002-bench.bats
  test/system/030-run.bats
  test/system/040-serve.bats
  test/system/050-pull.bats
  test/system/055-convert.bats
  test/system/helpers.bash

shortnames/shortnames.conf (2 additions, 0 deletions)

@@ -24,6 +24,8 @@
 "granite:2b" = "ollama://granite3.1-dense:2b"
 "granite:7b" = "huggingface://instructlab/granite-7b-lab-GGUF/granite-7b-lab-Q4_K_M.gguf"
 "granite:8b" = "ollama://granite3.1-dense:8b"
+"granite-be-3.0:1b" = "hf://taronaeo/Granite-3.0-1B-A400M-Instruct-BE-GGUF/granite-3.0-1b-a400m-instruct-be.Q2_K.gguf"
+"granite-be-3.3:2b" = "hf://taronaeo/Granite-3.3-2B-Instruct-BE-GGUF/granite-3.3-2b-instruct-be.Q4_K_M.gguf"
 "hermes" = "huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
 "ibm/granite" = "ollama://granite3.1-dense:8b"
 "ibm/granite:2b" = "ollama://granite3.1-dense:2b"

test/system/002-bench.bats (4 additions, 1 deletion)

@@ -16,7 +16,10 @@ function setup() {
 # bats test_tags=distro-integration
 @test "ramalama bench" {
     skip_if_no_llama_bench
-    run_ramalama bench -t 2 smollm:135m
+    if is_s390x; then
+        local RAMALAMA_TIMEOUT=1200
+    fi
+    run_ramalama bench -t 2 $(test_model smollm:135m)
     is "$output" ".*model.*size.*" "model and size in output"
 }
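
Two changes here: the model name is routed through the new test_model helper (defined in helpers.bash below), and the timeout is raised to 1200 seconds on s390x, where the benchmark needs far more headroom. A hypothetical shell session showing what test_model yields on each host class:

    # On a little-endian host (e.g. x86_64):
    $ test_model smollm:135m
    smollm:135m

    # On a big-endian host (s390x), the helper substitutes its BE default:
    $ test_model smollm:135m
    granite-be-3.0:1b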

test/system/030-run.bats (4 additions, 3 deletions)

@@ -125,19 +125,20 @@ EOF
     is "$output" ".*-e HSA_OVERRIDE_GFX_VERSION=0.0.0" "ensure HSA_OVERRIDE_GFX_VERSION is set from environment"
 }

-@test "ramalama run smollm with prompt" {
-    run_ramalama run --temp 0 ${MODEL} "What is the first line of the declaration of independence?"
+@test "ramalama run with prompt" {
+    run_ramalama run --temp 0 $(test_model ${MODEL} granite-be-3.3:2b) "What is the first line of the declaration of independence?"
 }

 @test "ramalama run --keepalive" {
     # timeout within 1 second and generate a 124 error code.
-    run_ramalama 0 --debug run --keepalive 1s tiny
+    run_ramalama 0 --debug run --keepalive 1s $(test_model tiny)
 }

 @test "ramalama run --image bogus" {
     skip_if_nocontainer
     skip_if_darwin
     skip_if_docker
+    run_ramalama pull tiny
     run_ramalama 22 run --image bogus --pull=never tiny
     is "$output" ".*Error: bogus: image not known"
     run_ramalama 125 run --image bogus1 --rag quay.io/ramalama/rag --pull=never tiny
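
Two details worth noting: test_model's optional second argument names an explicit big-endian substitute (here granite-be-3.3:2b, overriding the helper's granite-be-3.0:1b default), and the --image bogus test now pulls tiny up front so its --pull=never invocations fail on the bogus image rather than on a missing model. A sketch of the two-argument form:

    model=$(test_model ${MODEL} granite-be-3.3:2b)
    # little-endian host: $model is ${MODEL} (the suite's default model)
    # big-endian host:    $model is granite-be-3.3:2b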

test/system/040-serve.bats (7 additions, 3 deletions)

@@ -126,7 +126,9 @@ verify_begin=".*run --rm"
     run_ramalama -q --dryrun serve smollm
     is "$output" ".*ai.ramalama.model=ollama://library/smollm:latest" "smollm should be expanded to fullname"

-    model=ollama://smollm:135m
+    model=$(test_model smollm:135m)
+    run_ramalama info
+    full_model=$(jq -r --arg model $model -r '.Shortnames.Names[$model]' <<<"$output")
     container1=c_$(safename)
     container2=c_$(safename)

@@ -139,7 +141,7 @@ verify_begin=".*run --rm"
     port=${output: -8:4}

     run_ramalama chat --ls --url http://127.0.0.1:${port}/v1
-    is "$output" "smollm:135m" "list of models available correct"
+    is "$output" ${full_model#*://} "list of models available correct"

     run_ramalama containers --noheading
     is "$output" ".*${container1}" "list correct for container1"

@@ -159,7 +161,7 @@ verify_begin=".*run --rm"
 @test "ramalama --detach serve multiple" {
     skip_if_nocontainer

-    model=ollama://smollm:135m
+    model=$(test_model ollama://smollm:135m)
     container=c_$(safename)
     port1=8100
     port2=8200

@@ -445,6 +447,8 @@ verify_begin=".*run --rm"
 @test "ramalama serve --api llama-stack" {
     skip_if_docker
     skip_if_nocontainer
+    skip_if_ppc64le
+    skip_if_s390x
     model=tiny
     name=c_$(safename)
     run_ramalama pull ${model}
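
Since the served model now varies by architecture, the expected name can no longer be hard-coded. The test asks ramalama info for the shortname table, resolves the alias with jq, and strips the scheme prefix for the chat --ls comparison. A hedged sketch of that lookup (the JSON shape is inferred from the jq path, not copied from real output):

    # Hypothetical excerpt of `ramalama info`:
    #   {"Shortnames": {"Names": {"smollm:135m": "ollama://smollm:135m", ...}}}
    run_ramalama info
    full_model=$(jq -r --arg model "$model" '.Shortnames.Names[$model]' <<<"$output")
    echo "${full_model#*://}"    # e.g. "smollm:135m", with the ollama:// scheme removed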

test/system/050-pull.bats (2 additions, 0 deletions)

@@ -171,6 +171,7 @@ load setup_suite
     if ! is_bigendian; then
         skip "Testing pulls of opposite-endian models"
     fi
+    run_ramalama rm --ignore tiny
     run_ramalama 1 pull --verify=on tiny
     is "$output" ".*Endian mismatch of host (BIG) and model (LITTLE).*" "detected little-endian model"
 }

@@ -179,6 +180,7 @@ load setup_suite
     if is_bigendian; then
         skip "Testing pulls of opposite-endian models"
     fi
+    run_ramalama rm --ignore granite-be-3.0:1b
     run_ramalama 1 pull --verify=on granite-be-3.0:1b
     is "$output" ".*Endian mismatch of host (LITTLE) and model (BIG).*" "detected big-endian model"
 }
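
Each mismatch test now removes any cached copy first, so the pull is forced to fetch and verify rather than short-circuit on a local model. For intuition on what --verify=on is catching, here is a strictly illustrative endianness sniff (an assumption, not ramalama's actual implementation) based on the GGUF header, which stores a uint32 version at offset 4 in the file's native byte order:

    # Read the version field as little-endian; an implausibly large value
    # means the file's byte order is opposite to the reader's.
    gguf_version_le() {
        set -- $(od -An -tu1 -j4 -N4 "$1")
        echo $(( $1 | ($2 << 8) | ($3 << 16) | ($4 << 24) ))
    }

    # model.gguf is a placeholder path for this sketch
    if [ "$(gguf_version_le model.gguf)" -gt 65535 ]; then
        echo "model byte order does not match a little-endian reader"
    fi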

test/system/055-convert.bats (9 additions, 0 deletions)

@@ -19,6 +19,9 @@ load helpers

 @test "ramalama convert file to image" {
     skip_if_nocontainer
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     echo "hello" > $RAMALAMA_TMPDIR/aimodel
     run_ramalama convert file://$RAMALAMA_TMPDIR/aimodel foobar
     run_ramalama list

@@ -45,6 +48,9 @@ load helpers
 @test "ramalama convert tiny to image" {
     skip_if_nocontainer
     skip_if_docker
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     run_ramalama pull tiny
     run_ramalama convert tiny oci://quay.io/ramalama/tiny
     run_ramalama list

@@ -75,6 +81,9 @@ load helpers
 @test "ramalama convert tiny to GGUF image" {
     skip_if_nocontainer
     skip_if_docker
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     run_ramalama pull hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0
     run_ramalama convert --gguf Q4_0 hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0 oci://quay.io/ramalama/tiny-q4-0
     run_ramalama list

test/system/helpers.bash (38 additions, 0 deletions)

@@ -289,5 +289,43 @@ function skip_if_no_llama_bench() {
     fi
 }

+function is_ppc64le() {
+    [ "$(uname -m)" == "ppc64le" ]
+}
+
+function skip_if_ppc64le() {
+    if is_ppc64le; then
+        skip "Not yet supported on ppc64le"
+    fi
+}
+
+function is_s390x() {
+    [ "$(uname -m)" == "s390x" ]
+}
+
+function skip_if_s390x() {
+    if is_s390x; then
+        skip "Not yet supported on s390x"
+    fi
+}
+
+function is_bigendian() {
+    is_s390x
+}
+
+function test_model() {
+    if is_bigendian; then
+        # If there's a smaller, functional bigendian model, put it here
+        echo ${2:-granite-be-3.0:1b}
+    else
+        echo ${1:-smollm:135m}
+    fi
+}
+
+function model_base() {
+    local base=${1##*/}
+    echo ${base%:*}
+}
+
 # END miscellaneous tools
###############################################################################
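
is_bigendian delegates to is_s390x because s390x is the only big-endian architecture in play here (ppc64le, despite the PowerPC lineage, is little-endian). A short sketch of how a new test might combine these helpers (the test body itself is hypothetical):

    @test "example: endianness-aware model selection" {
        # smollm:135m on little-endian hosts, granite-be-3.3:2b on s390x
        model=$(test_model smollm:135m granite-be-3.3:2b)
        run_ramalama pull ${model}

        # model_base strips any path prefix and the :tag suffix, e.g.
        #   model_base ollama://library/smollm:135m  ->  smollm
        base=$(model_base ${model})
    }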
