7 files changed: +66 −7

@@ -24,6 +24,8 @@
 "granite:2b" = "ollama://granite3.1-dense:2b"
 "granite:7b" = "huggingface://instructlab/granite-7b-lab-GGUF/granite-7b-lab-Q4_K_M.gguf"
 "granite:8b" = "ollama://granite3.1-dense:8b"
+"granite-be-3.0:1b" = "hf://taronaeo/Granite-3.0-1B-A400M-Instruct-BE-GGUF/granite-3.0-1b-a400m-instruct-be.Q2_K.gguf"
+"granite-be-3.3:2b" = "hf://taronaeo/Granite-3.3-2B-Instruct-BE-GGUF/granite-3.3-2b-instruct-be.Q4_K_M.gguf"
 "hermes" = "huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q4_K_M.gguf"
 "ibm/granite" = "ollama://granite3.1-dense:8b"
 "ibm/granite:2b" = "ollama://granite3.1-dense:2b"
@@ -16,7 +16,10 @@ function setup() {
 # bats test_tags=distro-integration
 @test "ramalama bench" {
     skip_if_no_llama_bench
-    run_ramalama bench -t 2 smollm:135m
+    if is_s390x; then
+        local RAMALAMA_TIMEOUT=1200
+    fi
+    run_ramalama bench -t 2 $(test_model smollm:135m)
     is "$output" ".*model.*size.*" "model and size in output"
 }
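On s390x the bench run goes through a larger big-endian fallback model, which is presumably why this hunk also raises `RAMALAMA_TIMEOUT` to 1200 seconds. `test_model` is a new helper defined in helpers.bash at the end of this diff; evaluated against its defaults:

```bash
# How the single-argument form resolves, per the helper's definition below:
test_model smollm:135m   # little-endian host -> smollm:135m
                         # s390x (big-endian) -> granite-be-3.0:1b (default fallback)
```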
@@ -125,19 +125,20 @@ EOF
     is "$output" ".*-e HSA_OVERRIDE_GFX_VERSION=0.0.0" "ensure HSA_OVERRIDE_GFX_VERSION is set from environment"
 }

-@test "ramalama run smollm with prompt" {
-    run_ramalama run --temp 0 ${MODEL} "What is the first line of the declaration of independence?"
+@test "ramalama run with prompt" {
+    run_ramalama run --temp 0 $(test_model ${MODEL} granite-be-3.3:2b) "What is the first line of the declaration of independence?"
 }

 @test "ramalama run --keepalive" {
     # timeout within 1 second and generate a 124 error code.
-    run_ramalama 0 --debug run --keepalive 1s tiny
+    run_ramalama 0 --debug run --keepalive 1s $(test_model tiny)
 }

 @test "ramalama run --image bogus" {
     skip_if_nocontainer
     skip_if_darwin
     skip_if_docker
+    run_ramalama pull tiny
     run_ramalama 22 run --image bogus --pull=never tiny
     is "$output" ".*Error: bogus: image not known"
     run_ramalama 125 run --image bogus1 --rag quay.io/ramalama/rag --pull=never tiny
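The prompt test uses the two-argument form of the helper: the first argument is echoed on little-endian hosts, the second replaces it on big-endian ones. A sketch (the `MODEL` value is illustrative; the suite sets it elsewhere):

```bash
MODEL=smollm:135m                       # illustrative; defined by the test suite
test_model ${MODEL} granite-be-3.3:2b   # little-endian host -> smollm:135m
                                        # s390x (big-endian) -> granite-be-3.3:2b
```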
@@ -126,7 +126,9 @@ verify_begin=".*run --rm"
     run_ramalama -q --dryrun serve smollm
     is "$output" ".*ai.ramalama.model=ollama://library/smollm:latest" "smollm should be expanded to fullname"

-    model=ollama://smollm:135m
+    model=$(test_model smollm:135m)
+    run_ramalama info
+    full_model=$(jq -r --arg model $model '.Shortnames.Names[$model]' <<< "$output")
     container1=c_$(safename)
     container2=c_$(safename)
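Because the served model now varies by arch, the expected name in the model list is derived from `ramalama info`, whose JSON output includes the shortname table. A standalone sketch of that lookup (the JSON shown is abridged and the mapped value is assumed, not the exact registry output):

```bash
# Illustrative: `ramalama info` emits JSON with a Shortnames.Names table
# mapping shortnames to full model URLs.
output='{"Shortnames":{"Names":{"smollm:135m":"ollama://library/smollm:135m"}}}'
model=smollm:135m
full_model=$(jq -r --arg model "$model" '.Shortnames.Names[$model]' <<< "$output")
echo "${full_model#*://}"   # -> library/smollm:135m, the name the server lists
```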
@@ -139,7 +141,7 @@ verify_begin=".*run --rm"
     port=${output: -8:4}

     run_ramalama chat --ls --url http://127.0.0.1:${port}/v1
-    is "$output" "smollm:135m" "list of models available correct"
+    is "$output" ${full_model#*://} "list of models available correct"

     run_ramalama containers --noheading
     is "$output" ".*${container1}" "list correct for container1"
@@ -159,7 +161,7 @@ verify_begin=".*run --rm"
 @test "ramalama --detach serve multiple" {
     skip_if_nocontainer

-    model=ollama://smollm:135m
+    model=$(test_model ollama://smollm:135m)
     container=c_$(safename)
     port1=8100
     port2=8200
@@ -445,6 +447,8 @@ verify_begin=".*run --rm"
 @test "ramalama serve --api llama-stack" {
     skip_if_docker
     skip_if_nocontainer
+    skip_if_ppc64le
+    skip_if_s390x
     model=tiny
     name=c_$(safename)
     run_ramalama pull ${model}
@@ -171,6 +171,7 @@ load setup_suite
     if ! is_bigendian; then
         skip "Testing pulls of opposite-endian models"
     fi
+    run_ramalama rm --ignore tiny
     run_ramalama 1 pull --verify=on tiny
     is "$output" ".*Endian mismatch of host (BIG) and model (LITTLE).*" "detected little-endian model"
 }
@@ -179,6 +180,7 @@ load setup_suite
     if is_bigendian; then
         skip "Testing pulls of opposite-endian models"
     fi
+    run_ramalama rm --ignore granite-be-3.0:1b
     run_ramalama 1 pull --verify=on granite-be-3.0:1b
     is "$output" ".*Endian mismatch of host (LITTLE) and model (BIG).*" "detected big-endian model"
 }
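The new `rm --ignore` lines ensure a cached copy cannot short-circuit the pull, so `--verify=on` actually fetches and inspects the model. Roughly, on a little-endian host (exit status and message text per the assertions above):

```bash
ramalama rm --ignore granite-be-3.0:1b       # drop any cached copy; --ignore tolerates absence
ramalama pull --verify=on granite-be-3.0:1b  # exits 1, output contains:
#   Endian mismatch of host (LITTLE) and model (BIG)
```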
@@ -19,6 +19,9 @@ load helpers

 @test "ramalama convert file to image" {
     skip_if_nocontainer
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     echo "hello" > $RAMALAMA_TMPDIR/aimodel
     run_ramalama convert file://$RAMALAMA_TMPDIR/aimodel foobar
     run_ramalama list
@@ -45,6 +48,9 @@ load helpers
 @test "ramalama convert tiny to image" {
     skip_if_nocontainer
     skip_if_docker
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     run_ramalama pull tiny
     run_ramalama convert tiny oci://quay.io/ramalama/tiny
     run_ramalama list
@@ -75,6 +81,9 @@ load helpers
 @test "ramalama convert tiny to GGUF image" {
     skip_if_nocontainer
     skip_if_docker
+    # Requires the -rag images which are not available on these arches yet
+    skip_if_ppc64le
+    skip_if_s390x
     run_ramalama pull hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0
     run_ramalama convert --gguf Q4_0 hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0 oci://quay.io/ramalama/tiny-q4-0
     run_ramalama list
@@ -289,5 +289,43 @@ function skip_if_no_llama_bench() {
     fi
 }

+function is_ppc64le() {
+    [ "$(uname -m)" == "ppc64le" ]
+}
+
+function skip_if_ppc64le() {
+    if is_ppc64le; then
+        skip "Not yet supported on ppc64le"
+    fi
+}
+
+function is_s390x() {
+    [ "$(uname -m)" == "s390x" ]
+}
+
+function skip_if_s390x() {
+    if is_s390x; then
+        skip "Not yet supported on s390x"
+    fi
+}
+
+function is_bigendian() {
+    is_s390x
+}
+
+function test_model() {
+    if is_bigendian; then
+        # If there's a smaller, functional big-endian model, put it here
+        echo ${2:-granite-be-3.0:1b}
+    else
+        echo ${1:-smollm:135m}
+    fi
+}
+
+function model_base() {
+    local base=${1##*/}
+    echo ${base%:*}
+}
+
 # END miscellaneous tools
 ###############################################################################
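Usage sketches for the new helpers; the outputs follow directly from the definitions above:

```bash
test_model                               # smollm:135m on LE hosts; granite-be-3.0:1b on s390x
test_model tiny granite-be-3.3:2b        # tiny on LE hosts; granite-be-3.3:2b on s390x
model_base ollama://library/smollm:135m  # -> smollm (strips path prefix and tag)
is_bigendian && echo "big-endian host"   # currently true only on s390x
```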