
Commit f848f04

[mac-ai] Enable RamaLama CPU nightly testing (#823)
Needs containers/ramalama#1909 to work.

## Summary by CodeRabbit

* **New Features**
  * Added a RamaLama "no-gpu" platform option, available in presets and platform flavors, for easier selection on systems without GPU access.
* **Bug Fixes**
  * Non-GPU runs now use a safe "none" device configuration for improved stability.
  * GPU access is now disabled automatically when the selected inference server flavor is "no-gpu", preventing unintended GPU usage.
2 parents 594eeb9 + 1b55690 commit f848f04

3 files changed: +10, −3 lines

projects/mac_ai/testing/config.yaml (6 additions, 2 deletions)

```diff
@@ -66,6 +66,7 @@ ci_presets:
     test.model.name: ollama://llama3.2
     test.platform:
     - podman/ramalama
+    - podman/ramalama/no-gpu
     - macos/llama_cpp/vulkan
     # - podman/llama_cpp/vulkan
     - macos/llama_cpp/metal
@@ -75,7 +76,10 @@ ci_presets:
     prepare.ramalama.repo.version: main
     prepare.ramalama.build_image.enabled: true

-    test.platform: [podman/ramalama]
+    test.platform:
+    - podman/ramalama/no-gpu
+    - podman/ramalama
+
     test.model.name: ollama://llama3.2
     matbench.lts.generate: true
     test.inference_server.benchmark.enabled: true
@@ -628,4 +632,4 @@ __platform_check:

   flavors:
     llama_cpp: [metal, vulkan, upstream_bin, remoting]
-    ramalama: [remoting, null]
+    ramalama: [remoting, no-gpu, null]
```
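For context, here is a minimal Python sketch of the flavor check that the `flavors:` block above feeds into. The `FLAVORS` table mirrors the config; the function name and error message are illustrative, not the repository's actual code:

```python
# Hypothetical sketch: how the flavors table constrains platform strings.
# "no-gpu" is now a valid ramalama flavor alongside "remoting" and the
# bare default (None).
FLAVORS = {
    "llama_cpp": ["metal", "vulkan", "upstream_bin", "remoting"],
    "ramalama": ["remoting", "no-gpu", None],
}

def check_flavor(server: str, flavor: str | None) -> None:
    allowed = FLAVORS[server]
    if flavor not in allowed:
        raise ValueError(f"Invalid flavor {flavor!r} for {server}; expected one of {allowed}")

check_flavor("ramalama", "no-gpu")  # accepted after this change
check_flavor("ramalama", None)      # bare "podman/ramalama" still works
```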

projects/mac_ai/testing/ramalama.py (1 addition, 1 deletion)

```diff
@@ -262,7 +262,7 @@ def _run_from_toolbox(ramalama_cmd, base_work_dir, platform, ramalama_path, mode

     want_gpu = platform.want_gpu
     device = config.project.get_config("prepare.podman.container.device") \
-        if want_gpu else "/dev/null"
+        if want_gpu else "none"

     if config.project.get_config("prepare.ramalama.build_image.enabled"):
        image_name = config.project.get_config("prepare.ramalama.build_image.name")
```
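In isolation, the device-selection change amounts to the following standalone sketch (the function name is assumed; the real code reads the device path from the `prepare.podman.container.device` config key). Non-GPU runs previously got `/dev/null` as the container device and now get the literal string `none` instead:

```python
# Standalone sketch of the device-selection behavior after this commit.
def select_device(want_gpu: bool, configured_device: str) -> str:
    # With a GPU, forward the device configured under
    # prepare.podman.container.device; without one, use the literal
    # string "none" rather than a host path like /dev/null.
    return configured_device if want_gpu else "none"

assert select_device(True, "/dev/dri") == "/dev/dri"
assert select_device(False, "/dev/dri") == "none"
```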

projects/mac_ai/testing/utils.py (3 additions, 0 deletions)

```diff
@@ -61,10 +61,13 @@ def parse_platform(platform_str):
     if platform.inference_server_flavor not in inference_server_has_flavors:
         raise ValueError(f"Invalid platform inference server flavor ({platform.inference_server_flavor}) in {platform_str}. Expected one of {inference_server_has_flavors}")

+
     no_gpu_option_name = config.project.get_config("__platform_check.options.no_gpu", print=False)
     if no_gpu_option_name in platform_parts:
         platform.want_gpu = False
         platform_parts.remove(no_gpu_option_name)
+    elif platform.inference_server_flavor == no_gpu_option_name:
+        platform.want_gpu = False
     else:
         platform.want_gpu = True

```
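Taken together, `want_gpu` can now be switched off two ways: by an explicit `no-gpu` part in the platform string, or (new in this commit) by the inference server flavor itself being `no-gpu`. A self-contained sketch of that decision follows; the real code resolves the option name from `__platform_check.options.no_gpu`, so the hard-coded `"no-gpu"` here is an assumption:

```python
NO_GPU = "no-gpu"  # assumed value of __platform_check.options.no_gpu

def want_gpu(platform_parts: list[str], inference_server_flavor: str | None) -> bool:
    if NO_GPU in platform_parts:
        platform_parts.remove(NO_GPU)  # consumed as a standalone option
        return False
    if inference_server_flavor == NO_GPU:
        # New in this commit: a "no-gpu" flavor also disables GPU access.
        return False
    return True

# "podman/ramalama/no-gpu" parses to flavor "no-gpu" -> GPU disabled:
assert want_gpu([], "no-gpu") is False
assert want_gpu([], "remoting") is True
```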
