From 7a46ce4f76f8f3aa962c749424669dea501fb49d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 25 Sep 2025 15:06:21 +0300
Subject: [PATCH 1/3] ci : add AMD runners and workflows

---
 .github/workflows/build.yml | 54 ++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4f70232b1777f..8810d35c41fc9 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1430,33 +1430,33 @@ jobs:
       run: |
         bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-#  ggml-ci-x64-amd-vulkan:
-#    runs-on: [self-hosted, Linux, X64, AMD]
-#
-#    steps:
-#      - name: Clone
-#        id: checkout
-#        uses: actions/checkout@v4
-#
-#      - name: Test
-#        id: ggml-ci
-#        run: |
-#          vulkaninfo --summary
-#          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-#
-#  ggml-ci-x64-amd-rocm:
-#    runs-on: [self-hosted, Linux, X64, AMD]
-#
-#    steps:
-#      - name: Clone
-#        id: checkout
-#        uses: actions/checkout@v4
-#
-#      - name: Test
-#        id: ggml-ci
-#        run: |
-#          amd-smi static
-#          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+  ggml-ci-x64-amd-vulkan:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-amd-rocm:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          amd-smi static
+          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]

From 498888b0f9566cd2018e33df3ee3fb4d0fe241af Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 29 Sep 2025 09:00:47 +0300
Subject: [PATCH 2/3] ci : move AMD jobs to separate workflow [no ci]

---
 .github/workflows/build-amd.yml | 57 +++++++++++++++++++++++++++++++++
 .github/workflows/build.yml     | 28 ----------------------------
 2 files changed, 57 insertions(+), 28 deletions(-)
 create mode 100644 .github/workflows/build-amd.yml

diff --git a/.github/workflows/build-amd.yml b/.github/workflows/build-amd.yml
new file mode 100644
index 0000000000000..5bcdf44501014
--- /dev/null
+++ b/.github/workflows/build-amd.yml
@@ -0,0 +1,57 @@
+name: CI (AMD)
+
+on:
+  workflow_dispatch: # allows manual triggering
+  push:
+    branches:
+      - master
+    paths: [
+      '.github/workflows/build.yml',
+      '.github/workflows/build-linux-cross.yml',
+      '.github/workflows/build-cmake-pkg.yml',
+      '**/CMakeLists.txt',
+      '**/.cmake',
+      '**/*.h',
+      '**/*.hpp',
+      '**/*.c',
+      '**/*.cpp',
+      '**/*.cu',
+      '**/*.cuh',
+      '**/*.swift',
+      '**/*.m',
+      '**/*.metal',
+      '**/*.comp'
+    ]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  ggml-ci-x64-amd-vulkan:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-amd-rocm:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          amd-smi static
+          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 8810d35c41fc9..350b1af8f0fd1 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1430,34 +1430,6 @@ jobs:
       run: |
         bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-  ggml-ci-x64-amd-vulkan:
-    runs-on: [self-hosted, Linux, X64, AMD]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          vulkaninfo --summary
-          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
-  ggml-ci-x64-amd-rocm:
-    runs-on: [self-hosted, Linux, X64, AMD]
-
-    steps:
-      - name: Clone
-        id: checkout
-        uses: actions/checkout@v4
-
-      - name: Test
-        id: ggml-ci
-        run: |
-          amd-smi static
-          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
 
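Since build-amd.yml declares a workflow_dispatch trigger, the new jobs can also be exercised without pushing to master. A minimal sketch using the GitHub CLI, assuming gh is authenticated against the repository; the workflow file name is taken from the patch above:

    # trigger the AMD workflow manually on master
    gh workflow run build-amd.yml --ref master

    # watch the run it started (picks the most recent run of this workflow)
    gh run watch $(gh run list --workflow=build-amd.yml --limit 1 --json databaseId --jq '.[0].databaseId')

The concurrency group added in the same patch cancels an in-progress run when a newer push to the same ref arrives, so repeated manual triggers supersede each other rather than queueing.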
From 9c9414e6cc91b959740c0e6e6b4a0591a1897505 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 29 Sep 2025 13:02:16 +0300
Subject: [PATCH 3/3] cont : fix paths

---
 .github/workflows/build-amd.yml | 7 +------
 ci/run.sh                       | 1 +
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/build-amd.yml b/.github/workflows/build-amd.yml
index 5bcdf44501014..b6fe8de8650a1 100644
--- a/.github/workflows/build-amd.yml
+++ b/.github/workflows/build-amd.yml
@@ -6,9 +6,7 @@ on:
     branches:
       - master
     paths: [
-      '.github/workflows/build.yml',
-      '.github/workflows/build-linux-cross.yml',
-      '.github/workflows/build-cmake-pkg.yml',
+      '.github/workflows/build-amd.yml',
       '**/CMakeLists.txt',
       '**/.cmake',
       '**/*.h',
@@ -17,9 +15,6 @@ on:
       '**/*.cpp',
       '**/*.cu',
       '**/*.cuh',
-      '**/*.swift',
-      '**/*.m',
-      '**/*.metal',
       '**/*.comp'
     ]
 
diff --git a/ci/run.sh b/ci/run.sh
index 68cbfdf2f52aa..b0af51723bcfe 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -114,6 +114,7 @@ if [ ! -z ${GG_BUILD_NO_SVE} ]; then
     # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
 fi
+
 ## helpers
 
 # download a file if it does not exist or if it is outdated
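For context, the GG_BUILD_ROCM and GG_BUILD_AMDGPU_TARGETS variables set by the rocm job would be consumed inside ci/run.sh. The patch does not show that block, so here is a hypothetical sketch following the CMAKE_EXTRA pattern visible in the GG_BUILD_NO_SVE hunk above; the GGML_HIP and AMDGPU_TARGETS flag names are assumptions, not taken from the patch:

    # hypothetical: map the CI toggles onto CMake flags, mirroring the
    # GG_BUILD_NO_SVE block above; flag names are assumed, not from the patch
    if [ ! -z ${GG_BUILD_ROCM} ]; then
        if [ -z ${GG_BUILD_AMDGPU_TARGETS} ]; then
            echo "missing GG_BUILD_AMDGPU_TARGETS, e.g. gfx1101"
            exit 1
        fi
        CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_HIP=ON -DAMDGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}"
    fi

Pinning the target to gfx1101 in the workflow keeps the HIP build scoped to the GPU actually present on the self-hosted runner instead of compiling for every supported architecture.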