diff --git a/.github/workflows/build-amd.yml b/.github/workflows/build-amd.yml
new file mode 100644
index 0000000000000..b6fe8de8650a1
--- /dev/null
+++ b/.github/workflows/build-amd.yml
@@ -0,0 +1,52 @@
+name: CI (AMD)
+
+on:
+  workflow_dispatch: # allows manual triggering
+  push:
+    branches:
+      - master
+    paths: [
+      '.github/workflows/build-amd.yml',
+      '**/CMakeLists.txt',
+      '**/.cmake',
+      '**/*.h',
+      '**/*.hpp',
+      '**/*.c',
+      '**/*.cpp',
+      '**/*.cu',
+      '**/*.cuh',
+      '**/*.comp'
+    ]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  ggml-ci-x64-amd-vulkan:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          vulkaninfo --summary
+          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
+
+  ggml-ci-x64-amd-rocm:
+    runs-on: [self-hosted, Linux, X64, AMD]
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Test
+        id: ggml-ci
+        run: |
+          amd-smi static
+          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 4f70232b1777f..350b1af8f0fd1 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1430,34 +1430,6 @@ jobs:
         run: |
           bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
 
-#  ggml-ci-x64-amd-vulkan:
-#    runs-on: [self-hosted, Linux, X64, AMD]
-#
-#    steps:
-#      - name: Clone
-#        id: checkout
-#        uses: actions/checkout@v4
-#
-#      - name: Test
-#        id: ggml-ci
-#        run: |
-#          vulkaninfo --summary
-#          GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-#
-#  ggml-ci-x64-amd-rocm:
-#    runs-on: [self-hosted, Linux, X64, AMD]
-#
-#    steps:
-#      - name: Clone
-#        id: checkout
-#        uses: actions/checkout@v4
-#
-#      - name: Test
-#        id: ggml-ci
-#        run: |
-#          amd-smi static
-#          GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
-
   ggml-ci-mac-metal:
     runs-on: [self-hosted, macOS, ARM64]
 
diff --git a/ci/run.sh b/ci/run.sh
index 68cbfdf2f52aa..b0af51723bcfe 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -114,6 +114,7 @@ if [ ! -z ${GG_BUILD_NO_SVE} ]; then
     # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
 fi
+
 ## helpers
 
 # download a file if it does not exist or if it is outdated
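
Note: since the new workflow declares workflow_dispatch, it can also be started by hand rather than waiting for a push to master. A minimal sketch using the GitHub CLI (assumes an authenticated gh and the workflow file name added above; this invocation is illustrative and not part of the patch):

    # manually trigger the AMD CI workflow against master
    gh workflow run build-amd.yml --ref master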