ARG GCC_VERSION=15.2.0
ARG UBUNTU_VERSION=24.04
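# Both versions can be overridden at build time, e.g.:
#   docker build --build-arg GCC_VERSION=15.2.0 --build-arg UBUNTU_VERSION=24.04 .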

### Build Llama.cpp stage
FROM --platform=linux/s390x gcc:${GCC_VERSION} AS build

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt upgrade -y && \
    apt install -y --no-install-recommends \
        git cmake ccache ninja-build \
        # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
        libopenblas-dev libcurl4-openssl-dev && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY . .

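# Both mounts below are BuildKit caches: ccache reuses compiled objects and
# /app/build keeps the CMake build tree, so rebuilds recompile only what changed.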
RUN --mount=type=cache,target=/root/.ccache \
    --mount=type=cache,target=/app/build \
    cmake -S . -B build -G Ninja \
        -DCMAKE_BUILD_TYPE=Release \
        -DCMAKE_C_COMPILER_LAUNCHER=ccache \
        -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
        -DLLAMA_BUILD_TESTS=OFF \
        -DGGML_BACKEND_DL=OFF \
        -DGGML_NATIVE=OFF \
        -DGGML_BLAS=ON \
        -DGGML_BLAS_VENDOR=OpenBLAS && \
    cmake --build build --config Release -j $(nproc) && \
    cmake --install build --prefix /opt/llama.cpp

# Destination must end with / because COPY with multiple sources requires a directory
COPY *.py /opt/llama.cpp/bin/
COPY .devops/tools.sh /opt/llama.cpp/bin/

COPY gguf-py /opt/llama.cpp/gguf-py
COPY requirements.txt /opt/llama.cpp/gguf-py
COPY requirements /opt/llama.cpp/gguf-py/requirements


### Collect llama.cpp binaries, libraries and Python scripts
FROM --platform=linux/s390x scratch AS collector
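# `scratch` is an empty image; this stage only gathers the build artifacts into
# one predictable layout for later stages to COPY from.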

# Copy llama.cpp binaries and libraries
COPY --from=build /opt/llama.cpp/bin /llama.cpp/bin
COPY --from=build /opt/llama.cpp/lib /llama.cpp/lib
COPY --from=build /opt/llama.cpp/gguf-py /llama.cpp/gguf-py


### Base image
FROM --platform=linux/s390x ubuntu:${UBUNTU_VERSION} AS base

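# Runtime dependencies only: libgomp and OpenBLAS for the BLAS backend, plus
# curl, which also provides the libcurl the binaries link against.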
RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt install -y --no-install-recommends \
        # WARNING: Do not use libopenblas-openmp-dev. libopenblas-dev is faster.
        curl libgomp1 libopenblas-dev && \
    apt autoremove -y && \
    apt clean -y && \
    rm -rf /tmp/* /var/tmp/* && \
    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
    find /var/cache -type f -delete

# Copy llama.cpp libraries
COPY --from=collector /llama.cpp/lib /usr/lib/s390x-linux-gnu


### Full
FROM --platform=linux/s390x base AS full

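# Put cargo on PATH up front so binaries from the rustup install below are
# visible in all later layers.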
ENV PATH="/root/.cargo/bin:${PATH}"
WORKDIR /app

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt/lists \
    apt update -y && \
    apt install -y \
        git cmake libjpeg-dev \
        python3 python3-pip python3-dev && \
    apt autoremove -y && \
    apt clean -y && \
    rm -rf /tmp/* /var/tmp/* && \
    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
    find /var/cache -type f -delete

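# Rust toolchain for Python dependencies (e.g. tokenizers) that ship no
# prebuilt s390x wheels and must be built from source.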
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y

COPY --from=collector /llama.cpp/bin /app
COPY --from=collector /llama.cpp/gguf-py /app/gguf-py

RUN pip install --no-cache-dir --break-system-packages \
    -r /app/gguf-py/requirements.txt

ENTRYPOINT [ "/app/tools.sh" ]


### CLI Only
FROM --platform=linux/s390x base AS light

WORKDIR /llama.cpp/bin

# Copy only the llama-cli binary; its shared libraries are already in the base image
COPY --from=collector /llama.cpp/bin/llama-cli /llama.cpp/bin

ENTRYPOINT [ "/llama.cpp/bin/llama-cli" ]


### Server
FROM --platform=linux/s390x base AS server

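# Listen on all interfaces so the published port is reachable from outside the container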
ENV LLAMA_ARG_HOST=0.0.0.0

WORKDIR /llama.cpp/bin

# Copy only the llama-server binary; its shared libraries are already in the base image
COPY --from=collector /llama.cpp/bin/llama-server /llama.cpp/bin

EXPOSE 8080

ENTRYPOINT [ "/llama.cpp/bin/llama-server" ]
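

# Example usage (illustrative: adjust the Dockerfile path, e.g. .devops/s390x.Dockerfile,
# to wherever this file lives, and build from the repository root so the COPY paths resolve):
#   docker build --target full   -t llamacpp-full-s390x   -f .devops/s390x.Dockerfile .
#   docker build --target light  -t llamacpp-light-s390x  -f .devops/s390x.Dockerfile .
#   docker build --target server -t llamacpp-server-s390x -f .devops/s390x.Dockerfile .
#   docker run -p 8080:8080 -v "$PWD/models:/models" llamacpp-server-s390x -m /models/model.gguf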