2 changes: 1 addition & 1 deletion AgentQnA/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -128,7 +128,7 @@ services:
       OPENAI_API_BASE_URLS: ${SUPERVISOR_AGENT_ENDPOINT}
       ENABLE_OLLAMA_API: False
   vllm-service:
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    image: opea/vllm-gaudi:1.4
     container_name: vllm-gaudi-server
     ports:
       - "8086:8000"
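
With the tag pinned, the Gaudi vLLM image is pulled from a registry instead of being built from the HabanaAI fork. A minimal sketch of fetching and inspecting the pinned image (assuming `opea/vllm-gaudi:1.4` is published on Docker Hub):

    docker pull opea/vllm-gaudi:1.4
    docker image inspect opea/vllm-gaudi:1.4 --format '{{.Id}} created {{.Created}}'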
6 changes: 0 additions & 6 deletions AgentQnA/docker_image_build/build.yaml
@@ -17,12 +17,6 @@ services:
       dockerfile: ./docker/Dockerfile
     extends: agent
     image: ${REGISTRY:-opea}/agent-ui:${TAG:-latest}
-  vllm-gaudi:
-    build:
-      context: vllm-fork
-      dockerfile: Dockerfile.hpu
-    extends: agent
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
   vllm-rocm:
     build:
       context: GenAIComps
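
The in-tree `vllm-gaudi` build target is removed here. Anyone who still needs a source build can reproduce the old target out of tree; a sketch using the fork tag the deleted scripts pinned (newer fork tags may differ, and the output tag `opea/vllm-gaudi:custom` is illustrative):

    git clone https://github.com/HabanaAI/vllm-fork.git
    cd vllm-fork
    git checkout v0.6.6.post1+Gaudi-1.20.0
    docker build -f Dockerfile.hpu -t opea/vllm-gaudi:custom .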
9 changes: 2 additions & 7 deletions AgentQnA/tests/step1_build_images.sh
@@ -40,12 +40,8 @@ function build_agent_docker_image_gaudi_vllm() {
     cd $WORKDIR/GenAIExamples/AgentQnA/docker_image_build/
     get_genai_comps
 
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
-    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
-
     echo "Build agent image with --no-cache..."
-    service_list="agent agent-ui vllm-gaudi"
+    service_list="agent agent-ui"
     docker compose -f build.yaml build ${service_list} --no-cache
 }
 
@@ -83,6 +79,7 @@ function main() {
     "rocm_vllm")
         echo "==================== Build agent docker image for ROCm VLLM ===================="
         build_agent_docker_image_rocm_vllm
+        docker image ls | grep vllm
         ;;
     "gaudi_vllm")
         echo "==================== Build agent docker image for Gaudi ===================="
@@ -97,8 +94,6 @@ function main() {
         exit 1
         ;;
     esac
-
-    docker image ls | grep vllm
 }
 
 main $1
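
Moving `docker image ls | grep vllm` into the `rocm_vllm` arm is consistent with the change: the Gaudi path no longer produces a local vLLM image, so a global grep could match nothing and, if the script runs under `set -e`, abort the run. Typical invocation of the Gaudi path, assuming the surrounding script is unchanged:

    bash step1_build_images.sh gaudi_vllm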
2 changes: 1 addition & 1 deletion CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -3,7 +3,7 @@
 
 services:
   vllm-service:
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    image: opea/vllm-gaudi:1.4
     container_name: vllm-gaudi-server
     ports:
       - "8028:80"
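
Once the stack is up, the pinned service can be smoke-tested through vLLM's OpenAI-compatible API on the mapped host port (8028 here; the endpoint assumes a standard vLLM server build):

    curl http://localhost:8028/v1/models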
6 changes: 0 additions & 6 deletions CodeGen/docker_image_build/build.yaml
@@ -78,12 +78,6 @@ services:
       dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
     extends: codegen
     image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
-  vllm-gaudi:
-    build:
-      context: vllm-fork
-      dockerfile: Dockerfile.hpu
-    extends: codegen
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
   dataprep:
     build:
       context: GenAIComps
8 changes: 1 addition & 7 deletions CodeGen/tests/test_compose_on_gaudi.sh
@@ -25,14 +25,8 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
 
-    # Download Gaudi vllm of latest tag
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
-    echo "Check out vLLM tag ${VLLM_FORK_VER}"
-    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
-
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="codegen codegen-gradio-ui llm-textgen vllm-gaudi dataprep retriever embedding"
+    service_list="codegen codegen-gradio-ui llm-textgen dataprep retriever embedding"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker images && sleep 1s
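
Build output is redirected to a log rather than the console; one way to watch it during long builds (assuming `LOG_PATH` is exported as elsewhere in these test scripts):

    tail -f ${LOG_PATH}/docker_image_build.log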
2 changes: 1 addition & 1 deletion CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -3,7 +3,7 @@
 
 services:
   vllm-service:
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    image: opea/vllm-gaudi:1.4
     container_name: codetrans-gaudi-vllm-service
     ports:
       - "8008:80"
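
If a locally built image is still wanted for development, the pin can be overridden without editing this file via a compose override; the file name and contents below are illustrative:

    # docker-compose.override.yaml (illustrative)
    services:
      vllm-service:
        image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}

    docker compose -f compose.yaml -f docker-compose.override.yaml up -d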
6 changes: 0 additions & 6 deletions CodeTrans/docker_image_build/build.yaml
@@ -25,12 +25,6 @@ services:
       dockerfile: comps/llms/src/text-generation/Dockerfile
     extends: codetrans
     image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest}
-  vllm-gaudi:
-    build:
-      context: vllm-fork
-      dockerfile: Dockerfile.hpu
-    extends: codetrans
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
   nginx:
     build:
       context: GenAIComps
6 changes: 1 addition & 5 deletions CodeTrans/tests/test_compose_on_gaudi.sh
@@ -25,12 +25,8 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
 
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
-    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
-
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="codetrans codetrans-ui llm-textgen vllm-gaudi nginx"
+    service_list="codetrans codetrans-ui llm-textgen nginx"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker images && sleep 1s
2 changes: 1 addition & 1 deletion DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -3,7 +3,7 @@
 
 services:
   vllm-service:
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    image: opea/vllm-gaudi:1.4
     container_name: docsum-gaudi-vllm-service
     ports:
       - ${LLM_ENDPOINT_PORT:-8008}:80
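
DocSum parameterizes the host port, so the service can be remapped at launch without touching the file (9008 below is an arbitrary example):

    LLM_ENDPOINT_PORT=9008 docker compose -f compose.yaml up -d vllm-service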
6 changes: 0 additions & 6 deletions DocSum/docker_image_build/build.yaml
@@ -54,9 +54,3 @@ services:
       context: GenAIComps
       dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
     image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
-  vllm-gaudi:
-    build:
-      context: vllm-fork
-      dockerfile: Dockerfile.hpu
-    extends: docsum
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
6 changes: 1 addition & 5 deletions DocSum/tests/test_compose_on_gaudi.sh
@@ -46,12 +46,8 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
 
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
-    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
-
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi"
+    service_list="docsum docsum-gradio-ui whisper llm-docsum"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log 2>&1
 
     docker images && sleep 1s
2 changes: 1 addition & 1 deletion VisualQnA/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -3,7 +3,7 @@
 
 services:
   vllm-gaudi-service:
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    image: opea/vllm-gaudi:1.4
     container_name: vllm-gaudi-service
     ports:
       - ${VLLM_PORT:-8399}:80
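
The same pin is applied across all five examples; after bringing any of the stacks up, a quick check of which vLLM image a container actually runs (standard `docker ps` filter and format syntax):

    docker ps --filter name=vllm --format 'table {{.Names}}\t{{.Image}}'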
6 changes: 0 additions & 6 deletions VisualQnA/docker_image_build/build.yaml
@@ -37,9 +37,3 @@ services:
       dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
     extends: visualqna
     image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
-  vllm-gaudi:
-    build:
-      context: vllm-fork
-      dockerfile: Dockerfile.hpu
-    extends: visualqna
-    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
6 changes: 1 addition & 5 deletions VisualQnA/tests/test_compose_on_gaudi.sh
@@ -26,11 +26,7 @@ function build_docker_images() {
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
     popd && sleep 1s
 
-    git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
-    VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
-    git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
-
-    service_list="visualqna visualqna-ui lvm nginx vllm-gaudi"
+    service_list="visualqna visualqna-ui lvm nginx"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
     docker images && sleep 1s
 }