Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docker/llm/serving/cpu/docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
pip install Jinja2==3.1.3 && \
pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu && \
pip install intel-extension-for-pytorch==2.2.0 && \
pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/ && \
pip install oneccl_bind_pt==2.2.0 --index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/ && \
pip install transformers==4.36.2 && \
# Install vllm dependencies
pip install --upgrade fastapi && \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ conda create -n llm python=3.11
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# configures OneAPI environment variables
source /opt/intel/oneapi/setvars.sh
pip install git+https://github.com/microsoft/DeepSpeed.git@ed8aed5
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ python ./alpaca_qlora_finetuning_cpu.py \
```bash
# need to run the alpaca stand-alone version first
# for using mpirun
pip install oneccl_bind_pt --extra-index-url https://developer.intel.com/ipex-whl-stable
pip install oneccl_bind_pt --index-url https://developer.intel.com/ipex-whl-stable
```

2. modify conf in `finetune_one_node_two_sockets.sh` and run
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ To accelerate speculative decoding on CPU, optionally, you can install our valid
```bash
python -m pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch==2.2.0
python -m pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
python -m pip install oneccl_bind_pt==2.2.0 --index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# if there is any installation problem for oneccl_binding, you can also find a suitable index URL at "https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/" or "https://developer.intel.com/ipex-whl-stable-cpu" according to your environment.

# Install other dependencies
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ To accelerate speculative decoding on CPU, you can install our validated version
# Install IPEX 2.2.0+cpu
python -m pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch==2.2.0
python -m pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
python -m pip install oneccl_bind_pt==2.2.0 --index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# if there is any installation problem for oneccl_binding, you can also find a suitable index URL at "https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/" or "https://developer.intel.com/ipex-whl-stable-cpu" according to your environment.

# Update transformers
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ To accelerate speculative decoding on CPU, you can install our validated version
# Install IPEX 2.2.0+cpu
python -m pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch==2.2.0
python -m pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
python -m pip install oneccl_bind_pt==2.2.0 --index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# if there is any installation problem for oneccl_binding, you can also find a suitable index URL at "https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/" or "https://developer.intel.com/ipex-whl-stable-cpu" according to your environment.

# Update transformers
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ To accelerate speculative decoding on CPU, you can install our validated version
# Install IPEX 2.2.0+cpu
python -m pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch==2.2.0
python -m pip install oneccl_bind_pt==2.2.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
python -m pip install oneccl_bind_pt==2.2.0 --index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
# if there is any installation problem for oneccl_binding, you can also find a suitable index URL at "https://pytorch-extension.intel.com/release-whl/stable/cpu/cn/" or "https://developer.intel.com/ipex-whl-stable-cpu" according to your environment.

# Update transformers
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/Deepspeed-AutoTP-FastAPI/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ conda create -n llm python=3.11
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# configures OneAPI environment variables
source /opt/intel/oneapi/setvars.sh
pip install git+https://github.com/microsoft/DeepSpeed.git@ed8aed5
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/LLM-Finetuning/HF-PEFT/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
pip install transformers==4.45.0 "trl<0.12.0" datasets
pip install bitsandbytes==0.45.1 scipy
pip install fire peft==0.10.0
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
```

### 2. Configure OneAPI environment variables
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/LLM-Finetuning/LoRA/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
pip install transformers==4.45.0 "trl<0.12.0" datasets
pip install fire peft==0.10.0
pip install bitsandbytes==0.45.1 scipy
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
```

### 2. Configure OneAPI environment variables
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ pip install "deepspeed==0.13.1"
pip install "mpi4py>=3.1.5"
# below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```

### 2. Configure OneAPI Environment Variables
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/LLM-Finetuning/QA-LoRA/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
pip install transformers==4.45.0 "trl<0.12.0" datasets
pip install fire peft==0.10.0
pip install bitsandbytes==0.45.1 scipy
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
```

### 2. Configure OneAPI environment variables
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ conda activate llm
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install transformers==4.36.1 datasets
pip install fire peft==0.10.0 accelerate==0.23.0
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install bitsandbytes scipy
# configures OneAPI environment variables
source /opt/intel/oneapi/setvars.sh # necessary to run before installing deepspeed
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/LLM-Finetuning/ReLora/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-exte
pip install transformers==4.45.0 "trl<0.12.0" datasets
pip install fire peft==0.10.0
pip install bitsandbytes==0.45.1 scipy
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ # necessary to run distributed finetuning
```

### 2. Configure OneAPI environment variables
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ conda create -n llm python=3.11
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
```

### 2. Run pipeline parallel inference on multiple GPUs
Expand Down
2 changes: 1 addition & 1 deletion python/llm/example/GPU/Pipeline-Parallel-Serving/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ conda create -n llm python=3.11
conda activate llm
# below command will install intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install oneccl_bind_pt==2.1.100 --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# configures OneAPI environment variables
source /opt/intel/oneapi/setvars.sh
pip install mpi4py fastapi uvicorn openai
Expand Down