74
74
libswresample-dev
75
75
libswscale-dev
76
76
pciutils
77
+ python3-dev
77
78
TORCH_INDEX : ' --pre --index-url https://download.pytorch.org/whl/nightly/xpu'
78
79
AGENT_TOOLSDIRECTORY : /tmp/xpu-tool
79
80
@@ -116,20 +117,36 @@ jobs:
116
117
render_id : ${{ steps.runner-info.outputs.render_id }}
117
118
hostname : ${{ steps.runner-info.outputs.hostname }}
118
119
pytest_extra_args : ${{ steps.runner-info.outputs.pytest_extra_args }}
120
+ env :
121
+ VIRTUAL_ENV : ${{ github.workspace }}/.venv
119
122
steps :
123
+ - name : Install uv and python-${{ env.python }}
124
+ uses : astral-sh/setup-uv@v6
125
+ with :
126
+ python-version : ${{ env.python }}
127
+ - name : Prepare environment
128
+ run : |
129
+ rm -rf ${{ env.VIRTUAL_ENV }}
130
+ uv venv ${{ env.VIRTUAL_ENV }}
120
131
- id : getver
121
132
run : |
122
133
# We can't just `pip index version...` and get the last available
123
134
# version as pytorch packages may have tricky dependencies. Instead
124
- # we dry run install packages and get versions which would be installed.
135
+ # we install packages and get versions which got installed. Note that
136
+ # trying to --dry-run is not actually reliable as it does not make
137
+ a thorough check of package dependencies.
125
138
# See: https://github.com/pytorch/pytorch/issues/154687
126
- pip install --dry-run --ignore-installed $TORCH_INDEX \
139
+ uv pip install $TORCH_INDEX \
127
140
torch torchvision torchaudio pytorch-triton-xpu >_log.txt
128
141
129
- torch=$(cat _log.txt | grep "Would install" | sed -E "s/.*torch-([^ ]*).*/\1/")
130
- torchvision=$(cat _log.txt | grep "Would install" | sed -E "s/.*torchvision-([^ ]*).*/\1/")
131
- torchaudio=$(cat _log.txt | grep "Would install" | sed -E "s/.*torchaudio-([^ ]*).*/\1/")
132
- triton=$(cat _log.txt | grep "Would install" | sed -E "s/.*pytorch-triton-xpu-([^ ]*).*/\1/")
142
+ torch=$(uv pip show torch | grep Version)
143
+ torchvision=$(uv pip show torchvision | grep Version)
144
+ torchaudio=$(uv pip show torchaudio | grep Version)
145
+ triton=$(uv pip show pytorch-triton-xpu | grep Version)
146
+ torch=${torch#Version: *}
147
+ torchvision=${torchvision#Version: *}
148
+ torchaudio=${torchaudio#Version: *}
149
+ triton=${triton#Version: *}
133
150
echo "torch=$torch" | tee -a "$GITHUB_OUTPUT"
134
151
echo "torchvision=$torchvision" | tee -a "$GITHUB_OUTPUT"
135
152
echo "torchaudio=$torchaudio" | tee -a "$GITHUB_OUTPUT"
@@ -154,8 +171,12 @@ jobs:
154
171
env :
155
172
PYTORCH_DEBUG_XPU_FALLBACK : ' 1'
156
173
TRANSFORMERS_TEST_DEVICE_SPEC : ' spec.py'
157
- # enable pytest parallel run, and continue others if meets crash case such as segmentation fault
158
- PYTEST_ADDOPTS : -rsf --timeout 600 --timeout_method=thread --dist worksteal ${{ needs.prepare.outputs.pytest_extra_args }}
174
+ # Usage of `--dist loadfile` is a must as HF tests have complex setups including
175
+ setUpClass and @first_run clauses. So the 'loadfile' strategy allows minimizing the
176
+ race conditions scope. Besides, that's how HF Transformers recommends running
177
+ # tests and how they run them in their own CI.
178
+ # See: https://github.com/huggingface/transformers/blob/v4.56.2/CONTRIBUTING.md?plain=1#L312
179
+ PYTEST_ADDOPTS : -rsf --timeout 600 --timeout_method=thread --dist loadfile ${{ needs.prepare.outputs.pytest_extra_args }}
159
180
strategy :
160
181
fail-fast : false
161
182
max-parallel : 1
@@ -224,21 +245,9 @@ jobs:
224
245
fi
225
246
- name : Prepare OS environment
226
247
run : |
227
- # as jobs might run in parallel on the same system, apt-get might
228
- # step into the lock hold by other job
229
- start_time=$SECONDS
230
- while ! sudo apt-get update; do
231
- sleep 1;
232
- if (( $SECONDS - start_time > 60 )); then false; fi
233
- done
234
- while ! sudo apt-get install -y $PACKAGES; do
235
- sleep 1;
236
- if (( $SECONDS - start_time > 60 )); then false; fi
237
- done
238
- while ! git lfs install; do
239
- sleep 1;
240
- if (( $SECONDS - start_time > 60 )); then false; fi
241
- done
248
+ sudo apt-get update
249
+ sudo apt-get install -y $PACKAGES
250
+ git lfs install
242
251
- name : Setup python-${{ env.python }}
243
252
uses : actions/setup-python@v5
244
253
with :
@@ -250,12 +259,17 @@ jobs:
250
259
pip install -U pip wheel setuptools
251
260
- name : Prepare pytorch and deps
252
261
run : |
253
- pip install junitparser
254
262
pip install $TORCH_INDEX \
255
263
torch==${{ needs.prepare.outputs.torch }} \
256
264
torchvision==${{ needs.prepare.outputs.torchvision }} \
257
265
torchaudio==${{ needs.prepare.outputs.torchaudio }} \
258
266
pytorch-triton-xpu==${{needs.prepare.outputs.triton }}
267
+ pip install \
268
+ junitparser \
269
+ pytest \
270
+ pytest-timeout \
271
+ pytest-xdist \
272
+ pytest-shard
259
273
- name : Prepare Transformers
260
274
run : |
261
275
pwd
@@ -281,8 +295,6 @@ jobs:
281
295
xpu-smi discovery -y --json --dump -1
282
296
- name : Sanity check installed packages
283
297
run : |
284
- # Use latest pytest
285
- pip install -U pytest pytest-timeout pytest-xdist pytest-shard
286
298
# These checks are to exit earlier if for any reason Transformers
287
299
# reinstalled torch packages back to CUDA versions (not expected).
288
300
pip show torch | grep Version | grep xpu
0 commit comments