Skip to content

Commit 3e54d0b

Browse files
authored
build(dev-dependency): Add torchvision for multimodal test (#755)
## Summary <!--- This is a required section; please describe the main purpose of this proposed code change. ---> `Qwen2VLVideoProcessor` requires torchvision. Related to #738 #723 (comment) <!--- ## Details This is an optional section; is there anything specific that reviewers should be aware of? ---> ## Testing Done <!--- This is a required section; please describe how this change was tested. ---> <!-- Replace BLANK with your device type. For example, A100-80G-PCIe Complete the following tasks before sending your PR, and replace `[ ]` with `[x]` to indicate you have done them. --> - Hardware Type: <BLANK> - [ ] run `make test` to ensure correctness - [ ] run `make checkstyle` to ensure code style - [ ] run `make test-convergence` to ensure convergence
1 parent 7883185 commit 3e54d0b

File tree

4 files changed

+14
-0
lines changed

4 files changed

+14
-0
lines changed

setup.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@ def get_optional_dependencies():
4343
"seaborn",
4444
"mkdocs",
4545
"mkdocs-material",
46+
"torchvision>=0.20",
4647
]
4748
}
4849

test/convergence/bf16/test_mini_models_multimodal.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from test.utils import UNTOKENIZED_DATASET_PATH
2121
from test.utils import MiniModelConfig
2222
from test.utils import assert_verbose_allclose
23+
from test.utils import is_torchvision_available
2324
from test.utils import load_image_processing_config
2425
from test.utils import load_processor_config
2526
from test.utils import load_tokenizer_config
@@ -788,6 +789,7 @@ def run_mini_model_multimodal(
788789
not QWEN2_VL_AVAILABLE,
789790
reason="Qwen2-VL not available in this version of transformers",
790791
),
792+
pytest.mark.skipif(not is_torchvision_available(), reason="Qwen2VLVideoProcessor requires torchvision"),
791793
],
792794
),
793795
pytest.param(
@@ -826,6 +828,7 @@ def run_mini_model_multimodal(
826828
not QWEN2_5_VL_AVAILABLE,
827829
reason="Qwen2.5-VL not available in this version of transformers",
828830
),
831+
pytest.mark.skipif(not is_torchvision_available(), reason="Qwen2VLVideoProcessor requires torchvision"),
829832
],
830833
),
831834
pytest.param(

test/convergence/fp32/test_mini_models_multimodal.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from test.utils import UNTOKENIZED_DATASET_PATH
2121
from test.utils import MiniModelConfig
2222
from test.utils import assert_verbose_allclose
23+
from test.utils import is_torchvision_available
2324
from test.utils import load_image_processing_config
2425
from test.utils import load_processor_config
2526
from test.utils import load_tokenizer_config
@@ -783,6 +784,7 @@ def run_mini_model_multimodal(
783784
not QWEN2_VL_AVAILABLE,
784785
reason="Qwen2-VL not available in this version of transformers",
785786
),
787+
pytest.mark.skipif(not is_torchvision_available(), reason="Qwen2VLVideoProcessor requires torchvision"),
786788
],
787789
),
788790
pytest.param(
@@ -817,6 +819,7 @@ def run_mini_model_multimodal(
817819
not QWEN2_5_VL_AVAILABLE,
818820
reason="Qwen2.5-VL not available in this version of transformers",
819821
),
822+
pytest.mark.skipif(not is_torchvision_available(), reason="Qwen2VLVideoProcessor requires torchvision"),
820823
],
821824
),
822825
pytest.param(

test/utils.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -228,6 +228,13 @@ def supports_bfloat16():
228228
return False
229229

230230

231+
def is_torchvision_available():
232+
if importlib.util.find_spec("torchvision") is not None:
233+
return True
234+
else:
235+
return False
236+
237+
231238
def revert_liger_kernel_to_granite(model_config: MiniModelConfig):
232239
"""
233240
Revert all Liger kernel patches applied to Granite.

0 commit comments

Comments (0)