Skip to content

Commit 10627bb

Browse files
CI: Split CI tests to run them in parallel (huggingface#1691)
* Split CI tests to run them in parallel

  The idea of this PR is to split tests into multiple sets that can be run in parallel by GH. For this, all tests in test_models.py that would run on GH get a pytest marker. The GH workflow matrix is factorized to run only a single marker. That way, only a subset of tests should run per worker, leading to quicker results. There is also a worker that runs all the tests that are not inside test_models.py.

* [skip ci] empty commit to abort ci

* Fix typo in marker name

* Split fx into forward and backward

* Comment out test coverage for now

  Checking if it's responsible for regression in CI runtime.

* Remove pytest cov completely from requirements

* Remove cov call in pyproject.toml

  Missed that one.
1 parent cf6f6ad commit 10627bb

File tree

4 files changed

+31
-8
lines changed

4 files changed

+31
-8
lines changed

.github/workflows/tests.yml

+3-2
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ jobs:
1919
python: ['3.10']
2020
torch: ['1.13.0']
2121
torchvision: ['0.14.0']
22+
testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
2223
runs-on: ${{ matrix.os }}
2324

2425
steps:
@@ -54,10 +55,10 @@ jobs:
5455
PYTHONDONTWRITEBYTECODE: 1
5556
run: |
5657
pytest -vv tests
57-
- name: Run tests on Linux / Mac
58+
- name: Run '${{ matrix.testmarker }}' tests on Linux / Mac
5859
if: ${{ !startsWith(matrix.os, 'windows') }}
5960
env:
6061
LD_PRELOAD: /usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
6162
PYTHONDONTWRITEBYTECODE: 1
6263
run: |
63-
pytest -vv --forked --durations=0 tests
64+
pytest -vv --forked --durations=0 ${{ matrix.testmarker }} tests

pyproject.toml

+7-5
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
[tool.pytest.ini_options]
2-
addopts = "--cov=timm --cov-report=term-missing"
3-
4-
[tool.coverage.run]
5-
omit = [
6-
"tests/test_*.py",
2+
markers = [
3+
"base: marker for model tests using the basic setup",
4+
"cfg: marker for model tests checking the config",
5+
"torchscript: marker for model tests using torchscript",
6+
"features: marker for model tests checking feature extraction",
7+
"fxforward: marker for model tests using torch fx (only forward)",
8+
"fxbackward: marker for model tests using torch fx (only backward)",
79
]
810

911
[tool.black]

requirements-dev.txt

-1
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,3 @@ pytest-timeout
33
pytest-xdist
44
pytest-forked
55
expecttest
6-
pytest-cov

tests/test_models.py

+21
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,16 @@
1+
"""Run tests for all models
2+
3+
Tests that run on CI should have a specific marker, e.g. @pytest.mark.base. This
4+
marker is used to parallelize the CI runs, with one runner for each marker.
5+
6+
If new tests are added, ensure that they use one of the existing markers
7+
(documented in pyproject.toml > pytest > markers) or that a new marker is added
8+
for this set of tests. If using a new marker, adjust the test matrix in
9+
.github/workflows/tests.yml to run tests with this new marker, otherwise the
10+
tests will be skipped on CI.
11+
12+
"""
13+
114
import pytest
215
import torch
316
import platform
@@ -83,6 +96,7 @@ def _get_input_size(model=None, model_name='', target=None):
8396
return input_size
8497

8598

99+
@pytest.mark.base
86100
@pytest.mark.timeout(120)
87101
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
88102
@pytest.mark.parametrize('batch_size', [1])
@@ -101,6 +115,7 @@ def test_model_forward(model_name, batch_size):
101115
assert not torch.isnan(outputs).any(), 'Output included NaNs'
102116

103117

118+
@pytest.mark.base
104119
@pytest.mark.timeout(120)
105120
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True))
106121
@pytest.mark.parametrize('batch_size', [2])
@@ -128,6 +143,7 @@ def test_model_backward(model_name, batch_size):
128143
assert not torch.isnan(outputs).any(), 'Output included NaNs'
129144

130145

146+
@pytest.mark.cfg
131147
@pytest.mark.timeout(300)
132148
@pytest.mark.parametrize('model_name', list_models(exclude_filters=NON_STD_FILTERS, include_tags=True))
133149
@pytest.mark.parametrize('batch_size', [1])
@@ -190,6 +206,7 @@ def test_model_default_cfgs(model_name, batch_size):
190206
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
191207

192208

209+
@pytest.mark.cfg
193210
@pytest.mark.timeout(300)
194211
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
195212
@pytest.mark.parametrize('batch_size', [1])
@@ -274,6 +291,7 @@ def test_model_features_pretrained(model_name, batch_size):
274291
]
275292

276293

294+
@pytest.mark.torchscript
277295
@pytest.mark.timeout(120)
278296
@pytest.mark.parametrize(
279297
'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True))
@@ -303,6 +321,7 @@ def test_model_forward_torchscript(model_name, batch_size):
303321
EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d']
304322

305323

324+
@pytest.mark.features
306325
@pytest.mark.timeout(120)
307326
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS, include_tags=True))
308327
@pytest.mark.parametrize('batch_size', [1])
@@ -379,6 +398,7 @@ def _create_fx_model(model, train=False):
379398
]
380399

381400

401+
@pytest.mark.fxforward
382402
@pytest.mark.timeout(120)
383403
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS))
384404
@pytest.mark.parametrize('batch_size', [1])
@@ -412,6 +432,7 @@ def test_model_forward_fx(model_name, batch_size):
412432
assert not torch.isnan(outputs).any(), 'Output included NaNs'
413433

414434

435+
@pytest.mark.fxbackward
415436
@pytest.mark.timeout(120)
416437
@pytest.mark.parametrize('model_name', list_models(
417438
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True))

0 commit comments

Comments (0)