
Commit 8451c1f

xuzhao9 authored and facebook-github-bot committed
Fix test_bench script (#2320)
Summary: Fixes #2315

Pull Request resolved: #2320

Test Plan:
```
$ pytest test_bench.py -k "test_eval[BERT_pytorch-cpu]" --ignore_machine_config
============================== test session starts ==============================
platform linux -- Python 3.11.5, pytest-7.4.3, pluggy-1.0.0
benchmark: 4.0.0 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/xz/git/benchmark
plugins: benchmark-4.0.0, hypothesis-6.98.15
collected 411 items / 410 deselected / 1 selected

test_bench.py .                                                           [100%]

--------------------------- benchmark 'hub': 1 tests ---------------------------
Name (time in ms)                 Min       Max      Mean  StdDev    Median     IQR  Outliers     OPS  Rounds  Iterations
---------------------------------------------------------------------------------
test_eval[BERT_pytorch-cpu]  114.2104  117.3853  115.4276  1.0485  115.3054  1.4325       4;0  8.6634       9           1
---------------------------------------------------------------------------------

Legend:
  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
  OPS: Operations Per Second, computed as 1 / Mean
======================= 1 passed, 410 deselected in 5.68s =======================
```

Reviewed By: aaronenyeshi

Differential Revision: D58823072

Pulled By: xuzhao9

fbshipit-source-id: 172be1d922b2a51ec2df08b822102dc0a20818ac
1 parent 8de4ad3 · commit 8451c1f
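The `--ignore_machine_config` flag in the Test Plan is a repository-specific pytest option. As a point of reference, custom flags like this are registered through standard pytest hooks; the sketch below shows one plausible wiring, with the hook bodies being assumptions rather than the repository's actual conftest:

```python
# conftest.py sketch: registering a custom flag such as --ignore_machine_config.
# The option name matches the Test Plan above; the handling logic is assumed.


def pytest_addoption(parser):
    parser.addoption(
        "--ignore_machine_config",
        action="store_true",
        default=False,
        help="Run benchmarks even if the machine configuration check fails.",
    )


def pytest_configure(config):
    # Example use: consult the flag once at startup; real handling may differ.
    if config.getoption("--ignore_machine_config"):
        print("Skipping machine configuration validation (assumed behavior).")
```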

File tree

1 file changed (+18 −18 lines)


test_bench.py

Lines changed: 18 additions & 18 deletions
```diff
@@ -42,11 +42,10 @@ def pytest_generate_tests(metafunc):
 
     if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
         paths = _list_model_paths()
-        model_names = [os.path.basename(path) for path in paths]
         metafunc.parametrize(
-            "model_name",
-            model_names,
-            ids=model_names,
+            "model_path",
+            paths,
+            ids=[os.path.basename(path) for path in paths],
             scope="class",
         )
@@ -62,13 +61,14 @@ def pytest_generate_tests(metafunc):
 )
 class TestBenchNetwork:
 
-    def test_train(self, model_name, device, compiler, benchmark):
+    def test_train(self, model_path, device, benchmark):
         try:
+            model_name = os.path.basename(model_path)
             if skip_by_metadata(
                 test="train",
                 device=device,
                 extra_args=[],
-                metadata=get_metadata_from_yaml(model_name),
+                metadata=get_metadata_from_yaml(model_path),
             ):
                 raise NotImplementedError("Test skipped by its metadata.")
             # TODO: skipping quantized tests for now due to BC-breaking changes for prepare
@@ -91,13 +91,14 @@ def test_train(self, model_name, device, compiler, benchmark):
         except NotImplementedError:
             print(f"Test train on {device} is not implemented, skipping...")
 
-    def test_eval(self, model_name, device, compiler, benchmark, pytestconfig):
+    def test_eval(self, model_path, device, benchmark, pytestconfig):
         try:
+            model_name = os.path.basename(model_path)
             if skip_by_metadata(
                 test="eval",
                 device=device,
                 extra_args=[],
-                metadata=get_metadata_from_yaml(model_name),
+                metadata=get_metadata_from_yaml(model_path),
             ):
                 raise NotImplementedError("Test skipped by its metadata.")
             # TODO: skipping quantized tests for now due to BC-breaking changes for prepare
@@ -110,16 +111,15 @@ def test_eval(self, model_name, device, compiler, benchmark, pytestconfig):
 
             task.make_model_instance(test="eval", device=device)
 
-            with task.no_grad(disable_nograd=pytestconfig.getoption("disable_nograd")):
-                benchmark(task.invoke)
-                benchmark.extra_info["machine_state"] = get_machine_state()
-                benchmark.extra_info["batch_size"] = task.get_model_attribute(
-                    "batch_size"
-                )
-                benchmark.extra_info["precision"] = task.get_model_attribute(
-                    "dargs", "precision"
-                )
-                benchmark.extra_info["test"] = "eval"
+            benchmark(task.invoke)
+            benchmark.extra_info["machine_state"] = get_machine_state()
+            benchmark.extra_info["batch_size"] = task.get_model_attribute(
+                "batch_size"
+            )
+            benchmark.extra_info["precision"] = task.get_model_attribute(
+                "dargs", "precision"
+            )
+            benchmark.extra_info["test"] = "eval"
 
         except NotImplementedError:
             print(f"Test eval on {device} is not implemented, skipping...")
```
