CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))

def result_to_output_metrics(
-    metrics: List[str], metrics_res: TorchBenchModelMetrics
+    metrics: List[str], metrics_res: TorchBenchModelMetrics, output_iter_metrics: bool
) -> Dict[str, float]:
    result_metrics = {}
    if metrics_res:
@@ -48,11 +48,17 @@ def result_to_output_metrics(
            median_latency = numpy.median(metrics_res.latencies)
            assert median_latency, f"Run failed for metric {latency_metric}"
            result_metrics[latency_metric] = median_latency
+            if output_iter_metrics:
+                iter_latencies_metric = "iter_latencies"
+                result_metrics[iter_latencies_metric] = list(metrics_res.latencies)
        if "throughputs" in metrics and metrics_res.throughputs:
            throughput_metric = "throughput"
            median_throughput = numpy.median(metrics_res.throughputs)
            assert median_throughput, f"Run failed for metric {throughput_metric}"
            result_metrics[throughput_metric] = median_throughput
+            if output_iter_metrics:
+                iter_throughputs_metric = "iter_throughputs"
+                result_metrics[iter_throughputs_metric] = list(metrics_res.throughputs)
        if "cpu_peak_mem" in metrics and metrics_res.cpu_peak_mem:
            cpu_peak_mem = "cpu_peak_mem"
            result_metrics[cpu_peak_mem] = metrics_res.cpu_peak_mem
@@ -118,7 +124,7 @@ def run(args: List[str], extra_args: List[str]):
    args.output = args.output if args.output else get_output_dir(BM_NAME)
    target_dir = Path(args.output).joinpath(f"{config.name}-{config.test}")
    target_dir.mkdir(exist_ok=True, parents=True)
-    metrics_dict = result_to_output_metrics(metrics, metrics_res)
+    metrics_dict = result_to_output_metrics(metrics, metrics_res, args.output_iter_metrics)
    dump_result_to_json(metrics_dict, target_dir)

if __name__ == "__main__":
@@ -143,6 +149,9 @@ def run(args: List[str], extra_args: List[str]):
    parser.add_argument(
        "--metrics", default="latencies", help="Benchmark metrics, split by comma."
    )
+    parser.add_argument(
+        "--output-iter-metrics", action=argparse.BooleanOptionalAction, help="Enable per-iteration benchmark metrics"
+    )
    parser.add_argument(
        "--nwarmup", default=20, help="Benchmark warmup iteration number."
    )
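
For reference, a minimal sketch (not part of this diff) of how the new flag parses, assuming Python 3.9+ where argparse.BooleanOptionalAction is available: the action registers both --output-iter-metrics and --no-output-iter-metrics, and since the diff sets no default, args.output_iter_metrics is None (falsy) unless a flag is passed, so per-iteration metrics stay off by default.

# Illustrative only: behavior of the flag added above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--output-iter-metrics",
    action=argparse.BooleanOptionalAction,
    help="Enable per-iteration benchmark metrics",
)

print(parser.parse_args([]).output_iter_metrics)                            # None
print(parser.parse_args(["--output-iter-metrics"]).output_iter_metrics)     # True
print(parser.parse_args(["--no-output-iter-metrics"]).output_iter_metrics)  # False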