From e00aeffe51fa74949c8563567da89922157d04a4 Mon Sep 17 00:00:00 2001
From: Menglu Yu
Date: Wed, 7 May 2025 14:13:17 -0700
Subject: [PATCH] Support activation quantization without scaling (#2607)

Summary:
X-link: https://github.com/pytorch/pytorch/pull/148380

We enable activation quantization in the forward pass, and users can
customize the dtype they want to quantize to.

Reviewed By: avicizhu

Differential Revision: D70522237
---
 userbenchmark/dynamo/dynamobench/_dynamo/utils.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
index b75b1d6c3..03d8418b7 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
@@ -4589,3 +4589,7 @@ def maybe_disable_inference_mode_for_fake_prop() -> Generator[None, None, None]:
             yield
     else:
         yield
+
+
+def is_node_meta_valid(node: Optional[torch.fx.Node]) -> bool:
+    return node is None or "example_value" in node.meta or "val" in node.meta
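
A minimal usage sketch, not part of the patch: it shows where a helper like
is_node_meta_valid fits in an FX graph pass that inspects activation metadata
before deciding what to rewrite. The fn function and the tracing_mode="fake"
trace below are illustrative assumptions; only is_node_meta_valid itself comes
from this change.

    from typing import Optional

    import torch
    import torch.fx
    from torch.fx.experimental.proxy_tensor import make_fx


    def is_node_meta_valid(node: Optional[torch.fx.Node]) -> bool:
        # Helper added by this patch: a node's metadata is usable when the node
        # is absent or node.meta carries an "example_value" or "val" entry.
        return node is None or "example_value" in node.meta or "val" in node.meta


    def fn(x: torch.Tensor) -> torch.Tensor:
        # Illustrative stand-in for a model's forward pass.
        return torch.relu(x @ x.t())


    # make_fx with fake tracing records a FakeTensor under node.meta["val"],
    # which is one of the two keys the helper checks for.
    gm = make_fx(fn, tracing_mode="fake")(torch.randn(4, 4))

    for node in gm.graph.nodes:
        if node.op != "call_function":
            continue
        if not is_node_meta_valid(node):
            # Without metadata the activation's dtype/shape is unknown, so a
            # quantization-style rewrite would have to skip this node.
            continue
        val = node.meta.get("val", node.meta.get("example_value"))
        print(node.name, getattr(val, "dtype", None))

A pass that casts activations to a user-chosen dtype would use such a check to
skip nodes whose metadata was never populated, rather than guessing shapes or
dtypes for them.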