11 changes: 10 additions & 1 deletion backends/cadence/aot/tests/test_quantizer_ops.py
@@ -14,13 +14,13 @@
from executorch.backends.cadence.aot.graph_builder import GraphBuilder
from executorch.backends.cadence.aot.quantizer import quantizer as quantizer_module
from executorch.backends.cadence.aot.quantizer.patterns import AddmmPattern

from executorch.backends.cadence.aot.quantizer.quantizer import (
CadenceAtenQuantizer,
CadenceDefaultQuantizer,
CadenceFusedConvReluQuantizer,
CadenceNopQuantizer,
CadenceQuantizer,
CadenceRmsNormNopQuantizer,
CadenceW8A32MixedQuantizer,
CadenceWakeWordQuantizer,
CadenceWith16BitConvActivationsQuantizer,
@@ -54,6 +54,7 @@
CadenceFusedConvReluQuantizer, # TODO: T247438151 Add test coverage
CadenceNopQuantizer, # No-op quantizer, doesn't annotate anything
CadenceW8A32MixedQuantizer, # TODO: T247438158 Add test coverage
CadenceRmsNormNopQuantizer, # No-op quantizer, doesn't annotate anything, preserves rms_norm from decomposition
CadenceWakeWordQuantizer, # TODO: T247438162 Add test coverage
CadenceWith16BitConvActivationsQuantizer, # TODO: T247438221 Add test coverage
CadenceWithLayerNormQuantizer, # TODO: T247438410 Add test coverage
@@ -261,6 +262,14 @@ def test_nested_quantizer_ops_to_preserve(self) -> None:
]
self.assertCountEqual(actual, expected)

def test_rms_norm_nop_quantizer_ops_to_preserve(self) -> None:
q = CadenceRmsNormNopQuantizer()
actual = q.get_ops_to_preserve_from_decomposition()
expected = [
torch.ops.aten.rms_norm.default,
]
self.assertCountEqual(actual, expected)


if __name__ == "__main__":
unittest.main()
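
For context, below is a rough sketch of what a no-op quantizer along the lines of CadenceRmsNormNopQuantizer could look like. The method name get_ops_to_preserve_from_decomposition and the preserved op torch.ops.aten.rms_norm.default are taken from the test above; the class name, base class, and the annotate/validate signatures are assumptions for illustration, not the actual implementation in quantizer.py.

# Illustrative sketch only; the real CadenceRmsNormNopQuantizer lives in
# executorch/backends/cadence/aot/quantizer/quantizer.py and may differ.
from typing import List

import torch


class RmsNormNopQuantizerSketch:  # assumed standalone; the real class likely derives from CadenceQuantizer
    """No-op quantizer: adds no quantization annotations, but asks the
    export pipeline to keep aten.rms_norm.default from being decomposed."""

    def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
        # No annotations are added; the graph is returned unchanged.
        return model

    def validate(self, model: torch.fx.GraphModule) -> None:
        pass

    def get_ops_to_preserve_from_decomposition(self) -> List[torch._ops.OpOverload]:
        # Preserving rms_norm lets a later pass target the op directly
        # instead of its decomposed form.
        return [torch.ops.aten.rms_norm.default]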