From a39e67339c5aad63acf787b946fd73310b75740a Mon Sep 17 00:00:00 2001
From: Aleksei Kashapov
Date: Wed, 18 Sep 2024 14:53:47 +0200
Subject: [PATCH] [TESTS] Revert ignored scope for conformance (#2975)

### Changes

Revert ignored scope for conformance mobilenet_v3

---
 .../data/ptq_reference_data.yaml   |  2 +-
 tests/post_training/model_scope.py | 55 +------------------
 2 files changed, 2 insertions(+), 55 deletions(-)

diff --git a/tests/post_training/data/ptq_reference_data.yaml b/tests/post_training/data/ptq_reference_data.yaml
index 9a0f4bc88ee..8a922a89598 100644
--- a/tests/post_training/data/ptq_reference_data.yaml
+++ b/tests/post_training/data/ptq_reference_data.yaml
@@ -41,7 +41,7 @@
 torchvision/resnet18_backend_FX_TORCH:
 torchvision/mobilenet_v3_small_BC_backend_FP32:
   metric_value: 0.6766
 torchvision/mobilenet_v3_small_BC_backend_OV:
-  metric_value: 0.6677
+  metric_value: 0.6669
 torchvision/mobilenet_v3_small_BC_backend_ONNX:
   metric_value: 0.6679
 torchvision/mobilenet_v3_small_BC_backend_FX_TORCH:
diff --git a/tests/post_training/model_scope.py b/tests/post_training/model_scope.py
index dde18b860ce..4318ef7373d 100644
--- a/tests/post_training/model_scope.py
+++ b/tests/post_training/model_scope.py
@@ -22,8 +22,6 @@
 from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
 from nncf.quantization.advanced_parameters import AdvancedScaleEstimationParameters
 from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters
-from nncf.scopes import IgnoredScope
-from nncf.scopes import Subgraph
 from tests.post_training.pipelines.base import ALL_PTQ_BACKENDS
 from tests.post_training.pipelines.base import NNCF_PTQ_BACKENDS
 from tests.post_training.pipelines.base import BackendType
@@ -99,58 +97,7 @@
             "fast_bias_correction": False,
             "preset": QuantizationPreset.MIXED,
         },
-        "backends": [BackendType.FX_TORCH, BackendType.ONNX],
-        "batch_size": 128,
-    },
-    {  # TODO(kshpv): changes from #2947 should be reverted after implement ticket 150952
-        "reported_name": "torchvision/mobilenet_v3_small_BC",
-        "model_id": "mobilenet_v3_small",
-        "pipeline_cls": ImageClassificationTorchvision,
-        "compression_params": {
-            "fast_bias_correction": False,
-            "preset": QuantizationPreset.MIXED,
-            "ignored_scope": IgnoredScope(
-                subgraphs=[
-                    Subgraph(
-                        inputs=["__module.features.1.block.1.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.1.block.1.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.4.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.4.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.5.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.5.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.6.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.6.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.7.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.7.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.8.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.8.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.9.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.9.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.10.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.10.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.11.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.11.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                ]
-            ),
-        },
-        "backends": [BackendType.OV],
+        "backends": [BackendType.FX_TORCH, BackendType.OV, BackendType.ONNX],
         "batch_size": 128,
     },
     {