[TESTS] Revert ignored scope for conformance (#2975)
### Changes

Revert the ignored scope for the mobilenet_v3 conformance model.
kshpv authored Sep 18, 2024
1 parent 42101d5 commit a39e673
Showing 2 changed files with 2 additions and 55 deletions.
tests/post_training/data/ptq_reference_data.yaml (2 changes: 1 addition & 1 deletion)
```diff
@@ -41,7 +41,7 @@ torchvision/resnet18_backend_FX_TORCH:
 torchvision/mobilenet_v3_small_BC_backend_FP32:
   metric_value: 0.6766
 torchvision/mobilenet_v3_small_BC_backend_OV:
-  metric_value: 0.6677
+  metric_value: 0.6669
 torchvision/mobilenet_v3_small_BC_backend_ONNX:
   metric_value: 0.6679
 torchvision/mobilenet_v3_small_BC_backend_FX_TORCH:
```
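The conformance run compares each measured metric against this file. Below is a minimal sketch of that comparison, assuming a flat YAML mapping keyed by report name; `check_metric`, the path handling, and the tolerance value are illustrative assumptions, not the actual test-suite code.

```python
# Hedged sketch: read ptq_reference_data.yaml and compare a measured
# metric against the stored reference value. The function name and the
# tolerance are assumptions, not the real tests/post_training logic.
import yaml

REFERENCE_FILE = "tests/post_training/data/ptq_reference_data.yaml"


def check_metric(report_name: str, measured: float, atol: float = 0.002) -> bool:
    with open(REFERENCE_FILE) as f:
        reference = yaml.safe_load(f)
    expected = reference[report_name]["metric_value"]
    return abs(measured - expected) <= atol


# e.g. the entry updated by this commit:
# check_metric("torchvision/mobilenet_v3_small_BC_backend_OV", 0.6669)
```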
tests/post_training/model_scope.py (55 changes: 1 addition & 54 deletions)
```diff
@@ -22,8 +22,6 @@
 from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
 from nncf.quantization.advanced_parameters import AdvancedScaleEstimationParameters
 from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters
-from nncf.scopes import IgnoredScope
-from nncf.scopes import Subgraph
 from tests.post_training.pipelines.base import ALL_PTQ_BACKENDS
 from tests.post_training.pipelines.base import NNCF_PTQ_BACKENDS
 from tests.post_training.pipelines.base import BackendType
@@ -99,58 +97,7 @@
             "fast_bias_correction": False,
             "preset": QuantizationPreset.MIXED,
         },
-        "backends": [BackendType.FX_TORCH, BackendType.ONNX],
-        "batch_size": 128,
-    },
-    { # TODO(kshpv): changes from #2947 should be reverted after implement ticket 150952
-        "reported_name": "torchvision/mobilenet_v3_small_BC",
-        "model_id": "mobilenet_v3_small",
-        "pipeline_cls": ImageClassificationTorchvision,
-        "compression_params": {
-            "fast_bias_correction": False,
-            "preset": QuantizationPreset.MIXED,
-            "ignored_scope": IgnoredScope(
-                subgraphs=[
-                    Subgraph(
-                        inputs=["__module.features.1.block.1.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.1.block.1.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.4.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.4.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.5.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.5.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.6.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.6.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.7.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.7.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.8.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.8.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.9.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.9.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.10.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.10.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                    Subgraph(
-                        inputs=["__module.features.11.block.2.avgpool/aten::adaptive_avg_pool2d/Reshape"],
-                        outputs=["__module.features.11.block.2.scale_activation/aten::hardsigmoid/HSigmoid"],
-                    ),
-                ]
-            ),
-        },
-        "backends": [BackendType.OV],
+        "backends": [BackendType.FX_TORCH, BackendType.OV, BackendType.ONNX],
         "batch_size": 128,
     },
     {
```
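For context, the removed `ignored_scope` excluded each squeeze-excitation subgraph (the `adaptive_avg_pool2d` ... `hardsigmoid` chain) from quantization. Below is a minimal sketch of how such a scope is passed through NNCF's public API; `quantize_mobilenet_v3` is a hypothetical wrapper, only the first of the nine subgraphs is shown, and the model/dataset arguments are placeholders, while `nncf.quantize`, `IgnoredScope`, and `Subgraph` are the real NNCF entry points used by the removed code.

```python
# Hedged sketch of the reverted configuration at the NNCF API level.
import nncf
from nncf.scopes import IgnoredScope, Subgraph


def quantize_mobilenet_v3(model, calibration_dataset: nncf.Dataset):
    # Exclude the squeeze-excitation block from quantization; the removed
    # code listed one Subgraph per SE block (features.1 through features.11).
    ignored = IgnoredScope(
        subgraphs=[
            Subgraph(
                inputs=["__module.features.1.block.1.avgpool/aten::adaptive_avg_pool2d/Reshape"],
                outputs=["__module.features.1.block.1.scale_activation/aten::hardsigmoid/HSigmoid"],
            ),
        ]
    )
    return nncf.quantize(
        model,
        calibration_dataset,
        preset=nncf.QuantizationPreset.MIXED,
        fast_bias_correction=False,
        ignored_scope=ignored,
    )
```

Reverting this scope re-enables quantization of those subgraphs, which is why the OV reference metric above moves from 0.6677 to 0.6669.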
