diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 933f9a48eeb389..452a8927627629 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -296,8 +296,8 @@ const std::map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::channel_shuffle", op::translate_channel_shuffle},
         {"aten::clamp", op::translate_clamp},
         {"aten::clamp_", op::inplace_op},
-        {"aten::clamp_max", op::translate_1to1_match_2_inputs<opset10::Minimum>},
-        {"aten::clamp_min", op::translate_1to1_match_2_inputs<opset10::Maximum>},
+        {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_types<opset10::Minimum>},
+        {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_types<opset10::Maximum>},
         {"aten::clip", op::translate_clamp},
         {"aten::clip_", op::inplace_op},
         {"aten::clone", op::skip_node},  // ignore clone operators that are inserted by PyTorch autograd
diff --git a/tests/layer_tests/pytorch_tests/test_clamp.py b/tests/layer_tests/pytorch_tests/test_clamp.py
index ad869d6211e270..5c860f9be65ff1 100644
--- a/tests/layer_tests/pytorch_tests/test_clamp.py
+++ b/tests/layer_tests/pytorch_tests/test_clamp.py
@@ -47,11 +47,13 @@ def forward_clip_(self, x):
     @pytest.mark.parametrize("op_type", ["clamp", "clamp_"])
     @pytest.mark.nightly
     def test_clamp(self, minimum, maximum, as_tensors, op_type, ie_device, precision, ir_version):
-        self._test(*self.create_model(minimum, maximum, as_tensors, op_type), ie_device, precision, ir_version)
+        self._test(*self.create_model(minimum, maximum, as_tensors,
+                                      op_type), ie_device, precision, ir_version)
 
     @pytest.mark.xfail(reason='OpenVINO clamp does not support min > max')
     def test_clamp_min_greater(self, ie_device, precision, ir_version):
-        self._test(*self.create_model(1.0, 0.0), ie_device, precision, ir_version)
+        self._test(*self.create_model(1.0, 0.0),
+                   ie_device, precision, ir_version)
 
 
 class TestClampMin(PytorchLayerTest):
@@ -74,11 +76,12 @@ def forward(self, x):
         op_name = "aten::clamp_min"
         return aten_clamp_min(minimum, as_tensor), ref_net, op_name
 
-    @pytest.mark.parametrize("minimum", [0., 1., -1., 0.5])
+    @pytest.mark.parametrize("minimum", [0., 1., -1., 0.5, 2])
     @pytest.mark.parametrize("as_tensor", [True, False])
     @pytest.mark.nightly
     def test_clamp_min(self, minimum, as_tensor, ie_device, precision, ir_version):
-        self._test(*self.create_model(minimum, as_tensor), ie_device, precision, ir_version)
+        self._test(*self.create_model(minimum, as_tensor), ie_device,
+                   precision, ir_version, use_convert_model=True, trace_model=True)
 
 
 class TestClampMax(PytorchLayerTest):
@@ -101,9 +104,10 @@ def forward(self, x):
         op_name = "aten::clamp_max"
         return aten_clamp_max(maximum, as_tensor), ref_net, op_name
 
-    @pytest.mark.parametrize("maximum", [0., 1., -1., 0.5])
+    @pytest.mark.parametrize("maximum", [0., 1., -1., 0.5, 2])
     @pytest.mark.parametrize("as_tensor", [True, False])
     @pytest.mark.nightly
     @pytest.mark.precommit
     def test_clamp(self, maximum, as_tensor, ie_device, precision, ir_version):
-        self._test(*self.create_model(maximum, as_tensor), ie_device, precision, ir_version)
+        self._test(*self.create_model(maximum, as_tensor), ie_device,
+                   precision, ir_version, use_convert_model=True, trace_model=True)
diff --git a/tests/layer_tests/pytorch_tests/test_norm.py b/tests/layer_tests/pytorch_tests/test_norm.py
index 9422c170401702..bbbdb3bae34293 100644
--- a/tests/layer_tests/pytorch_tests/test_norm.py
+++ b/tests/layer_tests/pytorch_tests/test_norm.py
@@ -327,7 +327,27 @@ def test_linalg_norm(self, p, dim, keepdim, dtype, out, prim_dtype, input_shape,
         self._test(*self.create_model(p, dim, keepdim, dtype, out, prim_dtype),
                    ie_device, precision, ir_version,
                    kwargs_to_prepare_input={
-                       "out": out or prim_dtype,
+                       "out": out or prim_dtype,
                        "out_dtype": dtype if prim_dtype else None,
                        "input_shape": input_shape
-                   })
+                   })
+
+
+class TestTrickyNorm(PytorchLayerTest):
+
+    def _prepare_input(self, input_shape=(3, 3)):
+        return (np.random.randn(*input_shape).astype(np.float32),)
+
+    def create_model(self):
+        class aten_norm(torch.nn.Module):
+            def forward(self, x):
+                return torch.nn.functional.normalize(x, eps=2)
+
+        return aten_norm(), None, ["aten::linalg_vector_norm", "aten::clamp_min"]
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("input_shape", [[15, 15, 17]])
+    def test_tricky_norm(self, input_shape, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={"input_shape": input_shape}, use_convert_model=True, trace_model=True)