[PT FE] Align types for aten::clamp_max and aten::clamp_min (openvinotoolkit#21137)

* Align types for aten::clamp_max and aten::clamp_min

* Add tests
mvafin authored Nov 17, 2023
1 parent 8b5b7a6 commit 6bdc159
Showing 3 changed files with 34 additions and 10 deletions.
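Context for the change: PyTorch allows the scalar bound of aten::clamp_min / aten::clamp_max to have a different dtype than the input tensor and resolves the mismatch with its usual type promotion, so the OpenVINO translation has to align element types rather than feed mismatched inputs to Minimum/Maximum. A minimal PyTorch illustration of the behavior being matched (a standalone repro, not part of the commit):

import torch

x = torch.tensor([0.25, 1.5, -3.0], dtype=torch.float32)

# The integer bound is promoted to the tensor's dtype; the result stays float32.
print(torch.clamp_min(x, 2))        # tensor([2., 2., 2.])
print(torch.clamp_max(x, 1).dtype)  # torch.float32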
4 changes: 2 additions & 2 deletions src/frontends/pytorch/src/op_table.cpp
@@ -296,8 +296,8 @@ const std::map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::channel_shuffle", op::translate_channel_shuffle},
         {"aten::clamp", op::translate_clamp},
         {"aten::clamp_", op::inplace_op<op::translate_clamp>},
-        {"aten::clamp_max", op::translate_1to1_match_2_inputs<opset10::Minimum>},
-        {"aten::clamp_min", op::translate_1to1_match_2_inputs<opset10::Maximum>},
+        {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_types<opset10::Minimum>},
+        {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_types<opset10::Maximum>},
         {"aten::clip", op::translate_clamp},
         {"aten::clip_", op::inplace_op<op::translate_clamp>},
         {"aten::clone", op::skip_node},  // ignore clone operators that are inserted by PyTorch autograd
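The previous translate_1to1_match_2_inputs mapped the two PyTorch inputs straight onto the opset node; judging by its name, the _align_types variant first promotes both inputs to a common element type, which OpenVINO's elementwise Minimum/Maximum need. A rough sketch of that promotion in PyTorch terms (an illustration under that assumption, not the frontend's actual C++ code):

import torch

def aligned_minimum(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # Cast both operands to their common (promoted) dtype before the elementwise
    # op, mirroring what the translator presumably does before emitting Minimum.
    common = torch.result_type(a, b)
    return torch.minimum(a.to(common), b.to(common))

a = torch.tensor([0.5, 3.0], dtype=torch.float32)
b = torch.tensor(2, dtype=torch.int64)
print(aligned_minimum(a, b))  # tensor([0.5000, 2.0000]), common dtype float32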
16 changes: 10 additions & 6 deletions tests/layer_tests/pytorch_tests/test_clamp.py
@@ -47,11 +47,13 @@ def forward_clip_(self, x):
     @pytest.mark.parametrize("op_type", ["clamp", "clamp_"])
     @pytest.mark.nightly
     def test_clamp(self, minimum, maximum, as_tensors, op_type, ie_device, precision, ir_version):
-        self._test(*self.create_model(minimum, maximum, as_tensors, op_type), ie_device, precision, ir_version)
+        self._test(*self.create_model(minimum, maximum, as_tensors,
+                   op_type), ie_device, precision, ir_version)
 
     @pytest.mark.xfail(reason='OpenVINO clamp does not support min > max')
     def test_clamp_min_greater(self, ie_device, precision, ir_version):
-        self._test(*self.create_model(1.0, 0.0), ie_device, precision, ir_version)
+        self._test(*self.create_model(1.0, 0.0),
+                   ie_device, precision, ir_version)
 
 
 class TestClampMin(PytorchLayerTest):
@@ -74,11 +76,12 @@ def forward(self, x):
         op_name = "aten::clamp_min"
         return aten_clamp_min(minimum, as_tensor), ref_net, op_name
 
-    @pytest.mark.parametrize("minimum", [0., 1., -1., 0.5])
+    @pytest.mark.parametrize("minimum", [0., 1., -1., 0.5, 2])
     @pytest.mark.parametrize("as_tensor", [True, False])
     @pytest.mark.nightly
     def test_clamp_min(self, minimum, as_tensor, ie_device, precision, ir_version):
-        self._test(*self.create_model(minimum, as_tensor), ie_device, precision, ir_version)
+        self._test(*self.create_model(minimum, as_tensor), ie_device,
+                   precision, ir_version, use_convert_model=True, trace_model=True)
 
 
 class TestClampMax(PytorchLayerTest):
@@ -101,9 +104,10 @@ def forward(self, x):
         op_name = "aten::clamp_max"
         return aten_clamp_max(maximum, as_tensor), ref_net, op_name
 
-    @pytest.mark.parametrize("maximum", [0., 1., -1., 0.5])
+    @pytest.mark.parametrize("maximum", [0., 1., -1., 0.5, 2])
     @pytest.mark.parametrize("as_tensor", [True, False])
     @pytest.mark.nightly
     @pytest.mark.precommit
     def test_clamp(self, maximum, as_tensor, ie_device, precision, ir_version):
-        self._test(*self.create_model(maximum, as_tensor), ie_device, precision, ir_version)
+        self._test(*self.create_model(maximum, as_tensor), ie_device,
+                   precision, ir_version, use_convert_model=True, trace_model=True)
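The new parametrization value 2 is a Python int, so these tests now also cover a bound whose dtype differs from the float32 input, and trace_model=True routes the test through TorchScript tracing. A hypothetical standalone module mirroring the test's aten_clamp_min helper shows what tracing records:

import torch

class ClampMin(torch.nn.Module):
    def __init__(self, minimum):
        super().__init__()
        self.minimum = minimum

    def forward(self, x):
        return torch.clamp_min(x, self.minimum)

# Tracing with an integer bound records aten::clamp_min whose Scalar argument
# has a different type than the float input, the case the frontend now aligns.
traced = torch.jit.trace(ClampMin(2), torch.randn(2, 3))
print(traced.graph)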
24 changes: 22 additions & 2 deletions tests/layer_tests/pytorch_tests/test_norm.py
@@ -327,7 +327,27 @@ def test_linalg_norm(self, p, dim, keepdim, dtype, out, prim_dtype, input_shape,
         self._test(*self.create_model(p, dim, keepdim, dtype, out, prim_dtype),
                    ie_device, precision, ir_version,
                    kwargs_to_prepare_input={
-                   "out": out or prim_dtype,
+                       "out": out or prim_dtype,
                        "out_dtype": dtype if prim_dtype else None,
                        "input_shape": input_shape
-                   })
+                       })
+
+
+class TestTrickyNorm(PytorchLayerTest):
+
+    def _prepare_input(self, input_shape=(3, 3)):
+        return (np.random.randn(*input_shape).astype(np.float32),)
+
+    def create_model(self):
+        class aten_norm(torch.nn.Module):
+            def forward(self, x):
+                return torch.nn.functional.normalize(x, eps=2)
+
+        return aten_norm(), None, ["aten::linalg_vector_norm", "aten::clamp_min"]
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("input_shape", [[15, 15, 17]])
+    def test_tricky_norm(self, input_shape, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={"input_shape": input_shape}, use_convert_model=True, trace_model=True)
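The "tricky" part: torch.nn.functional.normalize(x, eps=2) decomposes into aten::linalg_vector_norm followed by aten::clamp_min, with the integer eps as the clamp's scalar bound against a float norm, exactly the mixed-type case this commit fixes (the expected-ops list above confirms the decomposition for the tested torch version). A quick way to inspect it, assuming a recent torch where the norm lowers to linalg_vector_norm:

import torch

class Norm(torch.nn.Module):
    def forward(self, x):
        # The integer eps ends up as the Scalar argument of aten::clamp_min.
        return torch.nn.functional.normalize(x, eps=2)

traced = torch.jit.trace(Norm(), torch.randn(15, 15, 17))
print(traced.graph)  # expect aten::linalg_vector_norm then aten::clamp_min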
