From c5e16fc9c6b84b066b247f628b498669e9c88d38 Mon Sep 17 00:00:00 2001
From: Lifan Sun
Date: Wed, 23 Oct 2024 02:27:28 -0700
Subject: [PATCH] [PT FE] Support aten::atan2 for pytorch models (#27026)

### Details:
 - add atan2 operator and unit tests

### Tickets:
 - [[Good First Issue]: Support aten::atan2 for pytorch models](https://github.com/openvinotoolkit/openvino/issues/20575)

---------

Co-authored-by: Michal Lukaszewski
Co-authored-by: Maxim Vafin
---
 src/frontends/pytorch/src/op/atan2.cpp        | 99 +++++++++++++++++++
 src/frontends/pytorch/src/op_table.cpp        |  3 +
 tests/layer_tests/pytorch_tests/test_atan2.py | 80 +++++++++++++++
 3 files changed, 182 insertions(+)
 create mode 100644 src/frontends/pytorch/src/op/atan2.cpp
 create mode 100644 tests/layer_tests/pytorch_tests/test_atan2.py

diff --git a/src/frontends/pytorch/src/op/atan2.cpp b/src/frontends/pytorch/src/op/atan2.cpp
new file mode 100644
index 00000000000000..341f1c201eae56
--- /dev/null
+++ b/src/frontends/pytorch/src/op/atan2.cpp
@@ -0,0 +1,99 @@
+// Copyright (C) 2018-2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#define _USE_MATH_DEFINES
+
+#include <cmath>
+
+#include <memory>
+
+#include "openvino/core/type/element_type.hpp"
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/atan.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert_like.hpp"
+#include "openvino/op/divide.hpp"
+#include "openvino/op/equal.hpp"
+#include "openvino/op/greater.hpp"
+#include "openvino/op/greater_eq.hpp"
+#include "openvino/op/less.hpp"
+#include "openvino/op/logical_and.hpp"
+#include "openvino/op/logical_or.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/select.hpp"
+#include "openvino/op/subtract.hpp"
+#include "utils.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_atan2(const NodeContext& context) {
+    // atan2(input, other, *) → Tensor
+    num_inputs_check(context, 2, 2);
+    Output<Node> lhs;
+    Output<Node> rhs;
+
+    std::tie(lhs, rhs) = get_inputs_with_promoted_types(context, 0, 1);
+
+    auto div = context.mark_node(std::make_shared<v1::Divide>(lhs, rhs));
+
+    auto atan = context.mark_node(std::make_shared<v0::Atan>(div));
+
+    // Create constants used to adjust the result according to the quadrant.
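+    // atan(lhs / rhs) alone is only valid in the right half-plane (rhs > 0); the
+    // constants below are converted to the input element type and used by the Select
+    // nodes further down to add or subtract pi when rhs < 0, and to return +/-pi/2
+    // when rhs == 0 and lhs != 0.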
+    auto zero = context.mark_node(v0::Constant::create(ov::element::i32, Shape{}, {0}));
+    auto pi = context.mark_node(v0::Constant::create(ov::element::f64, Shape{}, {M_PI}));
+    auto half_pi = context.mark_node(v0::Constant::create(ov::element::f64, Shape{}, {M_PI_2}));
+    auto neg_half_pi = context.mark_node(v0::Constant::create(ov::element::f64, Shape{}, {-M_PI_2}));
+
+    zero = context.mark_node(std::make_shared<v1::ConvertLike>(zero, rhs));
+    pi = context.mark_node(std::make_shared<v1::ConvertLike>(pi, rhs));
+    half_pi = context.mark_node(std::make_shared<v1::ConvertLike>(half_pi, rhs));
+    neg_half_pi = context.mark_node(std::make_shared<v1::ConvertLike>(neg_half_pi, rhs));
+
+    // x > 0: no adjustment needed
+    auto x_greater_than_zero = context.mark_node(std::make_shared<v1::Greater>(rhs, zero));
+
+    // x < 0 and y >= 0: add pi
+    auto y_greater_equal_zero = context.mark_node(std::make_shared<v1::GreaterEqual>(lhs, zero));
+    auto x_less_than_zero = context.mark_node(std::make_shared<v1::Less>(rhs, zero));
+    auto add_pi_condition = context.mark_node(std::make_shared<v1::LogicalAnd>(x_less_than_zero, y_greater_equal_zero));
+
+    // x < 0 and y < 0: subtract pi
+    auto y_less_than_zero = std::make_shared<v1::Less>(lhs, zero);
+    auto subtract_pi_condition =
+        context.mark_node(std::make_shared<v1::LogicalAnd>(x_less_than_zero, y_less_than_zero));
+
+    // x = 0 and y > 0: pi/2
+    auto x_equal_zero = std::make_shared<v1::Equal>(rhs, zero);
+    auto y_greater_than_zero = std::make_shared<v1::Greater>(lhs, zero);
+    auto half_pi_condition = context.mark_node(std::make_shared<v1::LogicalAnd>(x_equal_zero, y_greater_than_zero));
+
+    // x = 0 and y < 0: -pi/2
+    auto neg_half_pi_condition = context.mark_node(std::make_shared<v1::LogicalAnd>(x_equal_zero, y_less_than_zero));
+
+    auto special_case_condition =
+        context.mark_node(std::make_shared<v1::LogicalOr>(half_pi_condition, neg_half_pi_condition));
+
+    // Apply the +/-pi adjustment.
+    auto atan_plus_pi = context.mark_node(std::make_shared<v1::Add>(atan, pi));
+    auto atan_minus_pi = context.mark_node(std::make_shared<v1::Subtract>(atan, pi));
+
+    // Select the final result.
+    auto adjusted_case = context.mark_node(std::make_shared<v1::Select>(add_pi_condition, atan_plus_pi, atan_minus_pi));
+    auto special_case = context.mark_node(std::make_shared<v1::Select>(half_pi_condition, half_pi, neg_half_pi));
+    auto adjusted_atan = context.mark_node(std::make_shared<v1::Select>(x_greater_than_zero, atan, adjusted_case));
+    auto result = context.mark_node(std::make_shared<v1::Select>(special_case_condition, special_case, adjusted_atan));
+
+    return {result};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 5d63a6dc037b14..195977432e40e5 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -42,6 +42,7 @@ OP_CONVERTER(translate_argmax);
 OP_CONVERTER(translate_argmin);
 OP_CONVERTER(translate_as_strided);
 OP_CONVERTER(translate_as_tensor);
+OP_CONVERTER(translate_atan2);
 OP_CONVERTER(translate_avg_pool1d);
 OP_CONVERTER(translate_avg_pool2d);
 OP_CONVERTER(translate_avg_pool3d);
@@ -385,6 +386,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::atanh", op::optional_out<op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Atanh>, 1>},
         {"aten::atanh_", op::inplace_op<op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Atanh>>},
+        {"aten::atan2", op::translate_atan2},
         {"aten::avg_pool1d", op::quantizable_op<op::translate_avg_pool1d>},
         {"aten::avg_pool2d", op::quantizable_op<op::translate_avg_pool2d>},
         {"aten::avg_pool3d", op::quantizable_op<op::translate_avg_pool3d>},
@@ -776,6 +778,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_fx() {
         {"aten.asinh.default", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Asinh>},
         {"aten.atan.default", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Atan>},
         {"aten.atanh.default", op::translate_1to1_match_1_inputs_with_fp32_type_alignment<opset10::Atanh>},
+        {"aten.atan2.default", op::translate_atan2},
         {"aten.avg_pool2d.default", op::translate_avg_pool2d},
         {"aten.avg_pool3d.default", op::translate_avg_pool3d},
         {"aten.baddbmm.default", op::translate_addmm_fx},
diff --git a/tests/layer_tests/pytorch_tests/test_atan2.py b/tests/layer_tests/pytorch_tests/test_atan2.py
new file mode 100644
index 00000000000000..77504e8186f925
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_atan2.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2018-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+@pytest.mark.parametrize("input_shape_rhs", [
+    [2, 5, 3, 4],
+    [1, 5, 3, 4],
+    [1]
+])
+class TestAtan2(PytorchLayerTest):
+
+    def _prepare_input(self):
+        return (np.random.randn(2, 5, 3, 4).astype(np.float32), self.input_rhs)
+
+    def create_model(self):
+
+        class aten_atan2(torch.nn.Module):
+            def __init__(self):
+                super(aten_atan2, self).__init__()
+
+            def forward(self, lhs, rhs):
+                return torch.arctan2(lhs, rhs)
+
+        ref_net = None
+
+        return aten_atan2(), ref_net, "aten::atan2"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.precommit_torch_export
+    @pytest.mark.precommit_fx_backend
+    def test_atan2(self, ie_device, precision, ir_version, input_shape_rhs):
+        self.input_rhs = np.random.randn(*input_shape_rhs).astype(np.float32)
+        self._test(*self.create_model(), ie_device, precision, ir_version, use_convert_model=True)
+
+
+class TestAtan2Types(PytorchLayerTest):
+
+    def _prepare_input(self):
+        return (torch.randn(self.lhs_shape).to(self.lhs_type).numpy(),
+                torch.randn(self.rhs_shape).to(self.rhs_type).numpy())
+
+    def create_model(self, lhs_type, rhs_type):
+
+        class aten_atan2(torch.nn.Module):
+            def __init__(self, lhs_type, rhs_type):
+                super(aten_atan2, self).__init__()
+                self.lhs_type = lhs_type
+                self.rhs_type = rhs_type
+
+            def forward(self, lhs, rhs):
+                return torch.arctan2(lhs.to(self.lhs_type), rhs.to(self.rhs_type))
+
+        ref_net = None
+
+        return aten_atan2(lhs_type, rhs_type), ref_net, "aten::atan2"
+
+    @pytest.mark.parametrize(("lhs_type", "rhs_type"),
+                             [[torch.int, torch.float32],
+                              [torch.int, torch.float64],
+                              [torch.float32, torch.float64],
+                              [torch.int64, torch.float32]
+                              ])
+    @pytest.mark.parametrize(("lhs_shape", "rhs_shape"), [([2, 3], [2, 3]),
+                                                          ([2, 3], [1, 3]),
+                                                          ([3, 2, 3], [2, 3]),
+                                                          ])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.precommit_torch_export
+    def test_atan2_types(self, ie_device, precision, ir_version, lhs_type, lhs_shape, rhs_type, rhs_shape):
+        self.lhs_type = lhs_type
+        self.lhs_shape = lhs_shape
+        self.rhs_type = rhs_type
+        self.rhs_shape = rhs_shape
+        self._test(*self.create_model(lhs_type, rhs_type),
+                   ie_device, precision, ir_version, freeze_model=False, trace_model=True)
\ No newline at end of file
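
Editor's note: for anyone who wants to try the new conversion path locally, a minimal sketch (not part of the patch) follows. It assumes an OpenVINO build that already contains this change and uses the public `openvino.convert_model` and `compile_model` APIs; the module name `Atan2Model` and the tolerance are illustrative choices only.

```python
# Minimal sketch (assumption: an OpenVINO build that includes this patch).
import numpy as np
import torch
import openvino as ov


class Atan2Model(torch.nn.Module):
    def forward(self, lhs, rhs):
        return torch.atan2(lhs, rhs)


lhs = torch.randn(2, 3)
rhs = torch.randn(2, 3)

# Convert the PyTorch module through the PyTorch frontend and compile for CPU.
ov_model = ov.convert_model(Atan2Model(), example_input=(lhs, rhs))
compiled = ov.compile_model(ov_model, "CPU")

# Compare the OpenVINO result against the PyTorch reference.
ov_out = compiled((lhs.numpy(), rhs.numpy()))[0]
ref = torch.atan2(lhs, rhs).numpy()
print(np.allclose(ov_out, ref, atol=1e-6))
```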