From 4070bea2a3329e971a3baa4dddf94b7520652268 Mon Sep 17 00:00:00 2001 From: Chen Xu Date: Fri, 29 Nov 2024 13:35:36 +0800 Subject: [PATCH] [CPU][Ref] Support Reduce ops with empty input (#27603) ### Details: - *The main part of this PR is contributed by https://github.com/openvinotoolkit/openvino/pull/27438.* - *My revision is placed in the last commit, regarding changes to the Reduce node of the CPU plugin, mainly covering the following contents:* 1. [x64] Prevent the `divisor` in `reduce_kernel_post_process` from being zero, and enable post ops fusion of ReduceMean. 2. [x64] Add `axesZeroDim` and `axesZeroDimFusing` in test cases, so that all these newly added test cases will go exactly to the newly added "early return" code block, where the input tensor is empty and the output tensor is not. 3. [x64] For the case of empty input combined with low precision ops fusion, use an intermediate buffer to set default results before post ops fusion. 4. [arm] `makeExecutor` is skipped for the case of empty input on ARM, because the ACL library does not support empty input tensors (e.g., NEReduceMean::validate returns an error). Besides, because of the early return, the executor won't be needed anyway. 5. [arm] ARM Transformations ConvertReduceProd(Min, Max, Sum) are disabled to avoid empty output. 
### Tickets: - *[CVS-117469](https://jira.devtools.intel.com/browse/CVS-117469)* --------- Co-authored-by: mitruska --- .../convert_reduce_to_pooling.hpp | 2 +- .../openvino/reference/reduce_mean.hpp | 4 ++ src/plugins/intel_cpu/src/nodes/reduce.cpp | 64 +++++++++++++----- src/plugins/intel_cpu/src/nodes/reduce.h | 3 + .../arm/pass/convert_reduce_multi_axis.cpp | 3 + .../single_layer_tests/classes/reduce.cpp | 19 ++++++ .../single_layer_tests/classes/reduce.hpp | 2 + .../instances/arm/reduce.cpp | 26 ++++++++ .../instances/x64/reduce.cpp | 65 ++++++++++++++++++- .../single_layer_tests/reduce_ops.cpp | 23 +++++++ .../functional/op_reference/reduce_l1.cpp | 22 +++++++ .../functional/op_reference/reduce_l2.cpp | 22 +++++++ .../functional/op_reference/reduce_max.cpp | 23 +++++++ .../functional/op_reference/reduce_mean.cpp | 22 +++++++ .../functional/op_reference/reduce_min.cpp | 23 +++++++ .../functional/op_reference/reduce_prod.cpp | 22 +++++++ .../functional/op_reference/reduce_sum.cpp | 22 +++++++ 17 files changed, 349 insertions(+), 18 deletions(-) diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp index b74a0ff538e011..662660b926aa52 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp @@ -72,7 +72,7 @@ ov::matcher_pass_callback ConvertReduceBase::convert_reduce_to_pooling() { return [&](ov::pass::pattern::Matcher& m) { auto reduce = std::dynamic_pointer_cast(m.get_match_root()); - if (!reduce || transformation_callback(reduce)) { + if (!reduce || transformation_callback(reduce) || ov::shape_size(reduce->input_value(0).get_shape()) == 0) { return false; } diff --git a/src/core/reference/include/openvino/reference/reduce_mean.hpp 
b/src/core/reference/include/openvino/reference/reduce_mean.hpp index 4c46d4ca786d09..f046f4f96197bb 100644 --- a/src/core/reference/include/openvino/reference/reduce_mean.hpp +++ b/src/core/reference/include/openvino/reference/reduce_mean.hpp @@ -26,6 +26,10 @@ void reduce_mean(const T* in, T* out, const Shape& in_shape, const AxisSet& redu reduce_sum(in, out, in_shape, reduction_axes); const auto out_shape = util::reduce(in_shape, reduction_axes); + if (shape_size(in_shape) == 0) { + return; + } + const auto out_size = shape_size(out_shape); const auto count = static_cast(shape_size(in_shape) / out_size); std::transform(out, std::next(out, out_size), out, [count](const T value) { diff --git a/src/plugins/intel_cpu/src/nodes/reduce.cpp b/src/plugins/intel_cpu/src/nodes/reduce.cpp index 7c18421ad3d832..1bc0209e0d9c69 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.cpp +++ b/src/plugins/intel_cpu/src/nodes/reduce.cpp @@ -2020,6 +2020,7 @@ void Reduce::initSupportedPrimitiveDescriptors() { config.outConfs[0].setMemDesc(creatorsMap.at(outFormat)->createSharedDesc(outPrecision, getOutputShapeAtPort(0))); if (useAclExecutor) { +#if defined (OV_CPU_WITH_ACL) std::vector srcMemoryDescs; for (size_t i = 0; i < config.inConfs.size(); i++) { srcMemoryDescs.push_back(config.inConfs[i].getMemDesc()); @@ -2034,22 +2035,29 @@ void Reduce::initSupportedPrimitiveDescriptors() { if (!factory->isEmpty()) { supportedPrimitiveDescriptors.push_back({config, impl_type, factory}); } +#endif } else { supportedPrimitiveDescriptors.push_back({config, impl_type}); } }; #if defined (OV_CPU_WITH_ACL) - reduceAttrs.operation = algorithm; - reduceAttrs.keepDims = keep_dims; - reduceAttrs.axes = raw_axes; - for (auto &axis : reduceAttrs.axes) { - if (axis < 0) - axis += static_cast(getInputShapeAtPort(REDUCE_DATA).getRank()); + // acl doesn't support empty input + if (!isDynamicNode() && shape_size(getInputShapeAtPort(REDUCE_DATA).getStaticDims()) == 0) { + canUseAclExecutor = false; + } else 
{ + reduceAttrs.operation = algorithm; + reduceAttrs.keepDims = keep_dims; + reduceAttrs.axes = raw_axes; + for (auto &axis : reduceAttrs.axes) { + if (axis < 0) + axis += static_cast(getInputShapeAtPort(REDUCE_DATA).getRank()); + } + pushDesc(LayoutType::nspc, LayoutType::nspc, input_prec, output_prec, impl_desc_type::undef, true); + pushDesc(LayoutType::ncsp, LayoutType::ncsp, input_prec, output_prec, impl_desc_type::undef, true); + canUseAclExecutor = !supportedPrimitiveDescriptors.empty(); } - pushDesc(LayoutType::nspc, LayoutType::nspc, input_prec, output_prec, impl_desc_type::undef, true); - pushDesc(LayoutType::ncsp, LayoutType::ncsp, input_prec, output_prec, impl_desc_type::undef, true); - canUseAclExecutor = !supportedPrimitiveDescriptors.empty(); + if (canUseAclExecutor) return; #endif @@ -2089,10 +2097,16 @@ void Reduce::initSupportedPrimitiveDescriptors() { } bool Reduce::isExecutable() const { - return !isInputTensorAtPortEmpty(REDUCE_DATA); + return !isOutputTensorAtPortEmpty(0); } void Reduce::prepareParams() { + auto srcMemPtr = getSrcMemoryAtPort(REDUCE_DATA); + auto dstMemPtr = getDstMemoryAtPort(0); + const auto& src_shape = srcMemPtr->getStaticDims(); + dst_size = dstMemPtr->getSize(); + empty_input = shape_size(src_shape) == 0; +#if defined (OV_CPU_WITH_ACL) if (canUseAclExecutor) { std::vector srcMemoryDescs; for (size_t i = 0; i < getParentEdges().size(); i++) { @@ -2102,11 +2116,15 @@ void Reduce::prepareParams() { dstMemoryDescs.push_back(getDstMemoryAtPort(0)->getDescPtr()); auto selectedPD = getSelectedPrimitiveDescriptor(); - aclExecPtr = selectedPD->getExecutorFactoryAs()->makeExecutor(reduceAttrs, srcMemoryDescs, dstMemoryDescs, {}); - selectedPD->setImplementationType(aclExecPtr->getImplType()); - + if (!empty_input) { + aclExecPtr = selectedPD->getExecutorFactoryAs()->makeExecutor(reduceAttrs, srcMemoryDescs, dstMemoryDescs, {}); + selectedPD->setImplementationType(aclExecPtr->getImplType()); + } else { + 
selectedPD->setImplementationType(acl); + } return; } +#endif src_dims = getParentEdgeAt(REDUCE_DATA)->getMemory().getDesc().getShape().getDims(); std::vector reduce_axes; @@ -2116,9 +2134,7 @@ void Reduce::prepareParams() { reduce_axes = raw_axes; } - auto dstMemPtr = getDstMemoryAtPort(0); const VectorDims &dst_dims = dstMemPtr->getDesc().getShape().getDims(); - dst_size = dstMemPtr->getSize(); calc_process_dst_dims(reduce_axes, dst_dims); if (jit_mode) { set_reduce_dim_flags(); @@ -2274,11 +2290,26 @@ void Reduce::execute(dnnl::stream strm) { const uint8_t *src_data = srcMemPtr->getDataAs(); uint8_t *dst_data = dstMemPtr->getDataAs(); + if (empty_input && dst_size > 0) { +#if defined(OPENVINO_ARCH_X86_64) + output_info_reassign(&dst_data); + init_dst_data(dst_data, dst_size); + output_info_restore(&dst_data); + if (attr.get()->post_ops_.len() != 0) { + reduce_kernel_post_process(dst_data); + } +#else + init_dst_data(dst_data, dst_size); +#endif + return; + } + if (jit_mode) { if (is_hybrid_layout) { dst_data = reinterpret_cast(prc_mem.get_data_handle()); } reduce_type(src_data, dst_data); +#if defined (OV_CPU_WITH_ACL) } else if (aclExecPtr) { std::vector srcMemory; for (size_t i = 0; i < getParentEdges().size(); i++) { @@ -2288,6 +2319,7 @@ void Reduce::execute(dnnl::stream strm) { dstMemory.push_back(getDstMemoryAtPort(0)); aclExecPtr->exec(srcMemory, dstMemory, postOpsDataPtrs.data()); +#endif } else { if (layout == ReduceLayoutType::reduce_ncsp) { auto in_ptr = reinterpret_cast(src_data); @@ -2725,7 +2757,7 @@ inline void Reduce::reduce_kernel_process(const uint8_t *in_p, uint8_t *out_p, s inline void Reduce::reduce_kernel_post_process(uint8_t *out_ptr) { const uint8_t *in_ptr = fuse_low_precision ? static_cast(&intermediate_buf[0]) : nullptr; - const size_t integerDivisor = IB * IC * ID * IH * IW / (OB * OC * OD * OH * OW); + const size_t integerDivisor = empty_input ? 
1 : IB * IC * ID * IH * IW / (OB * OC * OD * OH * OW); const float divisor = static_cast(integerDivisor); if (layout == ReduceLayoutType::reduce_ncsp) { parallel_for2d(OB, OC, [&](size_t ob, size_t oc) { diff --git a/src/plugins/intel_cpu/src/nodes/reduce.h b/src/plugins/intel_cpu/src/nodes/reduce.h index 2464686edb1ee4..c790cefb0583b0 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.h +++ b/src/plugins/intel_cpu/src/nodes/reduce.h @@ -152,6 +152,7 @@ class Reduce : public Node { bool ReduceCDW_opt = false; bool use_aux_kernel = false; bool set_use_aux_kernel = false; + bool empty_input = false; bool ReduceN, ReduceC, ReduceD, ReduceH, ReduceW; size_t IB, IC, ID, IH, IW; size_t OB, OC, OD, OH, OW; @@ -188,9 +189,11 @@ class Reduce : public Node { std::string errorPrefix; +#if defined (OV_CPU_WITH_ACL) ReduceAttrs reduceAttrs; bool canUseAclExecutor = false; std::shared_ptr aclExecPtr = nullptr; +#endif }; } // namespace node diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp index 22692e644e6e56..ff5632cb0a5e8f 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.cpp @@ -23,6 +23,9 @@ ov::matcher_pass_callback ov::intel_cpu::ConvertReduceMultiAxisBase::convert_red if (!reduction_axes) { return false; } + if (!reduce->is_dynamic() && ov::shape_size(input0.get_shape()) == 0) { + return false; + } if (ov::shape_size(input1.get_shape()) <= 1) { return false; } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp index 66b1d60932b262..b379655338aaf9 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp @@ -243,6 +243,25 @@ const std::vector& reductionTypes() { return reductionTypes; } +const std::vector& reductionTypesArithmetic() { + static const std::vector reductionTypesArithmetic = { + ov::test::utils::ReductionType::Mean, + ov::test::utils::ReductionType::Sum, + ov::test::utils::ReductionType::Prod, + ov::test::utils::ReductionType::L1, + ov::test::utils::ReductionType::L2, + }; + return reductionTypesArithmetic; +} + +const std::vector& reductionTypesCompare() { + static const std::vector reductionTypesCompare = { + ov::test::utils::ReductionType::Max, + ov::test::utils::ReductionType::Min, + }; + return reductionTypesCompare; +} + const std::vector& inpOutPrc() { static const std::vector inpOutPrc = {ElementType::f32}; return inpOutPrc; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.hpp index b3e5fff2ba0b7b..d45cce4b3f913e 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.hpp @@ -52,6 +52,8 @@ const std::vector>& axes(); const std::vector>& axesND(); const std::vector& opTypes(); const std::vector& reductionTypes(); +const std::vector& reductionTypesArithmetic(); +const std::vector& reductionTypesCompare(); const std::vector& inpOutPrc(); const std::vector> additionalConfig(); const std::vector> additionalConfigFP32(); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp index 6d1aa855c31865..45ecc774b5dbf9 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp @@ -20,6 +20,11 @@ std::vector> inputShapes_5D = { {{{}, {{2, 19, 2, 2, 9}}}}, }; +std::vector> inputShapes_5D_ZeroDim = { + {{{}, {{2, 19, 0, 2, 9}}}}, + {{{}, {{2, 19, 0, 2, 0}}}}, +}; + const std::vector> axes5D = { {2, 4}, {1, 2, 4}, @@ -70,6 +75,20 @@ const auto params_MultiAxis_5D_ref = testing::Combine( testing::Values(emptyFusingSpec), testing::ValuesIn(config_infer_prec_f32)); +const auto params_MultiAxis_5D_ZeroDim_ref = testing::Combine( + testing::Combine( + testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::undefined), + testing::Values(ElementType::undefined), + testing::ValuesIn(inputShapes_5D_ZeroDim)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + //There are dedicated instences of smoke_Reduce_MultiAxis_5D_CPU test in arm and x64 folders //because ACL does not support 0 as reduction axis INSTANTIATE_TEST_SUITE_P( @@ -87,6 +106,13 @@ INSTANTIATE_TEST_SUITE_P( ReduceCPULayerTest::getTestCaseName ); +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_MultiAxis_5D_ZeroDim_CPU_ref, + ReduceCPULayerTest, + params_MultiAxis_5D_ZeroDim_ref, + ReduceCPULayerTest::getTestCaseName +); + } // namespace } // namespace Reduce } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp index 302e47fd45aa84..af4319d66a6efe 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp @@ -53,6 +53,12 @@ std::vector> inputShapes_SingleBatch_dyn 
= { {{{{1, 5}, 19, {1, 5}, {1, 10}}, {{1, 19, 2, 2}, {1, 19, 2, 9}}}}, }; +std::vector> inputShapes_Dynmic_ZeroDim = { + {{{-1, -1, -1, -1}, {{2, 0, 3, 9}}}}, + {{{2, 0, -1, -1}, {{2, 0, 3, 9}}}}, + {{{2, 0, -1, -1}, {{2, 0, 3, 0}}}} +}; + std::vector cpuParams_3D = { CPUSpecificParams({ncw}, {ncw}, {}, {}), }; @@ -99,6 +105,10 @@ const std::vector> axesGather = { {3} }; +const std::vector> axesZeroDimFusing = { + {1, 3}, +}; + std::vector cpuParams_5D = { CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), @@ -144,6 +154,17 @@ const auto fusingFakeQuantizeTranspose = fusingSpecificParams{std::make_shared

fusingParamsFullSet { + emptyFusingSpec, + /* activations */ + fusingSwish, + /* FQ */ + fusingFakeQuantizePerChannelRelu, + fusingFakeQuantizePerTensorRelu, + /* another patterns */ + fusingScaleShift +}; + const std::vector fusingParamsSet { /* activations */ fusingSwish, @@ -600,6 +621,34 @@ const auto params_LowPrecision_fusing = testing::Combine( testing::ValuesIn(fusingParamsSet_LowPrecision), testing::ValuesIn(additionalConfig())); +const auto params_DimZero_Arithmetic_fusing = testing::Combine( + testing::Combine( + testing::ValuesIn(axesZeroDimFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesArithmetic()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::undefined), + testing::Values(ElementType::undefined), + testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsFullSet), + testing::ValuesIn(additionalConfig())); + +const auto params_DimZero_Compare_fusing = testing::Combine( + testing::Combine( + testing::ValuesIn(axesZeroDimFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesCompare()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::undefined), + testing::Values(ElementType::undefined), + testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsFullSet), + testing::ValuesIn(additionalConfigFP32())); + INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_fusing_CPU, ReduceCPULayerTest, @@ -635,6 +684,20 @@ INSTANTIATE_TEST_SUITE_P( ReduceCPULayerTest::getTestCaseName ); +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_DimZero_Arithmetic_fusing_CPU, + ReduceCPULayerTest, + params_DimZero_Arithmetic_fusing, + ReduceCPULayerTest::getTestCaseName +); + +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_DimZero_Compare_fusing_CPU, + ReduceCPULayerTest, + params_DimZero_Compare_fusing, + 
ReduceCPULayerTest::getTestCaseName +); + /* ================================ 2.2 Fusion - KeepNoDims ================================ */ const auto params_OneAxis_fusing_KeepNoDims = testing::Combine( testing::Combine( @@ -702,4 +765,4 @@ INSTANTIATE_TEST_SUITE_P( } // namespace } // namespace Reduce } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp index 297d973a796dd0..9b7ae687e9c81d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp @@ -29,6 +29,12 @@ const std::vector> input_shapes = { std::vector{3, 5, 7, 9}, }; +const std::vector> input_shapes_0_dim = { + std::vector{2, 0, 4, 1}, + std::vector{8, 0, 4, 0}, + std::vector{0, 0, 0, 0}, +}; + const std::vector> input_shapes_one_axis = { std::vector{10, 20, 30, 40}, std::vector{3, 5, 7, 9}, @@ -167,6 +173,16 @@ const auto params_reduction_types = testing::Combine( testing::Values(ov::test::utils::DEVICE_CPU) ); +const auto params_empty_input = testing::Combine( + testing::ValuesIn(axes), + testing::Values(op_types[1]), + testing::ValuesIn(keep_dims), + testing::ValuesIn(reduction_types), + testing::Values(model_types[0]), + testing::ValuesIn(input_shapes_0_dim), + testing::Values(ov::test::utils::DEVICE_CPU) +); + const auto params_reduction_types_logical = testing::Combine( testing::Values(std::vector{0, 1, 3}), testing::Values(op_types[1]), @@ -250,6 +266,13 @@ INSTANTIATE_TEST_SUITE_P( ReduceOpsLayerTest::getTestCaseName ); +INSTANTIATE_TEST_SUITE_P( + smoke_Reduce_ReductionTypes_EmptyTensor, + ReduceOpsLayerTest, + params_empty_input, + ReduceOpsLayerTest::getTestCaseName +); + INSTANTIATE_TEST_SUITE_P( 
smoke_ReduceLogical_ReductionTypes, ReduceOpsLayerTest, diff --git a/src/plugins/template/tests/functional/op_reference/reduce_l1.cpp b/src/plugins/template/tests/functional/op_reference/reduce_l1.cpp index 6e0c2fe2aa24e0..e47295f247b35f 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_l1.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_l1.cpp @@ -25,6 +25,28 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 2, 2}, AxisSet{2}, keep_dims), element::Type(IN_ET), std::vector{3, 7, 11, 15, 19, 23}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + params.push_back( + ReductionParams(ReductionType::L1, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{0, 0}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::L1, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{0, 0}))); + return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_l2.cpp b/src/plugins/template/tests/functional/op_reference/reduce_l2.cpp index 565f89d58f7238..b5820f6970ae5b 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_l2.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_l2.cpp @@ -29,6 +29,27 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 2, 2}, AxisSet{2}, keep_dims), element::Type(IN_ET), std::vector{2.23606798, 5.0, 7.81024968, 10.63014581, 13.45362405, 16.2788206}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if 
(keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + params.push_back( + ReductionParams(ReductionType::L2, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{0, 0}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::L2, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); return params; } @@ -45,6 +66,7 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 2, 2}, AxisSet{2}, keep_dims), element::Type(IN_ET), std::vector{2, 5, 8, 11, 13, 16}))}; + return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_max.cpp b/src/plugins/template/tests/functional/op_reference/reduce_max.cpp index 0674595de4ec43..7ab89fc16d1900 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_max.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_max.cpp @@ -79,6 +79,29 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 3, 3}, AxisSet{0, 1, 2}, keep_dims), element::Type(IN_ET), std::vector{27}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + const auto default_val = std::numeric_limits::lowest(); + params.push_back(ReductionParams( + ReductionType::Max, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{default_val, default_val}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + 
out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::Max, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); + return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_mean.cpp b/src/plugins/template/tests/functional/op_reference/reduce_mean.cpp index def9d837b46df6..07159de9704a30 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_mean.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_mean.cpp @@ -41,6 +41,28 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 2}, AxisSet{1}, keep_dims), element::Type(IN_ET), std::vector{1.5, 3.5, 5.5}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + params.push_back( + ReductionParams(ReductionType::Mean, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{0, 0}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::Mean, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); + return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_min.cpp b/src/plugins/template/tests/functional/op_reference/reduce_min.cpp index abc9dca157684b..f982af07ab12a5 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_min.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_min.cpp @@ -79,6 +79,29 @@ std::vector 
generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 3, 3}, AxisSet{0, 1, 2}, keep_dims), element::Type(IN_ET), std::vector{1}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + constexpr auto max_value = + std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : std::numeric_limits::max(); + params.push_back(ReductionParams( + ReductionType::Min, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{max_value, max_value}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::Min, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_prod.cpp b/src/plugins/template/tests/functional/op_reference/reduce_prod.cpp index d030633932fd73..54e39dad68826f 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_prod.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_prod.cpp @@ -74,6 +74,28 @@ std::vector generateReductionParams(const bool keep_dims) { 19 * 20 * 21, 22 * 23 * 24, 25 * 26 * 27}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + const T default_val = T{1}; + params.push_back(ReductionParams( + ReductionType::Prod, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{default_val, default_val}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if 
(keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::Prod, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); return params; } diff --git a/src/plugins/template/tests/functional/op_reference/reduce_sum.cpp b/src/plugins/template/tests/functional/op_reference/reduce_sum.cpp index ab77acc3cf696a..dd8dcd38635c79 100644 --- a/src/plugins/template/tests/functional/op_reference/reduce_sum.cpp +++ b/src/plugins/template/tests/functional/op_reference/reduce_sum.cpp @@ -121,6 +121,28 @@ std::vector generateReductionParams(const bool keep_dims) { reference_tests::Tensor(reduce(Shape{3, 3, 3, 3, 3}, AxisSet{0, 1, 2, 3, 4}, keep_dims), element::Type(IN_ET), std::vector{243}))}; + auto out_shape_from_empty = Shape{2, 1, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2}; + } + params.push_back( + ReductionParams(ReductionType::Sum, + keep_dims, + std::vector{1, 2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{0, 0}))); + + out_shape_from_empty = Shape{2, 0, 1}; + if (keep_dims == false) { + out_shape_from_empty = Shape{2, 0}; + } + params.push_back( + ReductionParams(ReductionType::Sum, + keep_dims, + std::vector{2}, + reference_tests::Tensor({2, 0, 4}, element::Type(IN_ET), std::vector{}), + reference_tests::Tensor(out_shape_from_empty, element::Type(IN_ET), std::vector{}))); + return params; }