Skip to content

Commit

Permalink
[CPU][TESTS] Remove sse4 instances from oneDNN related tests
Browse files Browse the repository at this point in the history
It does not seem worth verifying them, considering how slow they are
  • Loading branch information
EgorDuplensky committed Jan 17, 2025
1 parent 8d16209 commit 40c491c
Show file tree
Hide file tree
Showing 10 changed files with 14 additions and 363 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -190,16 +190,6 @@ void ConvolutionLayerCPUTest::SetUp() {
}

TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) {
// Skip tests for sse41 convolution where ic or oc cannot be exactly divided by the block size,
// since tails processing for sse41 nspc layout is not supported yet (see 52736).
if (!inFmts.empty() && (inFmts.front() == nwc || inFmts.front() == nhwc || inFmts.front() == ndhwc) && selectedType.find("jit_sse") != std::string::npos) {
auto inpChannels = function->get_parameters().front()->get_partial_shape()[1].get_length();
auto outChannels = function->get_output_partial_shape(0)[1].get_length();
if ((inpChannels % 8) || (outChannels % 8)) {
GTEST_SKIP() << "Disabled test due to the sse41 convolution kernel does not support tails for nspc layout." << std::endl;
}
}

if (!priority.empty()) {
// Skip tests for brgconv convolution where kernel size = 1x1
if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) {
Expand Down Expand Up @@ -340,10 +330,7 @@ const std::vector<InputShape>& inShapesGemm2D_cache() {

const std::vector<CPUSpecificParams>& CPUParams_2D() {
static const std::vector<CPUSpecificParams> CPUParams_2D = {
conv_sse42_2D,
conv_avx2_2D,
conv_avx512_2D,
conv_sse42_2D_nspc,
conv_avx2_2D_nspc,
conv_avx2_2D_nspc_brgconv,
conv_avx512_2D_nspc,
Expand All @@ -354,7 +341,6 @@ const std::vector<CPUSpecificParams>& CPUParams_2D() {

const std::vector<CPUSpecificParams>& CPUParams_3D() {
static const std::vector<CPUSpecificParams> CPUParams_3D = {
//conv_sse42_3D, // not supported jit_sse42 for 3d
conv_avx2_3D,
conv_avx512_3D,
conv_avx2_3D_nspc,
Expand Down Expand Up @@ -479,10 +465,8 @@ const std::vector<InputShape>& inputShapes2d_dynBatch() {

const std::vector<CPUSpecificParams>& CPUParams_1x1_1D() {
static const std::vector<CPUSpecificParams> CPUParams_1x1_1D = {
conv_sse42_1D_1x1,
conv_avx2_1D_1x1,
conv_avx512_1D_1x1,
conv_sse42_1D_1x1_nspc,
conv_avx2_1D_1x1_nspc,
conv_avx2_1D_1x1_nspc_brgconv,
conv_avx512_1D_1x1_nspc,
Expand Down Expand Up @@ -567,10 +551,8 @@ const std::vector<CPUSpecificParams>& CPUParams_GEMM_3D() {

const std::vector<CPUSpecificParams>& CPUParams_1x1_2D() {
static const std::vector<CPUSpecificParams> CPUParams_1x1_2D = {
conv_sse42_2D_1x1,
conv_avx2_2D_1x1,
conv_avx512_2D_1x1,
conv_sse42_2D_1x1_nspc,
conv_avx2_2D_1x1_nspc,
conv_avx2_2D_1x1_nspc_brgconv,
conv_avx512_2D_1x1_nspc,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -787,12 +787,11 @@ const CPUSpecificParams& expectedCpuConfigAnyLayout() {
}

const std::vector<CPUSpecificParams>& vecCpuConfigsFusing_4D() {
const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"};
const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"};
const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"};
const auto acl_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"acl"}, "acl"};

static const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()};
static const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()};
return vecCpuConfigsFusing_4D;
}

Expand Down

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,6 @@ INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_empty_fusing, ConvolutionLayerCPUT
ConvolutionLayerCPUTest::getTestCaseName);

const std::vector<CPUSpecificParams> CPUParams_2D_plain_to_blocked = {
conv_sse42_plain_to_blocked_2D,
conv_avx2_plain_to_blocked_2D,
conv_avx512_plain_to_blocked_2D,
};
Expand Down Expand Up @@ -397,4 +396,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest,
} // namespace
} // namespace Convolution
} // namespace test
} // namespace ov
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -344,10 +344,8 @@ const auto convParams_ExplicitPadding_1D = ::testing::Combine(
);

const std::vector<CPUSpecificParams> CPUParams_1D_f32 = {
conv_sse42_1D,
conv_avx2_1D,
conv_avx512_1D,
conv_sse42_1D_nspc,
conv_avx2_1D_nspc,
conv_avx2_1D_nspc_brgconv,
conv_avx512_1D_nspc,
Expand All @@ -356,10 +354,8 @@ const std::vector<CPUSpecificParams> CPUParams_1D_f32 = {

//Current avx2 I8 fall back on JIT avx2 implement when having src zero point.Not enabling conv_avx2_1D_nspc_brgconv for I8 precision.
const std::vector<CPUSpecificParams> CPUParams_1D_I8 = {
conv_sse42_1D,
conv_avx2_1D,
conv_avx512_1D,
conv_sse42_1D_nspc,
conv_avx2_1D_nspc,
conv_avx512_1D_nspc,
conv_avx512_1D_nspc_brgconv
Expand Down Expand Up @@ -424,7 +420,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest,
ConvolutionLayerCPUTest::getTestCaseName);

const std::vector<CPUSpecificParams> CPUParams_1D_plain_to_blocked = {
conv_sse42_plain_to_blocked_1D,
conv_avx2_plain_to_blocked_1D,
conv_avx512_plain_to_blocked_1D,
};
Expand Down Expand Up @@ -630,7 +625,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP16, ConvolutionLayerCPUTest,
/* ============= Jit Planar ============= */
/* ============= Convolution planar params (2D) ============= */
const std::vector<CPUSpecificParams> CPUParams_Jit_Planar_2D = {
// sse42 is not supported
conv_avx2_planar_2D,
conv_avx512_planar_2D,
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,8 @@ namespace {
const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};
const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"};
const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"};
const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"};

const std::vector<CPUSpecificParams> vecCpuConfigs = {sse42, avx, avx512};
const std::vector<CPUSpecificParams> vecCpuConfigs = {avx, avx512};

const std::vector<maxPoolV8SpecificParams> paramsMaxV84D_ref = {
maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0},
Expand Down Expand Up @@ -50,13 +49,9 @@ const auto avx2_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2"}, "jit_avx2"};
const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"};
const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"};

const auto sse42_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"};
const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"};
const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"};

const std::vector<CPUSpecificParams> vecCpuConfigsFusing_3D = {sse42_nwc, avx2_nwc, avx512_nwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_3D = {avx2_nwc, avx512_nwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc};
const std::vector<CPUSpecificParams> vecCpuConfigsFusing_5D = {avx2_ndhwc, avx512_ndhwc};

std::vector<fusingSpecificParams> fusingParamsSet {
emptyFusingSpec,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,7 @@ namespace {
const auto optimizedCPUSpec = []()-> std::vector<CPUSpecificParams>{
const auto avx512 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx512"};
const auto avx2 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx2"};
const auto sse42 = CPUSpecificParams{{}, {}, {"jit"}, "jit_sse42"};
const std::vector<CPUSpecificParams> vecCpuConfigs = {avx512, avx2, sse42};
const std::vector<CPUSpecificParams> vecCpuConfigs = {avx512, avx2};
auto supportConfigure = CPUTestUtils::filterCPUInfoForDevice(vecCpuConfigs);
// only the MAX ISA of vecCpuConfigs will be tested
if (supportConfigure.size() > 0) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ namespace Kernel_1x1 {

/* ============= Kernel_1x1 (2D) ============= */
const std::vector<CPUSpecificParams> CPUParams2DConv = {
conv_sse42_2D_1x1,
conv_avx2_2D_1x1,
conv_avx512_2D_1x1
};
Expand Down Expand Up @@ -84,7 +83,6 @@ commonConvParams dwConvParams2D = commonConvParams{kernelSize2D(), strides2D(),
numOutChannels(), paddingType(), numOutChannels()};
const ov::Shape inputShapesDW2D{1, 32, 16, 16};
const std::vector<CPUSpecificParams> CPUParams2D = {
conv_sse42_dw_2D,
conv_avx2_dw_2D,
conv_avx512_dw_2D
};
Expand All @@ -104,7 +102,6 @@ commonConvParams dwConvParams3D = commonConvParams{kernelSize3D(), strides3D(),
numOutChannels(), paddingType(), numOutChannels()};
const ov::Shape inputShapesDW3D{1, 32, 8, 16, 16};
const std::vector<CPUSpecificParams> CPUParams3D = {
conv_sse42_dw_3D,
conv_avx2_dw_3D,
conv_avx512_dw_3D
};
Expand Down Expand Up @@ -159,7 +156,6 @@ namespace ConvolutionConcat {
/* ============= Convolution (2D) ============= */
const std::vector<CPUSpecificParams> CPUParams2D = {
conv_ref_2D,
conv_sse42_2D,
conv_avx2_2D,
conv_avx512_2D
};
Expand Down Expand Up @@ -196,7 +192,6 @@ namespace GroupConvolutionConcat {
/* ============= GroupConvolution (2D) ============= */
const std::vector<CPUSpecificParams> CPUParams2D = {
conv_ref_2D,
conv_sse42_2D,
conv_avx2_2D,
conv_avx512_2D
};
Expand Down Expand Up @@ -255,4 +250,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData3D, ConvConcatSubgrap

} // namespace GroupConvolutionBackpropDataConcat
} // namespace test
} // namespace ov
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,6 @@ TEST_F(EdgeWithSameNameInTwoModels, smoke_CompareWithRef) {
std::tie(inFmts, outFmts, priority, selectedType) = conv_avx512_2D;
} else if (ov::with_cpu_x86_avx2()) {
std::tie(inFmts, outFmts, priority, selectedType) = conv_avx2_2D;
} else if (ov::with_cpu_x86_sse42()) {
std::tie(inFmts, outFmts, priority, selectedType) = conv_sse42_2D;
}

// first model
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,24 +31,6 @@ namespace CPUTestUtils {
const auto conv_gemm_acl_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"gemm_acl"}, "gemm_acl"};
const auto conv_gemm_acl_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"gemm_acl"}, "gemm_acl"};

const auto conv_sse42_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_dw_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"};
const auto conv_sse42_dw_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"};
const auto conv_sse42_dw_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"};

const auto conv_sse42_plain_to_blocked_1D = CPUSpecificParams{{ncw}, {nCw8c}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_plain_to_blocked_2D = CPUSpecificParams{{nchw}, {nChw8c}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_plain_to_blocked_3D = CPUSpecificParams{{ncdhw}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"};

const auto conv_sse42_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"};
const auto conv_sse42_dw_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_dw"}, "jit_sse42_dw"};
const auto conv_sse42_dw_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"};
const auto conv_sse42_dw_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"};

const auto conv_avx2_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2"}, "jit_avx2"};
const auto conv_avx2_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2"};
const auto conv_avx2_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"};
Expand Down Expand Up @@ -107,22 +89,18 @@ namespace CPUTestUtils {
const auto conv_avx512_2D_nspc_brgconv_amx = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"};
const auto conv_avx512_3D_nspc_brgconv_amx = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"};

const auto conv_sse42_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"};
const auto conv_avx2_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"};
const auto conv_avx512_1D_1x1 = CPUSpecificParams{{nCw16c}, {nCw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"};

const auto conv_sse42_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"};
const auto conv_avx2_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"};
const auto conv_avx2_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"};
const auto conv_avx512_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx512_1x1"}, "jit_avx512_1x1"};
const auto conv_avx512_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_1x1"}, "brgconv_avx512_1x1"};
const auto conv_avx512_1D_1x1_nspc_brgconv_amx = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_amx_1x1"}, "brgconv_avx512_amx_1x1"};

const auto conv_sse42_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"};
const auto conv_avx2_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"};
const auto conv_avx512_2D_1x1 = CPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"};

const auto conv_sse42_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"};
const auto conv_avx2_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"};
const auto conv_avx2_2D_1x1_nspc_brgconv = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"};
const auto conv_avx2_3D_1x1_nspc_brgconv = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"};
Expand Down

0 comments on commit 40c491c

Please sign in to comment.