From fc0b54e857a1bc58de95d1e04b35e8fc1a5a23eb Mon Sep 17 00:00:00 2001
From: "Anastasiya(Asya) Pronina"
Date: Fri, 29 Nov 2024 14:02:31 +0100
Subject: [PATCH] Fixed Coverity issues found in NPUW (#27552)

### Details:
 - *Fixed `AUTO_CAUSES_COPY` issues found in NPUW by Coverity*

### Tickets:
 - *EISW-146516*
---
 .../src/plugin/npuw/just_sync_infer_request.cpp     |  3 ---
 .../plugin/npuw/partitioning/patterns/compute.cpp   |  4 ++--
 .../src/plugin/npuw/partitioning/patterns/dcoff.cpp |  2 +-
 .../src/plugin/npuw/partitioning/patterns/opt.cpp   | 12 ++++++------
 src/plugins/intel_npu/src/plugin/npuw/spatial.cpp   |  2 +-
 src/plugins/intel_npu/src/plugin/npuw/util.cpp      |  4 ++--
 6 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/src/plugins/intel_npu/src/plugin/npuw/just_sync_infer_request.cpp b/src/plugins/intel_npu/src/plugin/npuw/just_sync_infer_request.cpp
index 8d1c7c4a30acde..16e2a57e991e3a 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/just_sync_infer_request.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/just_sync_infer_request.cpp
@@ -783,9 +783,6 @@ void ov::npuw::JustInferRequest::unsafe_infer(std::size_t real_idx) {
 
         // Now copy the views from the output full-nway tensor to the output tensors
         for (std::size_t out_idx = 0u; out_idx < num_outputs; out_idx++) {
-            const auto& oport = comp_model_desc.compiled_model->outputs()[out_idx];
-            auto spatial_tensor_shape = oport.get_shape();
-
             auto in_view = ov::npuw::util::view(m_spatial_io[real_idx].output_tails.at(out_idx),
                                                 spatial.out_dim,
                                                 0,
diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.cpp
index d39c2363b1cd64..1cc47b568bcde9 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.cpp
@@ -247,8 +247,8 @@ DQMatMulConv::DQMatMulConv(const std::shared_ptr<ov::npuw::online::Snapshot>& sn
     auto callback = [=](ov::pass::pattern::Matcher& m) {
         auto& node_to_output = m.get_pattern_value_map();
 
-        auto matched_node_param = node_to_output.at(param);
-        auto matched_node_param2 = node_to_output.at(param2);
+        const auto& matched_node_param = node_to_output.at(param);
+        const auto& matched_node_param2 = node_to_output.at(param2);
         auto matched_node_transpose_in = node_to_output.at(transpose_in).get_node_shared_ptr();
         auto matched_node_transpose_out = node_to_output.at(transpose_out).get_node_shared_ptr();
 
diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
index a428d956bbad87..31093a34871db9 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
@@ -709,7 +709,7 @@ DCOFFPassReshape4::DCOFFPassReshape4(DCOffMode dcoff_mode, ov::element::Type dco
 
         auto matched_paramA = std::static_pointer_cast<ov::op::v0::Parameter>(matched_nodeA);
         auto matched_paramC = std::static_pointer_cast<ov::op::v0::Parameter>(matched_nodeC);
-        auto matched_out_mulply = node_to_output.at(mulply);
+        const auto& matched_out_mulply = node_to_output.at(mulply);
 
         if (ov::element::i4 == matched_paramA->get_element_type() &&
             (ov::element::f16 == matched_paramC->get_element_type() ||
diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp
index 968039e88758a1..db9666b9485546 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.cpp
@@ -166,8 +166,8 @@ DQMatMulCWi::DQMatMulCWi(Context::Ref ctx) {
         auto matched_node_cvtw = node_to_output.at(qcvtw).get_node_shared_ptr();
         auto matched_node_muls = node_to_output.at(qmuls).get_node_shared_ptr();
         auto matched_node_mmi = node_to_output.at(qmmi).get_node_shared_ptr();
-        auto matched_node_qcoeff_out = uat::_(node_to_output).at_or_at_or_at(qcvtc, reshapec, qcoeff);
-        auto matched_node_muls_out = uat::_(node_to_output).at_or_at(qcvtm, qmuls);
+        auto& matched_node_qcoeff_out = uat::_(node_to_output).at_or_at_or_at(qcvtc, reshapec, qcoeff);
+        auto& matched_node_muls_out = uat::_(node_to_output).at_or_at(qcvtm, qmuls);
 
         if (!ctx.get().mm_dq_full) {
             const auto& matm_mul_out_shape = matched_matmul->get_output_shape(0);
@@ -1432,7 +1432,7 @@ SliceLastMatmul::SliceLastMatmul() {
 
     auto callback = [=](ov::pass::pattern::Matcher& m) {
         auto& node_to_output = m.get_pattern_value_map();
-        auto matched_out_matmul = node_to_output.at(matmul);
+        auto& matched_out_matmul = node_to_output.at(matmul);
 
         auto shape = matched_out_matmul.get_node()->input(0).get_shape();
 
@@ -1468,7 +1468,7 @@ SliceLastMatmulAdd::SliceLastMatmulAdd() {
 
     auto callback = [=](ov::pass::pattern::Matcher& m) {
         auto& node_to_output = m.get_pattern_value_map();
-        auto matched_out_matmul = node_to_output.at(matmul);
+        auto& matched_out_matmul = node_to_output.at(matmul);
 
         auto shape = matched_out_matmul.get_node()->input(0).get_shape();
 
@@ -1504,7 +1504,7 @@ SliceLastMatmulTranspose::SliceLastMatmulTranspose() {
 
     auto callback = [=](ov::pass::pattern::Matcher& m) {
         auto& node_to_output = m.get_pattern_value_map();
-        auto matched_out_matmul = node_to_output.at(matmul);
+        auto& matched_out_matmul = node_to_output.at(matmul);
 
         auto shape = matched_out_matmul.get_node()->input(0).get_shape();
 
@@ -1542,7 +1542,7 @@ SliceLastMatmulMultiply::SliceLastMatmulMultiply() {
 
    auto callback = [=](ov::pass::pattern::Matcher& m) {
        auto& node_to_output = m.get_pattern_value_map();
-        auto matched_out_matmul = node_to_output.at(matmul);
+        auto& matched_out_matmul = node_to_output.at(matmul);
 
         auto shape = matched_out_matmul.get_node()->input(0).get_shape();
 
diff --git a/src/plugins/intel_npu/src/plugin/npuw/spatial.cpp b/src/plugins/intel_npu/src/plugin/npuw/spatial.cpp
index a7ea56dd3ff910..ca72023dfdc7de 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/spatial.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/spatial.cpp
@@ -13,7 +13,7 @@ ov::npuw::runtime::spatial::AttentionMask::AttentionMask(std::size_t param_idx,
 ov::npuw::runtime::spatial::Selector::Ptr ov::npuw::runtime::spatial::AttentionMask::find(
     const ov::ISyncInferRequest& rq) {
     auto is_attn_mask = [](const ov::Output<const ov::Node>& p) {
-        const auto shape = p.get_shape();
+        const auto& shape = p.get_shape();
         return p.get_node()->get_friendly_name() == "attention_mask" &&
                (shape.size() == 1 || (shape.size() == 2 && shape[0] == 1));
     };
diff --git a/src/plugins/intel_npu/src/plugin/npuw/util.cpp b/src/plugins/intel_npu/src/plugin/npuw/util.cpp
index e9cab91e60bdb0..ffefb747ffb18f 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/util.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/util.cpp
@@ -336,7 +336,7 @@ ov::SoPtr<ov::ITensor> ov::npuw::util::view(const ov::SoPtr<ov::ITensor>& src,
         view_shape.push_back(to[d] - from[d]);
     }
 
-    const auto strides = src->get_strides();
+    const auto& strides = src->get_strides();
     uint8_t* ptr = static_cast<uint8_t*>(src->data());
 
     // Shift PTR according to the strides
@@ -352,7 +352,7 @@ ov::SoPtr<ov::ITensor> ov::npuw::util::view(const ov::SoPtr<ov::ITensor>& src,
                                             std::size_t dim,
                                             std::size_t offset,
                                             std::size_t len) {
-    const auto shape = src->get_shape();
+    const auto& shape = src->get_shape();
     View view_start = View(shape.size(), 0u);
     View view_end = shape;
     view_start[dim] = offset;
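
For context on the checker: `AUTO_CAUSES_COPY` fires where a getter returns a (const) reference but the result is bound to a plain `auto`, which deduces a value type and silently copies the container; binding with `const auto&` (or `auto&`) keeps the reference, which is all this patch does. Below is a minimal, self-contained sketch of that before/after pattern; the `Node`/`get_shape()` names are made-up stand-ins, not the actual OpenVINO types touched here.

```cpp
// Standalone illustration of the AUTO_CAUSES_COPY pattern fixed in this patch.
#include <cstddef>
#include <iostream>
#include <vector>

struct Node {
    std::vector<std::size_t> shape{1, 32, 128};

    // Returns a reference to internal data, the way the shape/strides getters
    // in the patched code do.
    const std::vector<std::size_t>& get_shape() const { return shape; }
};

int main() {
    Node n;

    // Before: `auto` deduces std::vector<std::size_t> by value, so the whole
    // container is copied even though it is only read afterwards.
    auto copied = n.get_shape();         // AUTO_CAUSES_COPY

    // After: `const auto&` binds to the returned reference; no copy is made.
    const auto& viewed = n.get_shape();  // no copy

    std::cout << copied.size() << " " << viewed.size() << "\n";
    return 0;
}
```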