[NPUW] Recent coverity fix #26905

Merged
@@ -1719,7 +1719,7 @@ void Partitioner::optimize(const std::string& func_name) {
new_params.push_back(params_to_gather.pnew);
for (auto&& funcall : func_group.refs) {
auto new_elem_type = params_to_gather.pnew->get_element_type();
- auto new_shape = params_to_gather.pnew->get_shape();
+ const auto& new_shape = params_to_gather.pnew->get_shape();
funcall.get()._closure.push_back(ov::Tensor(new_elem_type, new_shape));
}
}
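For context, the change repeated throughout this PR replaces a by-value `auto` binding of a `get_shape()` result with a `const auto&` binding. Below is a minimal standalone sketch of the difference, using a stand-in `Parameter` type rather than the real OpenVINO classes and assuming the getter returns a `const` reference (as `ov::Node::get_shape()` does); the redundant copy is what Coverity reports, likely via its AUTO_CAUSES_COPY checker, although the report itself is not shown here.

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Stand-in for ov::Shape (a vector of dimensions) and for a node/parameter
// that exposes its shape by const reference.
using Shape = std::vector<std::size_t>;

struct Parameter {
    Shape shape{1, 2048, 4096};
    const Shape& get_shape() const { return shape; }
};

int main() {
    Parameter p;

    // The flagged form: `auto` deduces `Shape`, so the whole dimension
    // vector is copied just to read it.
    auto copied = p.get_shape();

    // The fix applied in this PR: bind a const reference instead;
    // no allocation or copy is made.
    const auto& referenced = p.get_shape();

    assert(&referenced == &p.shape);  // refers to the same object
    assert(&copied != &p.shape);      // independent copy
    return 0;
}
```

If a getter ever returned by value instead, the `const auto&` form would still be valid C++ (the temporary's lifetime is extended to that of the reference), so the rewrite is safe either way.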
@@ -50,7 +50,7 @@ Context::PPtr Context::concat(ov::ParameterVector&& v, std::size_t dim) {
const auto& first = v.front();
const auto first_shape = first->get_shape();
for (auto&& p : v) {
- const auto this_shape = p->get_shape();
+ const auto& this_shape = p->get_shape();
NPUW_ASSERT(first_shape.size() == this_shape.size());
for (std::size_t d = 0; d < first_shape.size(); d++) {
if (d != dim) {
@@ -80,8 +80,8 @@ Context::PPtr Context::unpack(Context::PPtr w, Context::PPtr z, Context::PPtr s,
}

Context::PPtr Context::unpack(Context::PPtr w, Context::PPtr s, ov::element::Type type) {
- const auto w_shape = w->get_shape();
- const auto s_shape = s->get_shape();
+ const auto& w_shape = w->get_shape();
+ const auto& s_shape = s->get_shape();

Context::PPtr new_param;
if (w_shape.size() == 3 && s_shape.size() == 3) {
@@ -100,8 +100,8 @@ Context::PPtr Context::unpack(Context::PPtr w, Context::PPtr s, ov::element::Type type) {
}

Context::PPtr Context::host_gather(Context::PPtr w, Context::PPtr ids) {
- const auto w_shape = w->get_shape();
- const auto ids_shape = ids->get_shape();
+ const auto& w_shape = w->get_shape();
+ const auto& ids_shape = ids->get_shape();

NPUW_ASSERT(w_shape.size() == 2);
NPUW_ASSERT(ids_shape.size() == 2);
@@ -473,7 +473,6 @@ DQMatMulGQiP::DQMatMulGQiP(Context::Ref ctx) {
auto qweight_shape = matched_qweight->output(0).get_shape();
auto qcoeff_shape = matched_qcoeff->output(0).get_shape();
auto act_shape = matched_out_mmi.get_shape();
- auto out_shape = matched_node_matmul->output(0).get_shape();
Contributor: it is not used?
Contributor Author: Yes


if (ov::element::i4 == matched_qweight->get_element_type() && qweight_shape.size() == 3 &&
ov::element::f32 == matched_qcoeff->get_element_type() && qcoeff_shape.size() == 3 &&
@@ -587,7 +586,6 @@ DQMatMulGQ2iP::DQMatMulGQ2iP(Context::Ref ctx) {
auto qweight_shape = matched_qweight->output(0).get_shape();
auto qcoeff_shape = matched_qcoeff->output(0).get_shape();
auto act_shape = matched_out_mmi.get_shape();
- auto out_shape = matched_node_matmul->output(0).get_shape();

if (ov::element::i4 == matched_qweight->get_element_type() && qweight_shape.size() == 3 &&
ov::element::f16 == matched_qcoeff->get_element_type() && qcoeff_shape.size() == 3 &&
@@ -818,7 +816,7 @@ DQLiftGatherAsymCW::DQLiftGatherAsymCW() {
auto matched_out_z = node_to_output.at(qzerop);
auto matched_out_s = node_to_output.at(qcoeff);
auto matched_out_ids = node_to_output.at(cvtids);
- auto matched_out_gather = node_to_output.at(gather);
+ const auto& matched_out_gather = node_to_output.at(gather);

// Replicate the compute part
auto gather_c = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, 0);
@@ -861,7 +859,7 @@ DQLiftGatherSymCW::DQLiftGatherSymCW() {
auto matched_out_w = node_to_output.at(qweight);
auto matched_out_s = node_to_output.at(qcoeff);
auto matched_out_ids = node_to_output.at(cvtids);
- auto matched_out_gather = node_to_output.at(gather);
+ const auto& matched_out_gather = node_to_output.at(gather);

// Create new gathers on W and S, connect respectively
auto new_cvt_w = std::make_shared<ov::op::v0::Convert>(matched_out_w, ov::element::f16);
@@ -903,7 +901,7 @@ DQLiftGatherSymGQ::DQLiftGatherSymGQ() {
auto matched_out_w = node_to_output.at(qweight);
auto matched_out_s = node_to_output.at(qcoeff);
auto matched_out_ids = node_to_output.at(cvtids);
- auto matched_out_gather = node_to_output.at(gather);
+ const auto& matched_out_gather = node_to_output.at(gather);

auto matched_gather_shape = matched_out_gather.get_shape();

@@ -1034,10 +1032,10 @@ HostGather::HostGather(Context::Ref ctx) {
auto callback = [=](ov::pass::pattern::Matcher& m) {
auto& node_to_output = m.get_pattern_value_map();
auto out_shape = node_to_output.at(qgthrw).get_shape();
- auto matched_out_qweight = node_to_output.at(qweight);
+ auto& matched_out_qweight = node_to_output.at(qweight);
Contributor: shouldn't be const, yes?
Contributor Author: Not sure, coverity requested auto&
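To unpack the exchange above: `node_to_output` is obtained here as a non-const reference to the pattern value map (see `auto& node_to_output = m.get_pattern_value_map();`), so `at()` returns a non-const reference and plain `auto&` binds to it directly; `const auto&` would bind just as well when the value is only read. Either form removes the copy that the original by-value `auto` made, which is all Coverity asks for. A small sketch with a stand-in map, not the real `ov::pass::pattern` types:

```cpp
#include <map>
#include <string>

int main() {
    // Stand-in for the pattern value map; the real type maps pattern nodes
    // to ov::Output<ov::Node> values.
    std::map<int, std::string> node_to_output{{0, "qweight output"}};

    // By-value `auto` copies the mapped value out of the map.
    auto copied = node_to_output.at(0);

    // `auto&` binds a (mutable) reference -- the form applied in this hunk.
    auto& by_ref = node_to_output.at(0);

    // `const auto&` also avoids the copy and is the stricter choice
    // when the value is only read, as in these matcher callbacks.
    const auto& by_cref = node_to_output.at(0);

    (void)copied;
    (void)by_ref;
    (void)by_cref;
    return 0;
}
```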

auto qweight_type = matched_out_qweight.get_element_type();

- auto matched_out_gather = node_to_output.at(qgthrw);
+ const auto& matched_out_gather = node_to_output.at(qgthrw);

auto sole_reader = [](ov::Output<ov::Node> out) {
const auto readers = out.get_target_inputs();
@@ -1050,7 +1048,7 @@ HostGather::HostGather(Context::Ref ctx) {
ov::is_type<ov::op::v0::Convert>(sole_reader(matched_out_gather)))) {
auto matched_node_qweight = node_to_output.at(qweight).get_node_shared_ptr();
auto matched_node_ids = node_to_output.at(pids).get_node_shared_ptr();
- auto matched_out_gthr = node_to_output.at(qgthrw);
+ const auto& matched_out_gthr = node_to_output.at(qgthrw);
auto matched_qweight = std::static_pointer_cast<ov::op::v0::Parameter>(matched_node_qweight);
auto matched_ids = std::static_pointer_cast<ov::op::v0::Parameter>(matched_node_ids);

@@ -1094,7 +1092,7 @@ HostGatherDQ::HostGatherDQ(Context::Ref ctx) {

auto callback = [=](ov::pass::pattern::Matcher& m) {
auto& node_to_output = m.get_pattern_value_map();
- auto matched_out_mul = node_to_output.at(qmul);
+ const auto& matched_out_mul = node_to_output.at(qmul);
auto out_shape = matched_out_mul.get_shape();

if (out_shape.size() != 3 && out_shape.size() != 4) {
@@ -1106,7 +1104,7 @@ HostGatherDQ::HostGatherDQ(Context::Ref ctx) {
// were Hs = hidden size, G is # of groups, N is the prompt size.
auto out_len = out_shape.size() == 3 ? out_shape[2] : out_shape[2] * out_shape[3];

- auto matched_out_qweight = node_to_output.at(qweight);
+ const auto& matched_out_qweight = node_to_output.at(qweight);
auto qweight_type = matched_out_qweight.get_element_type();

if (out_len >= 2048 && qweight_type == ov::element::i4) {
@@ -1173,8 +1171,6 @@ DQUnpackDictMatMulCWu::DQUnpackDictMatMulCWu(Context::Ref ctx) {
auto matched_result = std::static_pointer_cast<ov::op::v0::Result>(matched_node_res);

auto qcoeff_shape = matched_qcoeff->output(0).get_shape();
- auto qzerop_shape = matched_qzerop->output(0).get_shape();
- auto act_shape = matched_mmi.get_shape();

Contributor: not used?
Contributor Author: Yes

if (ov::element::u8 == matched_qweight->get_element_type() && qcoeff_shape[1] == 1 &&
!matched_matmul->get_transpose_a() && matched_matmul->get_transpose_b()) {
@@ -1228,8 +1224,7 @@ DQUnpackDictMatMulGQi::DQUnpackDictMatMulGQi(Context::Ref ctx) {
auto matched_matmul = std::static_pointer_cast<ov::op::v0::MatMul>(matched_node_matmul);
auto matched_result = std::static_pointer_cast<ov::op::v0::Result>(matched_node_res);

- auto qcoeff_shape = matched_qcoeff->output(0).get_shape();
- auto act_shape = matched_mmi.get_shape();
Contributor: also not used?
Contributor Author: Yes

+ const auto& qcoeff_shape = matched_qcoeff->output(0).get_shape();

if (ov::element::i4 == matched_qweight->get_element_type() && qcoeff_shape.size() == 3) {
auto new_cvt_a = std::make_shared<ov::op::v0::Convert>(matched_mmi, ov::element::f16);
6 changes: 3 additions & 3 deletions src/plugins/intel_npu/src/plugin/npuw/util.cpp
@@ -1539,14 +1539,14 @@ void ov::npuw::util::gather(const ov::SoPtr<ov::ITensor>& src,
NPUW_ASSERT(src_type == ov::element::f16 || src_type == ov::element::f32);
NPUW_ASSERT(src_type == dst_type);

- const auto idx_shape = idx->get_shape();
+ const auto& idx_shape = idx->get_shape();
NPUW_ASSERT(idx_shape.size() == 2);
NPUW_ASSERT(idx_shape[0] == 1);

- const auto src_shape = src->get_shape();
+ const auto& src_shape = src->get_shape();
NPUW_ASSERT(src_shape.size() == 2);

- const auto dst_shape = dst->get_shape();
+ const auto& dst_shape = dst->get_shape();
NPUW_ASSERT(dst_shape.size() == 3);
NPUW_ASSERT(src_shape[1] == dst_shape[2]);
