Skip to content

Commit

Permalink
[CPU] Enable and address google-* clang-tidy remarks
Browse files Browse the repository at this point in the history
except google-explicit-constructor
  • Loading branch information
aobolensk committed Jan 20, 2025
1 parent 1025c76 commit 98b8e04
Show file tree
Hide file tree
Showing 277 changed files with 5,938 additions and 3,159 deletions.
2 changes: 1 addition & 1 deletion src/common/snippets/src/utils/debug_caps_config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ void DebugCapsConfig::readProperties() {
if (env && *env)
return env;

return (const char*)nullptr;
return static_cast<const char*>(nullptr);
};

const char* envVarValue = nullptr;
Expand Down
10 changes: 7 additions & 3 deletions src/plugins/intel_cpu/src/.clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
### Scopes to be enabled:
#
# cppcoreguidelines-*,
# google-*,
# readability-*,
# modernize-*,
# bugprone-*,
Expand All @@ -26,7 +25,9 @@
# -bugprone-fold-init-type
# -bugprone-implicit-widening-of-multiplication-result
# -cppcoreguidelines-narrowing-conversions
# -google-readability-braces-around-statements
# -google-default-arguments,
# -google-explicit-constructor,
# -google-readability-casting,
# -readability-implicit-bool-conversion,
# -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
Expand All @@ -35,6 +36,7 @@
Checks: >
-*,
performance-*,
google-*,
modernize-pass-by-value,
cppcoreguidelines-prefer-member-initializer,
-bugprone-easily-swappable-parameters,
Expand All @@ -44,9 +46,11 @@ Checks: >
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-google-build-using-namespace,
-google-default-arguments,
-google-explicit-constructor,
-google-readability-casting,
-google-readability-todo,
-readability-braces-around-statements,
-google-readability-braces-around-statements,
-modernize-use-trailing-return-type,
-readability-identifier-length,
-readability-implicit-bool-conversion,
Expand Down
46 changes: 26 additions & 20 deletions src/plugins/intel_cpu/src/compiled_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,9 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
m_sub_memory_manager(std::move(sub_memory_manager)) {
m_mutex = std::make_shared<std::mutex>();
const auto& core = m_plugin->get_core();
if (!core)
if (!core) {
OPENVINO_THROW("Unable to get API version. Core is unavailable");
}

IStreamsExecutor::Config executor_config;
if (m_cfg.exclusiveAsyncRequests) {
Expand All @@ -81,10 +82,12 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
m_callback_executor = m_task_executor;
}

if (m_task_executor)
if (m_task_executor) {
set_task_executor(m_task_executor);
if (m_callback_executor)
}
if (m_callback_executor) {
set_callback_executor(m_callback_executor);
}

int streams = std::max(1, executor_config.get_streams());
std::vector<Task> tasks;
Expand Down Expand Up @@ -208,15 +211,17 @@ std::shared_ptr<ov::IAsyncInferRequest> CompiledModel::create_infer_request() co
}

std::shared_ptr<const ov::Model> CompiledModel::get_runtime_model() const {
if (m_graphs.empty())
if (m_graphs.empty()) {
OPENVINO_THROW("No graph was found");
}

return get_graph()._graph.dump();
}

ov::Any CompiledModel::get_property(const std::string& name) const {
if (m_graphs.empty())
if (m_graphs.empty()) {
OPENVINO_THROW("No graph was found");
}

if (name == ov::loaded_from_cache) {
return m_loaded_from_cache;
Expand Down Expand Up @@ -275,30 +280,30 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
return decltype(ov::model_name)::value_type(modelName);
} else if (name == ov::optimal_number_of_infer_requests) {
const auto streams = config.streamExecutorConfig.get_streams();
return decltype(ov::optimal_number_of_infer_requests)::value_type(
return static_cast<decltype(ov::optimal_number_of_infer_requests)::value_type>(
streams > 0 ? streams : 1); // ov::optimal_number_of_infer_requests has no negative values
} else if (name == ov::num_streams) {
const auto streams = config.streamExecutorConfig.get_streams();
return decltype(ov::num_streams)::value_type(
streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
} else if (name == ov::inference_num_threads) {
const auto num_threads = config.streamExecutorConfig.get_threads();
return decltype(ov::inference_num_threads)::value_type(num_threads);
return static_cast<decltype(ov::inference_num_threads)::value_type>(num_threads);
} else if (name == ov::enable_profiling.name()) {
const bool perfCount = config.collectPerfCounters;
return decltype(ov::enable_profiling)::value_type(perfCount);
return static_cast<decltype(ov::enable_profiling)::value_type>(perfCount);
} else if (name == ov::hint::inference_precision) {
return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision);
} else if (name == ov::hint::performance_mode) {
return decltype(ov::hint::performance_mode)::value_type(config.hintPerfMode);
return static_cast<decltype(ov::hint::performance_mode)::value_type>(config.hintPerfMode);
} else if (name == ov::log::level) {
return decltype(ov::log::level)::value_type(config.logLevel);
return static_cast<decltype(ov::log::level)::value_type>(config.logLevel);
} else if (name == ov::hint::enable_cpu_pinning.name()) {
const bool use_pin = config.enableCpuPinning;
return decltype(ov::hint::enable_cpu_pinning)::value_type(use_pin);
return static_cast<decltype(ov::hint::enable_cpu_pinning)::value_type>(use_pin);
} else if (name == ov::hint::enable_cpu_reservation.name()) {
const bool use_reserve = config.enableCpuReservation;
return decltype(ov::hint::enable_cpu_reservation)::value_type(use_reserve);
return static_cast<decltype(ov::hint::enable_cpu_reservation)::value_type>(use_reserve);
} else if (name == ov::hint::scheduling_core_type) {
const auto stream_mode = config.schedulingCoreType;
return stream_mode;
Expand All @@ -307,31 +312,32 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
return distribution_policy;
} else if (name == ov::hint::enable_hyper_threading.name()) {
const bool use_ht = config.enableHyperThreading;
return decltype(ov::hint::enable_hyper_threading)::value_type(use_ht);
return static_cast<decltype(ov::hint::enable_hyper_threading)::value_type>(use_ht);
} else if (name == ov::hint::execution_mode) {
return config.executionMode;
} else if (name == ov::hint::num_requests) {
return decltype(ov::hint::num_requests)::value_type(config.hintNumRequests);
return static_cast<decltype(ov::hint::num_requests)::value_type>(config.hintNumRequests);
} else if (name == ov::execution_devices) {
return decltype(ov::execution_devices)::value_type{m_plugin->get_device_name()};
} else if (name == ov::intel_cpu::denormals_optimization) {
return decltype(ov::intel_cpu::denormals_optimization)::value_type(config.denormalsOptMode ==
Config::DenormalsOptMode::DO_On);
return static_cast<decltype(ov::intel_cpu::denormals_optimization)::value_type>(
config.denormalsOptMode == Config::DenormalsOptMode::DO_On);
} else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type(
return static_cast<decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type>(
config.fcSparseWeiDecompressionRate);
} else if (name == ov::hint::dynamic_quantization_group_size) {
return decltype(ov::hint::dynamic_quantization_group_size)::value_type(config.fcDynamicQuantizationGroupSize);
return static_cast<decltype(ov::hint::dynamic_quantization_group_size)::value_type>(
config.fcDynamicQuantizationGroupSize);
} else if (name == ov::hint::kv_cache_precision) {
return decltype(ov::hint::kv_cache_precision)::value_type(config.kvCachePrecision);
} else if (name == ov::key_cache_precision) {
return decltype(ov::key_cache_precision)::value_type(config.keyCachePrecision);
} else if (name == ov::value_cache_precision) {
return decltype(ov::value_cache_precision)::value_type(config.valueCachePrecision);
} else if (name == ov::key_cache_group_size) {
return decltype(ov::key_cache_group_size)::value_type(config.keyCacheGroupSize);
return static_cast<decltype(ov::key_cache_group_size)::value_type>(config.keyCacheGroupSize);
} else if (name == ov::value_cache_group_size) {
return decltype(ov::value_cache_group_size)::value_type(config.valueCacheGroupSize);
return static_cast<decltype(ov::value_cache_group_size)::value_type>(config.valueCacheGroupSize);
}
OPENVINO_THROW("Unsupported property: ", name);
}
Expand Down
31 changes: 19 additions & 12 deletions src/plugins/intel_cpu/src/config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,8 +94,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
try {
ov::Any value = val.as<std::string>();
int val_i = value.as<int>();
if (val_i < 0)
if (val_i < 0) {
OPENVINO_THROW("invalid value.");
}
hintNumRequests = static_cast<uint32_t>(val_i);
} catch (const ov::Exception&) {
OPENVINO_THROW("Wrong value ",
Expand Down Expand Up @@ -278,14 +279,15 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
} else if (key == ov::intel_cpu::snippets_mode.name()) {
try {
auto const mode = val.as<ov::intel_cpu::SnippetsMode>();
if (mode == ov::intel_cpu::SnippetsMode::ENABLE)
if (mode == ov::intel_cpu::SnippetsMode::ENABLE) {
snippetsMode = SnippetsMode::Enable;
else if (mode == ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK)
} else if (mode == ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK) {
snippetsMode = SnippetsMode::IgnoreCallback;
else if (mode == ov::intel_cpu::SnippetsMode::DISABLE)
} else if (mode == ov::intel_cpu::SnippetsMode::DISABLE) {
snippetsMode = SnippetsMode::Disable;
else
} else {
OPENVINO_THROW("invalid value");
}
} catch (ov::Exception&) {
OPENVINO_THROW("Wrong value ",
val.as<std::string>(),
Expand Down Expand Up @@ -396,8 +398,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
inferencePrecision = ov::element::f16;
}
#endif
if (mayiuse(avx512_core_bf16))
if (mayiuse(avx512_core_bf16)) {
inferencePrecision = ov::element::bf16;
}
} else {
inferencePrecision = ov::element::undefined;
}
Expand Down Expand Up @@ -431,8 +434,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
}
}

if (!prop.empty())
if (!prop.empty()) {
_config.clear();
}

if (exclusiveAsyncRequests) { // Exclusive request feature disables the streams
streams = 1;
Expand All @@ -453,17 +457,20 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
}

void Config::updateProperties() {
if (!_config.empty())
if (!_config.empty()) {
return;
}

if (collectPerfCounters == true)
if (collectPerfCounters == true) {
_config.insert({ov::enable_profiling.name(), "YES"});
else
} else {
_config.insert({ov::enable_profiling.name(), "NO"});
if (exclusiveAsyncRequests == true)
}
if (exclusiveAsyncRequests == true) {
_config.insert({ov::internal::exclusive_async_requests.name(), "YES"});
else
} else {
_config.insert({ov::internal::exclusive_async_requests.name(), "NO"});
}

_config.insert({ov::device::id.name(), device_id});

Expand Down
28 changes: 16 additions & 12 deletions src/plugins/intel_cpu/src/cpu_memory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,9 @@ void Memory::load(const IMemory& src, bool ftz) const {

void Memory::nullify() {
void* dataPtr = getData();
if (dataPtr != nullptr)
if (dataPtr != nullptr) {
memset(dataPtr, 0, getDesc().getCurrentMemSize());
}
}

void Memory::redefineDesc(MemoryDescPtr desc) {
Expand Down Expand Up @@ -194,8 +195,9 @@ dnnl::memory Memory::DnnlMemPrimHandle::getPrim() const {

void* Memory::getData() const {
void* data = getDataNoThrow();
if (data == nullptr && m_pMemDesc->getShape().isStatic() && m_pMemDesc->getShape().getElementsCount() != 0)
if (data == nullptr && m_pMemDesc->getShape().isStatic() && m_pMemDesc->getShape().getElementsCount() != 0) {
OPENVINO_THROW("Memory has not been allocated");
}
return data;
}

Expand Down Expand Up @@ -492,8 +494,9 @@ dnnl::memory StaticMemory::getPrimitive() const {

void StaticMemory::nullify() {
void* dataPtr = getData();
if (dataPtr != nullptr)
if (dataPtr != nullptr) {
memset(dataPtr, 0, getSize());
}
}

StaticMemory::StaticMemoryBlock::StaticMemoryBlock(size_t size) : m_size(size) {
Expand Down Expand Up @@ -539,13 +542,14 @@ void StaticMemory::StaticMemoryBlock::unregisterMemory(Memory* memPtr) {
# if !defined(__NR_mbind) && defined(__x86_64__)
# define __NR_mbind 237
# endif
static long mbind(void* start,
unsigned long len,
int mode,
const unsigned long* nmask,
unsigned long maxnode,
unsigned flags) {
return syscall(__NR_mbind, (long)start, len, mode, (long)nmask, maxnode, flags);
// Thin wrapper around the raw mbind(2) NUMA memory-policy syscall.
// Used instead of libnuma's mbind() so the plugin has no hard dependency
// on libnuma; __NR_mbind is defined above when the libc headers lack it.
// Pointer arguments are passed to syscall() as integer register values,
// hence the reinterpret_casts (google-readability-casting compliant).
static int64_t mbind(void* start, uint64_t len, int mode, const uint64_t* nmask, uint64_t maxnode, unsigned flags) {
    const auto start_addr = reinterpret_cast<uint64_t>(start);
    const auto nmask_addr = reinterpret_cast<uint64_t>(nmask);
    return syscall(__NR_mbind, start_addr, len, mode, nmask_addr, maxnode, flags);
}
#endif

Expand All @@ -555,8 +559,8 @@ bool mbind_move(void* data, size_t size, int targetNode) {
auto pagesize = getpagesize();
auto page_count = (size + pagesize - 1) / pagesize;
char* pages = reinterpret_cast<char*>( // NOLINT(performance-no-int-to-ptr)
(((uintptr_t)data) & ~((uintptr_t)(pagesize - 1))));
unsigned long mask = 0;
((reinterpret_cast<uintptr_t>(data)) & ~(static_cast<uintptr_t>(pagesize - 1))));
uint64_t mask = 0;
unsigned flags = 0;
if (realNode < 0) {
// restore default policy
Expand Down
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/src/cpu_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ void Tensor::set_shape(ov::Shape new_shape) {
vec2str(shape.getStaticDims()),
" -> ",
new_shape.to_string());
if (shape.getStaticDims() == new_shape)
if (shape.getStaticDims() == new_shape) {
return;
}
}

auto desc = m_memptr->getDescPtr();
Expand Down
9 changes: 6 additions & 3 deletions src/plugins/intel_cpu/src/dnnl_extension_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -186,8 +186,9 @@ size_t DnnlExtensionUtils::getMemSizeForDnnlDesc(const dnnl::memory::desc& desc)
"Unexpected non zero offset for a dnnl blocked memory desc");

size_t size = desc.get_size();
if (size == DNNL_RUNTIME_SIZE_VAL)
if (size == DNNL_RUNTIME_SIZE_VAL) {
return MemoryDesc::UNDEFINED_SIZE;
}

return size;
}
Expand All @@ -207,17 +208,19 @@ DnnlMemoryDescPtr DnnlExtensionUtils::query_md(const const_dnnl_primitive_desc_t
auto query = dnnl::convert_to_c(what);
const auto* cdesc = dnnl_primitive_desc_query_md(pd, query, idx);

if (!cdesc)
if (!cdesc) {
OPENVINO_THROW("query_md failed for query=", query, " idx=", idx, ".");
}

return DnnlExtensionUtils::makeDescriptor(cdesc);
}

std::string DnnlExtensionUtils::query_impl_info_str(const const_dnnl_primitive_desc_t& pd) {
const char* res;
dnnl_status_t status = dnnl_primitive_desc_query(pd, dnnl_query_impl_info_str, 0, &res);
if (status != dnnl_success)
if (status != dnnl_success) {
OPENVINO_THROW("query_impl_info_str failed.");
}
return std::string(res);
}

Expand Down
Loading

0 comments on commit 98b8e04

Please sign in to comment.