kernel: add closing comments to config endifs
Add a closing comment to each endif, naming the configuration
option to which the endif belongs.
This makes the code clearer when the configs need adaptation.

Signed-off-by: Simon Hein <[email protected]>
simhein authored and nashif committed Mar 25, 2024
1 parent 6266dc1 commit bcd1d19
Showing 49 changed files with 437 additions and 431 deletions.
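
As a minimal before/after sketch of the convention this commit applies (based on the prio_deadline field in include/zephyr/kernel/thread.h; shown for illustration only, not a verbatim excerpt of the diff):

/* Before: a bare #endif gives no hint which option it closes */
#ifdef CONFIG_SCHED_DEADLINE
	int prio_deadline;
#endif

/* After: the closing comment names the option being tested */
#ifdef CONFIG_SCHED_DEADLINE
	int prio_deadline;
#endif /* CONFIG_SCHED_DEADLINE */
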
16 changes: 8 additions & 8 deletions include/zephyr/kernel/internal/mm.h
@@ -39,7 +39,7 @@
(CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
#define Z_MEM_VM_OFFSET 0
#endif
#endif /* CONFIG_MMU */

#define Z_MEM_PHYS_ADDR(virt) ((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys) ((phys) + Z_MEM_VM_OFFSET)
@@ -70,26 +70,26 @@ static inline uintptr_t z_mem_phys_addr(void *virt)
__ASSERT(
#if CONFIG_KERNEL_VM_BASE != 0
(addr >= CONFIG_KERNEL_VM_BASE) &&
#endif
#endif /* CONFIG_KERNEL_VM_BASE != 0 */
#if (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE) != 0
(addr < (CONFIG_KERNEL_VM_BASE +
(CONFIG_KERNEL_VM_SIZE))),
#else
false,
#endif
#endif /* CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE != 0 */
"address %p not in permanent mappings", virt);
#else
/* Should be identity-mapped */
__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
(addr >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
#endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
#if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
(addr < (CONFIG_SRAM_BASE_ADDRESS +
(CONFIG_SRAM_SIZE * 1024UL))),
#else
false,
#endif
#endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
"physical address 0x%lx not in RAM",
(unsigned long)addr);
#endif /* CONFIG_MMU */
@@ -111,15 +111,15 @@ static inline void *z_mem_virt_addr(uintptr_t phys)
__ASSERT(
#if CONFIG_SRAM_BASE_ADDRESS != 0
(phys >= CONFIG_SRAM_BASE_ADDRESS) &&
#endif
#endif /* CONFIG_SRAM_BASE_ADDRESS != 0 */
#if (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0
(phys < (CONFIG_SRAM_BASE_ADDRESS +
(CONFIG_SRAM_SIZE * 1024UL))),
#else
false,
#endif
#endif /* (CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE * 1024UL)) != 0 */
"physical address 0x%lx not in RAM", (unsigned long)phys);
#endif
#endif /* CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK */

/* TODO add assertion that this page frame is pinned to boot mapping,
* the above check won't be sufficient with demand paging
2 changes: 1 addition & 1 deletion include/zephyr/kernel/internal/smp.h
@@ -8,4 +8,4 @@

void z_sched_ipi(void);

#endif
#endif /* ZEPHYR_INCLUDE_KERNEL_INTERNAL_SMP_H_ */
2 changes: 1 addition & 1 deletion include/zephyr/kernel/mm.h
@@ -11,7 +11,7 @@
#include <zephyr/toolchain.h>
#if defined(CONFIG_ARM_MMU) && defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/arm_mem.h>
#endif
#endif /* CONFIG_ARM_MMU && CONFIG_ARM64 */

#include <zephyr/kernel/internal/mm.h>

2 changes: 1 addition & 1 deletion include/zephyr/kernel/mm/demand_paging.h
@@ -47,7 +47,7 @@ struct k_mem_paging_stats_t {
#if !defined(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || defined(__DOXYGEN__)
/** Number of page faults while in ISR */
unsigned long in_isr;
#endif
#endif /* !CONFIG_DEMAND_PAGING_ALLOW_IRQ */
} pagefaults;

struct {
8 changes: 4 additions & 4 deletions include/zephyr/kernel/obj_core.h
@@ -76,7 +76,7 @@ struct k_obj_core;
#else
#define K_OBJ_CORE_INIT(objp, type) do { } while (0)
#define K_OBJ_CORE_LINK(objp) do { } while (0)
#endif
#endif /* CONFIG_OBJ_CORE */

/**
* INTERNAL_HIDDEN @endcond
@@ -114,7 +114,7 @@ struct k_obj_type {
#ifdef CONFIG_OBJ_CORE_STATS
/** Pointer to object core statistics descriptor */
struct k_obj_core_stats_desc *stats_desc;
#endif
#endif /* CONFIG_OBJ_CORE_STATS */
};

/** Object core structure */
@@ -123,7 +123,7 @@ struct k_obj_core {
struct k_obj_type *type; /**< Object type to which object belongs */
#ifdef CONFIG_OBJ_CORE_STATS
void *stats; /**< Pointer to kernel object's stats */
#endif
#endif /* CONFIG_OBJ_CORE_STATS */
};

/**
@@ -280,7 +280,7 @@ static inline void k_obj_core_stats_init(struct k_obj_core *obj_core,
{
obj_core->stats = stats;
}
#endif
#endif /* CONFIG_OBJ_CORE_STATS */

/**
* @brief Register kernel object for gathering statistics
4 changes: 2 additions & 2 deletions include/zephyr/kernel/stats.h
@@ -26,8 +26,8 @@ struct k_cycle_stats {
uint64_t longest; /**< \# of cycles in longest usage window */
uint32_t num_windows; /**< \# of usage windows */
/** @} */
#endif
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
bool track_usage; /**< true if gathering usage stats */
};

#endif
#endif /* ZEPHYR_INCLUDE_KERNEL_STATS_H_ */
56 changes: 28 additions & 28 deletions include/zephyr/kernel/thread.h
@@ -9,7 +9,7 @@

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
#include <zephyr/kernel/mm/demand_paging.h>
#endif
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#include <zephyr/kernel/stats.h>
#include <zephyr/arch/arch_interface.h>
@@ -38,7 +38,7 @@ struct __thread_entry {
void *parameter2;
void *parameter3;
};
#endif
#endif /* CONFIG_THREAD_MONITOR */

struct k_thread;

@@ -96,14 +96,14 @@ struct _thread_base {
#else /* Little Endian */
int8_t prio;
uint8_t sched_locked;
#endif
#endif /* CONFIG_BIG_ENDIAN */
};
uint16_t preempt;
};

#ifdef CONFIG_SCHED_DEADLINE
int prio_deadline;
#endif
#endif /* CONFIG_SCHED_DEADLINE */

uint32_t order_key;

@@ -117,15 +117,15 @@ struct _thread_base {
/* Recursive count of irq_lock() calls */
uint8_t global_lock_count;

#endif
#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_CPU_MASK
/* "May run on" bits for each CPU */
#if CONFIG_MP_MAX_NUM_CPUS <= 8
uint8_t cpu_mask;
#else
uint16_t cpu_mask;
#endif
#endif /* CONFIG_MP_MAX_NUM_CPUS */
#endif /* CONFIG_SCHED_CPU_MASK */

/* data returned by APIs */
@@ -134,17 +134,17 @@ struct _thread_base {
#ifdef CONFIG_SYS_CLOCK_EXISTS
/* this thread's entry in a timeout queue */
struct _timeout timeout;
#endif
#endif /* CONFIG_SYS_CLOCK_EXISTS */

#ifdef CONFIG_TIMESLICE_PER_THREAD
int32_t slice_ticks;
k_thread_timeslice_fn_t slice_expired;
void *slice_data;
#endif
#endif /* CONFIG_TIMESLICE_PER_THREAD */

#ifdef CONFIG_SCHED_THREAD_USAGE
struct k_cycle_stats usage; /* Track thread usage statistics */
#endif
#endif /* CONFIG_SCHED_THREAD_USAGE */
};

typedef struct _thread_base _thread_base_t;
@@ -190,9 +190,9 @@ struct _mem_domain_info {
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
int errno_var;
#endif
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
};
#endif
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
@@ -203,7 +203,7 @@ typedef struct k_thread_runtime_stats {
* as the total # of non-idle cycles. In the context of CPU statistics,
* it refers to the sum of non-idle + idle cycles.
*/
#endif
#endif /* CONFIG_SCHED_THREAD_USAGE */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
/*
@@ -216,7 +216,7 @@ typedef struct k_thread_runtime_stats {
uint64_t current_cycles; /* current # of non-idle cycles */
uint64_t peak_cycles; /* peak # of non-idle cycles */
uint64_t average_cycles; /* average # of non-idle cycles */
#endif
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
/*
@@ -226,7 +226,7 @@ typedef struct k_thread_runtime_stats {
*/

uint64_t idle_cycles;
#endif
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
!defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
@@ -262,7 +262,7 @@ struct k_thread {

#if defined(CONFIG_POLL)
struct z_poller poller;
#endif
#endif /* CONFIG_POLL */

#if defined(CONFIG_EVENTS)
struct k_thread *next_event_link;
@@ -272,36 +272,36 @@ struct k_thread {

/** true if timeout should not wake the thread */
bool no_wake_on_timeout;
#endif
#endif /* CONFIG_EVENTS */

#if defined(CONFIG_THREAD_MONITOR)
/** thread entry and parameters description */
struct __thread_entry entry;

/** next item in list of all threads */
struct k_thread *next_thread;
#endif
#endif /* CONFIG_THREAD_MONITOR */

#if defined(CONFIG_THREAD_NAME)
/** Thread name */
char name[CONFIG_THREAD_MAX_NAME_LEN];
#endif
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_THREAD_CUSTOM_DATA
/** crude thread-local storage */
void *custom_data;
#endif
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
struct _thread_userspace_local_data *userspace_local_data;
#endif
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
#ifndef CONFIG_USERSPACE
/** per-thread errno variable */
int errno_var;
#endif
#endif
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */

#if defined(CONFIG_THREAD_STACK_INFO)
/** Stack Info */
@@ -328,7 +328,7 @@ struct k_thread {

/** Context handle returned via arch_switch() */
void *switch_handle;
#endif
#endif /* CONFIG_USE_SWITCH */
/** resource pool */
struct k_heap *resource_pool;

@@ -340,21 +340,21 @@ struct k_thread {
#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
/** Paging statistics */
struct k_mem_paging_stats_t paging_stats;
#endif
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifdef CONFIG_PIPES
/** Pipe descriptor used with blocking k_pipe operations */
struct _pipe_desc pipe_desc;
#endif
#endif /* CONFIG_PIPES */

#ifdef CONFIG_OBJ_CORE_THREAD
struct k_obj_core obj_core;
#endif
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_SMP
/** threads waiting in k_thread_suspend() */
_wait_q_t halt_queue;
#endif
#endif /* CONFIG_SMP */

/** arch-specifics: must always be at the end */
struct _thread_arch arch;
@@ -363,4 +363,4 @@ struct k_thread {
typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;

#endif
#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */
14 changes: 7 additions & 7 deletions include/zephyr/kernel/thread_stack.h
@@ -92,7 +92,7 @@ static inline char *z_stack_ptr_align(char *ptr)
#define K_KERNEL_STACK_RESERVED ((size_t)ARCH_KERNEL_STACK_RESERVED)
#else
#define K_KERNEL_STACK_RESERVED ((size_t)0)
#endif
#endif /* ARCH_KERNEL_STACK_RESERVED */

#define Z_KERNEL_STACK_SIZE_ADJUST(size) (ROUND_UP(size, \
ARCH_STACK_PTR_ALIGN) + \
@@ -102,7 +102,7 @@ static inline char *z_stack_ptr_align(char *ptr)
#define Z_KERNEL_STACK_OBJ_ALIGN ARCH_KERNEL_STACK_OBJ_ALIGN
#else
#define Z_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#endif /* ARCH_KERNEL_STACK_OBJ_ALIGN */

#define Z_KERNEL_STACK_LEN(size) \
ROUND_UP(Z_KERNEL_STACK_SIZE_ADJUST(size), Z_KERNEL_STACK_OBJ_ALIGN)
@@ -232,7 +232,7 @@ static inline char *z_stack_ptr_align(char *ptr)
#else
#define K_KERNEL_PINNED_STACK_DEFINE(sym, size) \
Z_KERNEL_STACK_DEFINE_IN(sym, size, __kstackmem)
#endif
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

/**
* @brief Define a toplevel array of kernel stack memory regions
@@ -265,7 +265,7 @@ static inline char *z_stack_ptr_align(char *ptr)
#else
#define K_KERNEL_PINNED_STACK_ARRAY_DEFINE(sym, nmemb, size) \
Z_KERNEL_STACK_ARRAY_DEFINE_IN(sym, nmemb, size, __kstackmem)
#endif
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

/**
* @brief Define an embedded stack memory region
@@ -320,7 +320,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
#define K_THREAD_STACK_RESERVED ((size_t)(ARCH_THREAD_STACK_RESERVED))
#else
#define K_THREAD_STACK_RESERVED ((size_t)0U)
#endif
#endif /* ARCH_THREAD_STACK_RESERVED */

/**
* @brief Properly align the lowest address of a stack object
@@ -553,7 +553,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
#else
#define K_THREAD_PINNED_STACK_DEFINE(sym, size) \
K_THREAD_STACK_DEFINE(sym, size)
#endif
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

/**
* @brief Calculate size of stacks to be allocated in a stack array
@@ -611,7 +611,7 @@ static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
#else
#define K_THREAD_PINNED_STACK_ARRAY_DEFINE(sym, nmemb, size) \
K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
#endif
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

/**
* @brief Define an embedded stack memory region
