diff --git a/drivers/power_domain/power_domain_gpio.c b/drivers/power_domain/power_domain_gpio.c index 15c60a65d75d..346d2937f2b2 100644 --- a/drivers/power_domain/power_domain_gpio.c +++ b/drivers/power_domain/power_domain_gpio.c @@ -37,7 +37,7 @@ static int pd_on_domain_visitor(const struct device *dev, void *context) struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ - if (!dev->pm || (dev->pm->domain != visitor_context->domain)) { + if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } diff --git a/drivers/power_domain/power_domain_gpio_monitor.c b/drivers/power_domain/power_domain_gpio_monitor.c index 55e44570553b..437493d03c46 100644 --- a/drivers/power_domain/power_domain_gpio_monitor.c +++ b/drivers/power_domain/power_domain_gpio_monitor.c @@ -35,7 +35,7 @@ static int pd_on_domain_visitor(const struct device *dev, void *context) struct pd_visitor_context *visitor_context = context; /* Only run action if the device is on the specified domain */ - if (!dev->pm || (dev->pm->domain != visitor_context->domain)) { + if (!dev->pm || (dev->pm_base->domain != visitor_context->domain)) { return 0; } diff --git a/include/zephyr/device.h b/include/zephyr/device.h index c55958924ff8..e581fe74d383 100644 --- a/include/zephyr/device.h +++ b/include/zephyr/device.h @@ -371,7 +371,9 @@ struct device_state { bool initialized : 1; }; +struct pm_device_base; struct pm_device; +struct pm_device_isr; #ifdef CONFIG_DEVICE_DEPS_DYNAMIC #define Z_DEVICE_DEPS_CONST @@ -409,7 +411,11 @@ struct device { * Reference to the device PM resources (only available if * @kconfig{CONFIG_PM_DEVICE} is enabled). */ - struct pm_device *pm; + union { + struct pm_device_base *pm_base; + struct pm_device *pm; + struct pm_device_isr *pm_isr; + }; #endif }; @@ -885,7 +891,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * @brief Initializer for @ref device. * * @param name_ Name of the device. - * @param pm_ Reference to @ref pm_device (optional). + * @param pm_ Reference to @ref pm_device_base (optional). * @param data_ Reference to device data. * @param config_ Reference to device config. * @param api_ Reference to device API ops. @@ -900,7 +906,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) .state = (state_), \ .data = (data_), \ IF_ENABLED(CONFIG_DEVICE_DEPS, (.deps = (deps_),)) /**/ \ - IF_ENABLED(CONFIG_PM_DEVICE, (.pm = (pm_),)) /**/ \ + IF_ENABLED(CONFIG_PM_DEVICE, (.pm_base = (pm_),)) /**/ \ } /** @@ -919,7 +925,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * software device). * @param dev_id Device identifier (used to name the defined @ref device). * @param name Name of the device. - * @param pm Reference to @ref pm_device associated with the device. + * @param pm Reference to @ref pm_device_base associated with the device. * (optional). * @param data Reference to device data. * @param config Reference to device config. @@ -991,7 +997,7 @@ static inline bool z_impl_device_is_ready(const struct device *dev) * @param dev_id Device identifier (used to name the defined @ref device). * @param name Name of the device. * @param init_fn Device init function. - * @param pm Reference to @ref pm_device associated with the device. + * @param pm Reference to @ref pm_device_base associated with the device. * (optional). * @param data Reference to device data. * @param config Reference to device config. 
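With this change the pm pointer becomes one member of an anonymous union, so existing dev->pm users keep compiling while common code reads the shared fields through dev->pm_base and ISR-safe code uses dev->pm_isr. A minimal sketch of how code can dispatch on the new flag; the helper and the two branches are illustrative, not part of this patch:

#include <zephyr/device.h>
#include <zephyr/pm/device.h>

/* Sketch: dev->pm_base always points at the common struct pm_device_base,
 * so the ISR-safe flag can be read without knowing which concrete PM
 * structure (struct pm_device or struct pm_device_isr) the device was
 * defined with. The helper below is illustrative only.
 */
static bool example_pm_is_isr_safe(const struct device *dev)
{
	return (dev->pm_base != NULL) &&
	       atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE);
}

static void example_dispatch(const struct device *dev)
{
	if (example_pm_is_isr_safe(dev)) {
		struct pm_device_isr *pm = dev->pm_isr; /* spinlock-based resources */

		ARG_UNUSED(pm);
	} else {
		struct pm_device *pm = dev->pm; /* semaphore/work-queue resources */

		ARG_UNUSED(pm);
	}
}

This mirrors how pm_device_runtime_get() further down selects the synchronous path.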
diff --git a/include/zephyr/pm/device.h b/include/zephyr/pm/device.h index ab54fd677fe7..5b6cf0cb33c8 100644 --- a/include/zephyr/pm/device.h +++ b/include/zephyr/pm/device.h @@ -50,10 +50,22 @@ enum pm_device_flag { PM_DEVICE_FLAG_PD, /** Indicates if device runtime PM should be automatically enabled */ PM_DEVICE_FLAG_RUNTIME_AUTO, + /** Indicates that device runtime PM supports suspending and resuming from any context. */ + PM_DEVICE_FLAG_ISR_SAFE, }; /** @endcond */ +/** @brief Flag indicating that the runtime PM API for the device can be called from any context. + * + * If the @ref PM_DEVICE_ISR_SAFE flag is used in the device definition, it indicates that PM actions + * are synchronous and can be executed from any context. This approach can be used when suspend and + * resume operations are short, since they are executed in a critical section. This mode requires + * fewer resources (roughly 80 bytes less RAM) and allows device runtime PM to be used from any + * context (including interrupts). + */ +#define PM_DEVICE_ISR_SAFE 1 + /** @brief Device power states. */ enum pm_device_state { /** Device is in active or regular state. */ @@ -122,8 +134,37 @@ typedef bool (*pm_device_action_failed_cb_t)(const struct device *dev, /** * @brief Device PM info + * + * The structure holds the fields common to the two PM device variants: generic and + * synchronous. + */ +struct pm_device_base { + /** Device PM status flags. */ + atomic_t flags; + /** Device power state */ + enum pm_device_state state; + /** Device PM action callback */ + pm_device_action_cb_t action_cb; +#if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) + /** Device usage count */ + uint32_t usage; +#endif /* CONFIG_PM_DEVICE_RUNTIME */ +#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN + /** Power Domain it belongs */ + const struct device *domain; +#endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ +}; + +/** + * @brief Runtime PM info for a device with generic PM. + * + * Generic PM covers suspend and resume operations that can be blocking, + * long lasting or asynchronous. The runtime PM API is limited when used from + * interrupt context. */ struct pm_device { + /** Base info. */ + struct pm_device_base base; #if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) /** Pointer to the device */ const struct device *dev; @@ -131,23 +172,31 @@ struct pm_device { struct k_sem lock; /** Event var to listen to the sync request events */ struct k_event event; - /** Device usage count */ - uint32_t usage; /** Work object for asynchronous calls */ struct k_work_delayable work; #endif /* CONFIG_PM_DEVICE_RUNTIME */ -#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - /** Power Domain it belongs */ - const struct device *domain; -#endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ - /* Device PM status flags. */ - atomic_t flags; - /** Device power state */ - enum pm_device_state state; - /** Device PM action callback */ - pm_device_action_cb_t action_cb; }; +/** + * @brief Runtime PM info for a device with synchronous PM. + * + * Synchronous PM can be used with devices whose suspend and resume operations are + * short and non-blocking, so they can be performed in a critical section. + * In that case the runtime PM API can be used from any context. + */ +struct pm_device_isr { + /** Base info. */ + struct pm_device_base base; +#if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__) + /** Lock to synchronize the synchronous get/put operations */ + struct k_spinlock lock; +#endif +}; +
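As the struct documentation above implies, the whole contract of pm_device_isr is that the PM action callback finishes quickly inside a critical section. A hedged sketch of such a callback; the device name, config layout and register offset are hypothetical:

#include <errno.h>
#include <zephyr/device.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/sys_io.h>

#define MY_DEV_ENABLE_OFFSET 0x500U /* hypothetical register offset */

struct my_dev_config {
	uintptr_t base; /* hypothetical register block address */
};

/* Both actions are single register writes: short and non-blocking, so they
 * are safe to run under the pm_device_isr spinlock from any context.
 */
static int my_dev_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct my_dev_config *cfg = dev->config;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		sys_write32(1U, cfg->base + MY_DEV_ENABLE_OFFSET);
		return 0;
	case PM_DEVICE_ACTION_SUSPEND:
		sys_write32(0U, cfg->base + MY_DEV_ENABLE_OFFSET);
		return 0;
	default:
		return -ENOTSUP;
	}
}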
+/* Base part must be the first element. */ +BUILD_ASSERT(offsetof(struct pm_device, base) == 0); +BUILD_ASSERT(offsetof(struct pm_device_isr, base) == 0); + /** @cond INTERNAL_HIDDEN */ #ifdef CONFIG_PM_DEVICE_RUNTIME @@ -167,7 +216,7 @@ struct pm_device { #endif /* CONFIG_PM_DEVICE_POWER_DOMAIN */ /** - * @brief Utility macro to initialize #pm_device flags + * @brief Utility macro to initialize #pm_device_base flags * * @param node_id Devicetree node for the initialized device (can be invalid). */ @@ -188,17 +237,34 @@ struct pm_device { * @note #DT_PROP_OR is used to retrieve the wakeup_source property because * it may not be defined on all devices. * - * @param obj Name of the #pm_device structure being initialized. + * @param obj Name of the #pm_device_base structure being initialized. + * @param node_id Devicetree node for the initialized device (can be invalid). + * @param pm_action_cb Device PM control callback function. + * @param _flags Additional flags passed to the structure. + */ +#define Z_PM_DEVICE_BASE_INIT(obj, node_id, pm_action_cb, _flags) \ + { \ + .action_cb = pm_action_cb, \ + .state = PM_DEVICE_STATE_ACTIVE, \ + .flags = ATOMIC_INIT(Z_PM_DEVICE_FLAGS(node_id) | (_flags)), \ + Z_PM_DEVICE_POWER_DOMAIN_INIT(node_id) \ + } + +/** + * @brief Utility macro to initialize #pm_device. + * + * @note #DT_PROP_OR is used to retrieve the wakeup_source property because + * it may not be defined on all devices. + * + * @param obj Name of the #pm_device structure being initialized. + * @param node_id Devicetree node for the initialized device (can be invalid). + * @param pm_action_cb Device PM control callback function. */ -#define Z_PM_DEVICE_INIT(obj, node_id, pm_action_cb) \ - { \ - Z_PM_DEVICE_RUNTIME_INIT(obj) \ - .action_cb = pm_action_cb, \ - .state = PM_DEVICE_STATE_ACTIVE, \ - .flags = ATOMIC_INIT(Z_PM_DEVICE_FLAGS(node_id)), \ - Z_PM_DEVICE_POWER_DOMAIN_INIT(node_id) \ +#define Z_PM_DEVICE_INIT(obj, node_id, pm_action_cb, isr_safe) \ + { \ + .base = Z_PM_DEVICE_BASE_INIT(obj, node_id, pm_action_cb, \ + isr_safe ? BIT(PM_DEVICE_FLAG_ISR_SAFE) : 0), \ + COND_CODE_1(isr_safe, (), (Z_PM_DEVICE_RUNTIME_INIT(obj))) \ } /** @@ -231,21 +297,22 @@ struct pm_device { * @param dev_id Device id. * @param pm_action_cb PM control callback. */ -#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE_SLOT(dev_id); \ - static struct pm_device Z_PM_DEVICE_NAME(dev_id) = \ - Z_PM_DEVICE_INIT(Z_PM_DEVICE_NAME(dev_id), node_id, \ - pm_action_cb) +#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb, isr_safe) \ + Z_PM_DEVICE_DEFINE_SLOT(dev_id); \ + static struct COND_CODE_1(isr_safe, (pm_device_isr), (pm_device)) \ + Z_PM_DEVICE_NAME(dev_id) = \ + Z_PM_DEVICE_INIT(Z_PM_DEVICE_NAME(dev_id), node_id, \ + pm_action_cb, isr_safe) /** * Get a reference to the device PM resources. * * @param dev_id Device id. */ -#define Z_PM_DEVICE_GET(dev_id) (&Z_PM_DEVICE_NAME(dev_id)) +#define Z_PM_DEVICE_GET(dev_id) ((struct pm_device_base *)&Z_PM_DEVICE_NAME(dev_id)) #else -#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb) +#define Z_PM_DEVICE_DEFINE(node_id, dev_id, pm_action_cb, isr_safe) #define Z_PM_DEVICE_GET(dev_id) NULL #endif /* CONFIG_PM_DEVICE */
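For orientation, when isr_safe is 1 the internal machinery above boils down to roughly the following. This is an approximation: the registration slot, the devicetree-derived flags and the exact name produced by Z_PM_DEVICE_NAME() are omitted or made up here.

/* Approximate result of Z_PM_DEVICE_DEFINE(node_id, my_dev, my_dev_pm_action, 1).
 * "__pm_device_my_dev" stands in for whatever Z_PM_DEVICE_NAME(my_dev) yields.
 */
static struct pm_device_isr __pm_device_my_dev = {
	.base = {
		.action_cb = my_dev_pm_action,
		.state = PM_DEVICE_STATE_ACTIVE,
		.flags = ATOMIC_INIT(BIT(PM_DEVICE_FLAG_ISR_SAFE)),
	},
	/* No dev pointer, semaphore, event or delayable work: only the base
	 * part plus a spinlock, which is where the documented RAM saving of
	 * the ISR-safe variant comes from.
	 */
};

Z_PM_DEVICE_GET() then hands this object out as a struct pm_device_base pointer, which is exactly what the new dev->pm_base union member expects.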
@@ -258,11 +325,13 @@ struct pm_device { * * @param dev_id Device id. * @param pm_action_cb PM control callback. + * @param ... Optional flag indicating that the device PM is ISR safe. Use @ref PM_DEVICE_ISR_SAFE or 0. * * @see #PM_DEVICE_DT_DEFINE, #PM_DEVICE_DT_INST_DEFINE */ -#define PM_DEVICE_DEFINE(dev_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE(DT_INVALID_NODE, dev_id, pm_action_cb) +#define PM_DEVICE_DEFINE(dev_id, pm_action_cb, ...) \ + Z_PM_DEVICE_DEFINE(DT_INVALID_NODE, dev_id, pm_action_cb, \ + COND_CODE_1(IS_EMPTY(__VA_ARGS__), (0), (__VA_ARGS__))) /** * Define device PM resources for the given node identifier. @@ -271,12 +340,13 @@ struct pm_device { * * @param node_id Node identifier. * @param pm_action_cb PM control callback. + * @param ... Optional flag indicating that the device PM is ISR safe. Use @ref PM_DEVICE_ISR_SAFE or 0. * * @see #PM_DEVICE_DT_INST_DEFINE, #PM_DEVICE_DEFINE */ -#define PM_DEVICE_DT_DEFINE(node_id, pm_action_cb) \ - Z_PM_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), \ - pm_action_cb) +#define PM_DEVICE_DT_DEFINE(node_id, pm_action_cb, ...) \ + Z_PM_DEVICE_DEFINE(node_id, Z_DEVICE_DT_DEV_ID(node_id), pm_action_cb, \ + COND_CODE_1(IS_EMPTY(__VA_ARGS__), (0), (__VA_ARGS__))) /** * Define device PM resources for the given instance. @@ -285,13 +355,15 @@ struct pm_device { * * @param idx Instance index. * @param pm_action_cb PM control callback. + * @param ... Optional flag indicating that the device PM is ISR safe. Use @ref PM_DEVICE_ISR_SAFE or 0. * * @see #PM_DEVICE_DT_DEFINE, #PM_DEVICE_DEFINE */ -#define PM_DEVICE_DT_INST_DEFINE(idx, pm_action_cb) \ +#define PM_DEVICE_DT_INST_DEFINE(idx, pm_action_cb, ...) \ Z_PM_DEVICE_DEFINE(DT_DRV_INST(idx), \ Z_DEVICE_DT_DEV_ID(DT_DRV_INST(idx)), \ - pm_action_cb) + pm_action_cb, \ + COND_CODE_1(IS_EMPTY(__VA_ARGS__), (0), (__VA_ARGS__))) /** * @brief Obtain a reference to the device PM resources for the given device. @@ -393,7 +465,7 @@ int pm_device_state_get(const struct device *dev, */ static inline void pm_device_init_suspended(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; pm->state = PM_DEVICE_STATE_SUSPENDED; } @@ -413,7 +485,7 @@ static inline void pm_device_init_suspended(const struct device *dev) */ static inline void pm_device_init_off(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; pm->state = PM_DEVICE_STATE_OFF; } diff --git a/kernel/include/kernel_offsets.h b/kernel/include/kernel_offsets.h index f7676438d9eb..5644dbb15753 100644 --- a/kernel/include/kernel_offsets.h +++ b/kernel/include/kernel_offsets.h @@ -86,7 +86,7 @@ GEN_ABSOLUTE_SYM(_DEVICE_STRUCT_PM_OFFSET, /* member offsets in the pm_device structure.
Used in image post-processing */ GEN_ABSOLUTE_SYM(_PM_DEVICE_STRUCT_FLAGS_OFFSET, - offsetof(struct pm_device, flags)); + offsetof(struct pm_device_base, flags)); GEN_ABSOLUTE_SYM(_PM_DEVICE_FLAG_PD, PM_DEVICE_FLAG_PD); diff --git a/subsys/pm/device.c b/subsys/pm/device.c index 15e8085773b5..ca3816fa93bd 100644 --- a/subsys/pm/device.c +++ b/subsys/pm/device.c @@ -42,7 +42,7 @@ const char *pm_device_state_str(enum pm_device_state state) int pm_device_action_run(const struct device *dev, enum pm_device_action action) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; int ret; if (pm == NULL) { @@ -139,13 +139,13 @@ static int power_domain_add_or_remove(const struct device *dev, while (rv[i] != Z_DEVICE_DEPS_ENDS) { if (add == false) { if (rv[i] == dev_handle) { - dev->pm->domain = NULL; + dev->pm_base->domain = NULL; rv[i] = DEVICE_HANDLE_NULL; return 0; } } else { if (rv[i] == DEVICE_HANDLE_NULL) { - dev->pm->domain = domain; + dev->pm_base->domain = domain; rv[i] = dev_handle; return 0; } @@ -212,7 +212,7 @@ void pm_device_children_action_run(const struct device *dev, int pm_device_state_get(const struct device *dev, enum pm_device_state *state) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return -ENOSYS; @@ -231,7 +231,7 @@ bool pm_device_is_any_busy(void) devc = z_device_get_all_static(&devs); for (const struct device *dev = devs; dev < (devs + devc); dev++) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { continue; @@ -247,7 +247,7 @@ bool pm_device_is_any_busy(void) bool pm_device_is_busy(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -258,7 +258,7 @@ bool pm_device_is_busy(const struct device *dev) void pm_device_busy_set(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return; @@ -269,7 +269,7 @@ void pm_device_busy_set(const struct device *dev) void pm_device_busy_clear(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return; @@ -281,7 +281,7 @@ void pm_device_busy_clear(const struct device *dev) bool pm_device_wakeup_enable(const struct device *dev, bool enable) { atomic_val_t flags, new_flags; - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -305,7 +305,7 @@ bool pm_device_wakeup_enable(const struct device *dev, bool enable) bool pm_device_wakeup_is_enabled(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -317,7 +317,7 @@ bool pm_device_wakeup_is_enabled(const struct device *dev) bool pm_device_wakeup_is_capable(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -329,7 +329,7 @@ bool pm_device_wakeup_is_capable(const struct device *dev) void pm_device_state_lock(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if ((pm != NULL) && !pm_device_runtime_is_enabled(dev)) { atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_STATE_LOCKED); @@ -338,7 +338,7 @@ void pm_device_state_lock(const struct device *dev) void pm_device_state_unlock(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = 
dev->pm_base; if (pm != NULL) { atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_STATE_LOCKED); @@ -347,7 +347,7 @@ void pm_device_state_unlock(const struct device *dev) bool pm_device_state_is_locked(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -360,7 +360,7 @@ bool pm_device_state_is_locked(const struct device *dev) bool pm_device_on_power_domain(const struct device *dev) { #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; if (pm == NULL) { return false; @@ -375,14 +375,14 @@ bool pm_device_on_power_domain(const struct device *dev) bool pm_device_is_powered(const struct device *dev) { #ifdef CONFIG_PM_DEVICE_POWER_DOMAIN - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; /* If a device doesn't support PM or is not under a PM domain, * assume it is always powered on. */ return (pm == NULL) || (pm->domain == NULL) || - (pm->domain->pm->state == PM_DEVICE_STATE_ACTIVE); + (pm->domain->pm_base->state == PM_DEVICE_STATE_ACTIVE); #else ARG_UNUSED(dev); return true; @@ -392,7 +392,7 @@ bool pm_device_is_powered(const struct device *dev) int pm_device_driver_init(const struct device *dev, pm_device_action_cb_t action_cb) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; int rc = 0; /* Work only needs to be performed if the device is powered */ diff --git a/subsys/pm/device_runtime.c b/subsys/pm/device_runtime.c index f7d9b7a8369c..8fc36bed6327 100644 --- a/subsys/pm/device_runtime.c +++ b/subsys/pm/device_runtime.c @@ -52,7 +52,7 @@ static int runtime_suspend(const struct device *dev, bool async, /* * Early return if device runtime is not enabled. 
*/ - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { return 0; } @@ -65,30 +65,30 @@ static int runtime_suspend(const struct device *dev, bool async, } } - if (pm->usage == 0U) { + if (pm->base.usage == 0U) { LOG_WRN("Unbalanced suspend"); ret = -EALREADY; goto unlock; } - pm->usage--; - if (pm->usage > 0U) { + pm->base.usage--; + if (pm->base.usage > 0U) { goto unlock; } if (async && !k_is_pre_kernel()) { /* queue suspend */ - pm->state = PM_DEVICE_STATE_SUSPENDING; + pm->base.state = PM_DEVICE_STATE_SUSPENDING; (void)k_work_schedule(&pm->work, delay); } else { /* suspend now */ - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); if (ret < 0) { - pm->usage++; + pm->base.usage++; goto unlock; } - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } unlock: @@ -105,16 +105,16 @@ static void runtime_suspend_work(struct k_work *work) struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work); - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); (void)k_sem_take(&pm->lock, K_FOREVER); if (ret < 0) { - pm->usage++; - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.usage++; + pm->base.state = PM_DEVICE_STATE_ACTIVE; } else { - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } - k_event_set(&pm->event, BIT(pm->state)); + k_event_set(&pm->event, BIT(pm->base.state)); k_sem_give(&pm->lock); /* @@ -122,13 +122,47 @@ static void runtime_suspend_work(struct k_work *work) * finishes its operation */ if ((ret == 0) && - atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { - (void)pm_device_runtime_put(PM_DOMAIN(pm)); + atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) { + (void)pm_device_runtime_put(PM_DOMAIN(&pm->base)); } __ASSERT(ret == 0, "Could not suspend device (%d)", ret); } +static int get_sync_locked(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + uint32_t flags = pm->base.flags; + + if (pm->base.usage == 0) { + if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) { + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) { + ret = pm_device_runtime_get(domain); + if (ret < 0) { + return ret; + } + } else { + return -EWOULDBLOCK; + } + } + + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); + if (ret < 0) { + return ret; + } + pm->base.state = PM_DEVICE_STATE_ACTIVE; + } else { + ret = 0; + } + + pm->base.usage++; + + return ret; +} + int pm_device_runtime_get(const struct device *dev) { int ret = 0; @@ -143,10 +177,19 @@ int pm_device_runtime_get(const struct device *dev) /* * Early return if device runtime is not enabled. */ - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { return 0; } + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr *pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = get_sync_locked(dev); + k_spin_unlock(&pm_sync->lock, k); + goto end; + } + if (!k_is_pre_kernel()) { ret = k_sem_take(&pm->lock, k_is_in_isr() ?
K_NO_WAIT : K_FOREVER); if (ret < 0) { @@ -154,7 +197,7 @@ int pm_device_runtime_get(const struct device *dev) } } - if (k_is_in_isr() && (pm->state == PM_DEVICE_STATE_SUSPENDING)) { + if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) { ret = -EWOULDBLOCK; goto unlock; } @@ -163,31 +206,33 @@ int pm_device_runtime_get(const struct device *dev) * If the device is under a power domain, the domain has to be get * first. */ - if (PM_DOMAIN(pm) != NULL) { - ret = pm_device_runtime_get(PM_DOMAIN(pm)); + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain != NULL) { + ret = pm_device_runtime_get(domain); if (ret != 0) { goto unlock; } /* Check if powering up this device failed */ - if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) { - (void)pm_device_runtime_put(PM_DOMAIN(pm)); + if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) { + (void)pm_device_runtime_put(domain); ret = -EAGAIN; goto unlock; } /* Power domain successfully claimed */ - atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_PD_CLAIMED); + atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED); } - pm->usage++; + pm->base.usage++; /* * Check if the device has a pending suspend operation (not started * yet) and cancel it. This way we avoid unnecessary operations because * the device is actually active. */ - if ((pm->state == PM_DEVICE_STATE_SUSPENDING) && + if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; goto unlock; } @@ -196,7 +241,7 @@ int pm_device_runtime_get(const struct device *dev) * If the device is already suspending there is * nothing else we can do but wait until it finishes. 
*/ - while (pm->state == PM_DEVICE_STATE_SUSPENDING) { + while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { k_sem_give(&pm->lock); k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER); @@ -205,45 +250,95 @@ int pm_device_runtime_get(const struct device *dev) } } - if (pm->usage > 1U) { + if (pm->base.usage > 1U) { goto unlock; } - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); if (ret < 0) { - pm->usage--; + pm->base.usage--; goto unlock; } - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; unlock: if (!k_is_pre_kernel()) { k_sem_give(&pm->lock); } +end: SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret); return ret; } + +static int put_sync_locked(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + uint32_t flags = pm->base.flags; + + if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) { + return 0; + } + + if (pm->base.usage == 0U) { + return -EALREADY; + } + + pm->base.usage--; + if (pm->base.usage == 0U) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); + if (ret < 0) { + return ret; + } + pm->base.state = PM_DEVICE_STATE_SUSPENDED; + + if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) { + const struct device *domain = PM_DOMAIN(&pm->base); + + if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) { + ret = put_sync_locked(domain); + } else { + ret = -EWOULDBLOCK; + } + } + } else { + ret = 0; + } + + return ret; +} + int pm_device_runtime_put(const struct device *dev) { int ret; - if (dev->pm == NULL) { + if (dev->pm_base == NULL) { return 0; } SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev); - ret = runtime_suspend(dev, false, K_NO_WAIT); - /* - * Now put the domain - */ - if ((ret == 0) && - atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { - ret = pm_device_runtime_put(PM_DOMAIN(dev->pm)); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr *pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = put_sync_locked(dev); + + k_spin_unlock(&pm_sync->lock, k); + } else { + ret = runtime_suspend(dev, false, K_NO_WAIT); + + /* + * Now put the domain + */ + if ((ret == 0) && + atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) { + ret = pm_device_runtime_put(PM_DOMAIN(dev->pm_base)); + } } SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret); @@ -254,12 +349,21 @@ int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay) { int ret; - if (dev->pm == NULL) { + if (dev->pm_base == NULL) { return 0; } SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay); - ret = runtime_suspend(dev, true, delay); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + struct pm_device_isr *pm_sync = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm_sync->lock); + + ret = put_sync_locked(dev); + + k_spin_unlock(&pm_sync->lock, k); + } else { + ret = runtime_suspend(dev, true, delay); + } SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret); return ret; @@ -268,7 +372,7 @@ int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay) __boot_func int pm_device_runtime_auto_enable(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; /* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */ if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) { @@ -277,6 +381,36 @@ int
pm_device_runtime_auto_enable(const struct device *dev) return pm_device_runtime_enable(dev); } +static int runtime_enable_sync(const struct device *dev) +{ + int ret; + struct pm_device_isr *pm = dev->pm_isr; + k_spinlock_key_t k = k_spin_lock(&pm->lock); + + /* Because context is locked we can access flags directly. */ + if (pm->base.flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + ret = 0; + goto unlock; + } + + if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND); + if (ret < 0) { + goto unlock; + } + + pm->base.state = PM_DEVICE_STATE_SUSPENDED; + } else { + ret = 0; + } + + pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); + pm->base.usage = 0U; +unlock: + k_spin_unlock(&pm->lock, k); + return ret; +} + int pm_device_runtime_enable(const struct device *dev) { int ret = 0; @@ -293,11 +427,16 @@ int pm_device_runtime_enable(const struct device *dev) goto end; } + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + ret = runtime_enable_sync(dev); + goto end; + } + if (!k_is_pre_kernel()) { (void)k_sem_take(&pm->lock, K_FOREVER); } - if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { goto unlock; } @@ -307,17 +446,17 @@ int pm_device_runtime_enable(const struct device *dev) k_work_init_delayable(&pm->work, runtime_suspend_work); } - if (pm->state == PM_DEVICE_STATE_ACTIVE) { - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); + if (pm->base.state == PM_DEVICE_STATE_ACTIVE) { + ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND); if (ret < 0) { goto unlock; } - pm->state = PM_DEVICE_STATE_SUSPENDED; + pm->base.state = PM_DEVICE_STATE_SUSPENDED; } - pm->usage = 0U; + pm->base.usage = 0U; - atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); + atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); unlock: if (!k_is_pre_kernel()) { @@ -329,6 +468,34 @@ int pm_device_runtime_enable(const struct device *dev) return ret; } +static int runtime_disable_sync(const struct device *dev) +{ + struct pm_device_isr *pm = dev->pm_isr; + int ret; + k_spinlock_key_t k = k_spin_lock(&pm->lock); + + if (!(pm->base.flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) { + ret = 0; + goto unlock; + } + + if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); + if (ret < 0) { + goto unlock; + } + + pm->base.state = PM_DEVICE_STATE_ACTIVE; + } else { + ret = 0; + } + + pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED); +unlock: + k_spin_unlock(&pm->lock, k); + return ret; +} + int pm_device_runtime_disable(const struct device *dev) { int ret = 0; @@ -340,23 +507,28 @@ int pm_device_runtime_disable(const struct device *dev) SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev); + if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) { + ret = runtime_disable_sync(dev); + goto end; + } + if (!k_is_pre_kernel()) { (void)k_sem_take(&pm->lock, K_FOREVER); } - if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { + if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) { goto unlock; } if (!k_is_pre_kernel()) { - if ((pm->state == PM_DEVICE_STATE_SUSPENDING) && + if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) && ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) { - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; goto clear_bit; } /* wait until possible async suspend is 
completed */ - while (pm->state == PM_DEVICE_STATE_SUSPENDING) { + while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) { k_sem_give(&pm->lock); k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER); @@ -366,23 +538,24 @@ int pm_device_runtime_disable(const struct device *dev) } /* wake up the device if suspended */ - if (pm->state == PM_DEVICE_STATE_SUSPENDED) { - ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME); + if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) { + ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME); if (ret < 0) { goto unlock; } - pm->state = PM_DEVICE_STATE_ACTIVE; + pm->base.state = PM_DEVICE_STATE_ACTIVE; } clear_bit: - atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); + atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); unlock: if (!k_is_pre_kernel()) { k_sem_give(&pm->lock); } +end: SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret); return ret; @@ -390,7 +563,7 @@ int pm_device_runtime_disable(const struct device *dev) bool pm_device_runtime_is_enabled(const struct device *dev) { - struct pm_device *pm = dev->pm; + struct pm_device_base *pm = dev->pm_base; return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED); } diff --git a/tests/subsys/pm/device_runtime_api/Kconfig b/tests/subsys/pm/device_runtime_api/Kconfig new file mode 100644 index 000000000000..a116921810f0 --- /dev/null +++ b/tests/subsys/pm/device_runtime_api/Kconfig @@ -0,0 +1,8 @@ +# Copyright 2024 Nordic Semiconductor ASA +# +# SPDX-License-Identifier: Apache-2.0 + +source "Kconfig.zephyr" + +config TEST_PM_DEVICE_ISR_SAFE + bool "Use ISR safe PM for the test" diff --git a/tests/subsys/pm/device_runtime_api/src/main.c b/tests/subsys/pm/device_runtime_api/src/main.c index 186ad7e6c783..623aa20b6123 100644 --- a/tests/subsys/pm/device_runtime_api/src/main.c +++ b/tests/subsys/pm/device_runtime_api/src/main.c @@ -141,105 +141,111 @@ ZTEST(device_runtime_api, test_api) ret = pm_device_runtime_put_async(test_dev, K_NO_WAIT); zassert_equal(ret, 0); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); - - /* usage: 0, -1, suspend: no (unbalanced call) */ - ret = pm_device_runtime_put(test_dev); - zassert_equal(ret, -EALREADY); - - /* usage: 0, -1, suspend: no (unbalanced call) */ - ret = pm_device_runtime_put_async(test_dev, K_NO_WAIT); - zassert_equal(ret, -EALREADY); + if (IS_ENABLED(CONFIG_TEST_PM_DEVICE_ISR_SAFE)) { + /* In sync mode async put is equivalent as normal put. 
*/ + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDED); + } else { + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); + + /* usage: 0, -1, suspend: no (unbalanced call) */ + ret = pm_device_runtime_put(test_dev); + zassert_equal(ret, -EALREADY); + + /* usage: 0, -1, suspend: no (unbalanced call) */ + ret = pm_device_runtime_put_async(test_dev, K_NO_WAIT); + zassert_equal(ret, -EALREADY); + + /* unblock test driver and let it finish */ + test_driver_pm_done(test_dev); + k_yield(); - /* unblock test driver and let it finish */ - test_driver_pm_done(test_dev); - k_yield(); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDED); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDED); + /*** get + asynchronous put + get (while suspend still ongoing) ***/ - /*** get + asynchronous put + get (while suspend still ongoing) ***/ + /* usage: 0, +1, resume: yes */ + ret = pm_device_runtime_get(test_dev); + zassert_equal(ret, 0); - /* usage: 0, +1, resume: yes */ - ret = pm_device_runtime_get(test_dev); - zassert_equal(ret, 0); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_ACTIVE); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_ACTIVE); + test_driver_pm_async(test_dev); - test_driver_pm_async(test_dev); + /* usage: 1, -1, suspend: yes (queued) */ + ret = pm_device_runtime_put_async(test_dev, K_NO_WAIT); + zassert_equal(ret, 0); - /* usage: 1, -1, suspend: yes (queued) */ - ret = pm_device_runtime_put_async(test_dev, K_NO_WAIT); - zassert_equal(ret, 0); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); + /* let suspension start */ + k_yield(); - /* let suspension start */ - k_yield(); + /* create and start get_runner thread + * get_runner thread is used to test synchronous path while asynchronous + * is ongoing. It is important to set its priority >= to the system work + * queue to make sure sync path run by the thread is forced to wait. + */ + k_thread_create(&get_runner_td, get_runner_stack, + K_THREAD_STACK_SIZEOF(get_runner_stack), get_runner, + NULL, NULL, NULL, CONFIG_SYSTEM_WORKQUEUE_PRIORITY, 0, + K_NO_WAIT); + k_yield(); - /* create and start get_runner thread - * get_runner thread is used to test synchronous path while asynchronous - * is ongoing. It is important to set its priority >= to the system work - * queue to make sure sync path run by the thread is forced to wait. 
- */ - k_thread_create(&get_runner_td, get_runner_stack, - K_THREAD_STACK_SIZEOF(get_runner_stack), get_runner, - NULL, NULL, NULL, CONFIG_SYSTEM_WORKQUEUE_PRIORITY, 0, - K_NO_WAIT); - k_yield(); - - /* let driver suspend to finish and wait until get_runner finishes - * resuming the driver - */ - test_driver_pm_done(test_dev); - k_thread_join(&get_runner_td, K_FOREVER); + /* let driver suspend to finish and wait until get_runner finishes + * resuming the driver + */ + test_driver_pm_done(test_dev); + k_thread_join(&get_runner_td, K_FOREVER); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_ACTIVE); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_ACTIVE); - /* Test if getting a device before an async operation starts does - * not trigger any device pm action. - */ - size_t count = test_driver_pm_count(test_dev); + /* Test if getting a device before an async operation starts does + * not trigger any device pm action. + */ + size_t count = test_driver_pm_count(test_dev); - ret = pm_device_runtime_put_async(test_dev, K_MSEC(10)); - zassert_equal(ret, 0); + ret = pm_device_runtime_put_async(test_dev, K_MSEC(10)); + zassert_equal(ret, 0); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); - ret = pm_device_runtime_get(test_dev); - zassert_equal(ret, 0); + ret = pm_device_runtime_get(test_dev); + zassert_equal(ret, 0); - /* Now lets check if the calls above have triggered a device - * pm action - */ - zassert_equal(count, test_driver_pm_count(test_dev)); + /* Now lets check if the calls above have triggered a device + * pm action + */ + zassert_equal(count, test_driver_pm_count(test_dev)); - /* - * test if async put with a delay respects the given time. - */ - ret = pm_device_runtime_put_async(test_dev, K_MSEC(100)); + /* + * test if async put with a delay respects the given time. + */ + ret = pm_device_runtime_put_async(test_dev, K_MSEC(100)); - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); - k_sleep(K_MSEC(80)); + k_sleep(K_MSEC(80)); - /* It should still be suspending since we have waited less than - * the delay we've set. - */ - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); + /* It should still be suspending since we have waited less than + * the delay we've set. + */ + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDING); - k_sleep(K_MSEC(30)); + k_sleep(K_MSEC(30)); - /* Now it should be already suspended */ - (void)pm_device_state_get(test_dev, &state); - zassert_equal(state, PM_DEVICE_STATE_SUSPENDED); + /* Now it should be already suspended */ + (void)pm_device_state_get(test_dev, &state); + zassert_equal(state, PM_DEVICE_STATE_SUSPENDED); + } /* Put operation should fail due the state be locked. 
*/ ret = pm_device_runtime_disable(test_dev); diff --git a/tests/subsys/pm/device_runtime_api/src/test_driver.c b/tests/subsys/pm/device_runtime_api/src/test_driver.c index d7c06f9cddb0..48dd4385cbc0 100644 --- a/tests/subsys/pm/device_runtime_api/src/test_driver.c +++ b/tests/subsys/pm/device_runtime_api/src/test_driver.c @@ -21,14 +21,16 @@ static int test_driver_action(const struct device *dev, { struct test_driver_data *data = dev->data; - data->ongoing = true; + if (!IS_ENABLED(CONFIG_TEST_PM_DEVICE_ISR_SAFE)) { + data->ongoing = true; - if (data->async) { - k_sem_take(&data->sync, K_FOREVER); - data->async = false; - } + if (data->async) { + k_sem_take(&data->sync, K_FOREVER); + data->async = false; + } - data->ongoing = false; + data->ongoing = false; + } data->count++; @@ -72,7 +74,9 @@ int test_driver_init(const struct device *dev) return 0; } -PM_DEVICE_DEFINE(test_driver, test_driver_action); +#define PM_DEVICE_TYPE COND_CODE_1(CONFIG_TEST_PM_DEVICE_ISR_SAFE, (PM_DEVICE_ISR_SAFE), (0)) + +PM_DEVICE_DEFINE(test_driver, test_driver_action, PM_DEVICE_TYPE); static struct test_driver_data data; diff --git a/tests/subsys/pm/device_runtime_api/testcase.yaml b/tests/subsys/pm/device_runtime_api/testcase.yaml index 72c9c5129428..d86f1ba95d8c 100644 --- a/tests/subsys/pm/device_runtime_api/testcase.yaml +++ b/tests/subsys/pm/device_runtime_api/testcase.yaml @@ -3,3 +3,9 @@ tests: tags: pm integration_platforms: - native_sim + pm.device_runtime.isr_safe.api: + tags: pm + integration_platforms: + - native_sim + extra_configs: + - CONFIG_TEST_PM_DEVICE_ISR_SAFE=y
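End to end, a driver opts in at definition time and can then drive runtime PM directly from its interrupt handler. A hedged usage sketch follows; the vnd,my-dev binding, the empty PM action and the IRQ wiring are illustrative and not part of this patch:

#define DT_DRV_COMPAT vnd_my_dev /* hypothetical devicetree compatible */

#include <zephyr/device.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>

static int my_dev_pm_action(const struct device *dev, enum pm_device_action action)
{
	/* Short, non-blocking suspend/resume, as required for PM_DEVICE_ISR_SAFE. */
	ARG_UNUSED(dev);
	ARG_UNUSED(action);
	return 0;
}

static void my_dev_isr(const void *arg)
{
	const struct device *dev = arg;

	/* Legal only because the device PM is ISR safe: get/put execute
	 * synchronously under the pm_device_isr spinlock.
	 */
	(void)pm_device_runtime_get(dev);
	/* ... service the event ... */
	(void)pm_device_runtime_put(dev);
}

static int my_dev_init(const struct device *dev)
{
	/* IRQ_CONNECT() wiring for my_dev_isr() omitted. */
	return pm_device_runtime_enable(dev);
}

PM_DEVICE_DT_INST_DEFINE(0, my_dev_pm_action, PM_DEVICE_ISR_SAFE);

DEVICE_DT_INST_DEFINE(0, my_dev_init, PM_DEVICE_DT_INST_GET(0), NULL, NULL,
		      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, NULL);

The pm.device_runtime.isr_safe.api test case added above exercises exactly this synchronous path by building with CONFIG_TEST_PM_DEVICE_ISR_SAFE=y.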