| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2025-07-17 19:55:25 +0200 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2025-07-17 19:55:25 +0200 |
| commit | 7c1f7c22e69fae209eaad58de2627b8b5acb3cb3 | |
| tree | 045bdc9025e68539080660073e661911e3b20406 | /drivers/base |
| parent | ebd6884167eac94bae9f92793fcd84069d9e4415 | |
| parent | 2096d42d82dc983d9db861bd6585723bd24a0819 | |
Merge back earlier material related to system sleep
Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/power/main.c | 76 |
| -rw-r--r-- | drivers/base/power/runtime.c | 127 |
2 files changed, 147 insertions(+), 56 deletions(-)
```diff
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 7a50af416cac..5d9b3dc9011d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
 	/*
 	 * Start processing "async" children of the device unless it's been
 	 * started already for them.
-	 *
-	 * This could have been done for the device's "async" consumers too, but
-	 * they either need to wait for their parents or the processing has
-	 * already started for them after their parents were processed.
 	 */
 	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	dpm_async_resume_children(dev, func);
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" consumers. */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->consumer, func);
+
+	device_links_read_unlock(idx);
+}
+
 static void dpm_clear_async_state(struct device *dev)
 {
 	reinit_completion(&dev->power.completion);
@@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
 
 static bool dpm_root_device(struct device *dev)
 {
-	return !dev->parent;
+	lockdep_assert_held(&dpm_list_mtx);
+
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * suppliers is actually empty before calling it.
+	 */
+	return !dev->parent && list_empty(&dev->links.suppliers);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -752,7 +772,7 @@ Out:
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_noirq);
+	dpm_async_resume_subordinate(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -895,7 +915,7 @@ Out:
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume_early);
+	dpm_async_resume_subordinate(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -1071,7 +1091,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
 
-	dpm_async_resume_children(dev, async_resume);
+	dpm_async_resume_subordinate(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
@@ -1095,7 +1115,6 @@ void dpm_resume(pm_message_t state)
 	ktime_t starttime = ktime_get();
 
 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
-	might_sleep();
 
 	pm_transition = state;
 	async_error = 0;
@@ -1198,7 +1217,6 @@ void dpm_complete(pm_message_t state)
 	struct list_head list;
 
 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
-	might_sleep();
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
@@ -1258,10 +1276,15 @@ static bool dpm_leaf_device(struct device *dev)
 			return false;
 	}
 
-	return true;
+	/*
+	 * Since this function is required to run under dpm_list_mtx, the
+	 * list_empty() below will only return true if the device's list of
+	 * consumers is actually empty before calling it.
+	 */
+	return list_empty(&dev->links.consumers);
 }
 
-static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
 {
 	guard(mutex)(&dpm_list_mtx);
 
@@ -1273,11 +1296,31 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
 	 * deleted before it.
 	 */
 	if (!device_pm_initialized(dev))
-		return;
+		return false;
 
 	/* Start processing the device's parent if it is "async". */
 	if (dev->parent)
 		dpm_async_with_cleanup(dev->parent, func);
+
+	return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+	struct device_link *link;
+	int idx;
+
+	if (!dpm_async_suspend_parent(dev, func))
+		return;
+
+	idx = device_links_read_lock();
+
+	/* Start processing the device's "async" suppliers. */
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_async_with_cleanup(link->supplier, func);
+
+	device_links_read_unlock(idx);
 }
 
 static void dpm_async_suspend_complete_all(struct list_head *device_list)
@@ -1417,7 +1460,7 @@ Complete:
 	if (error || async_error)
 		return error;
 
-	dpm_async_suspend_parent(dev, async_suspend_noirq);
+	dpm_async_suspend_superior(dev, async_suspend_noirq);
 
 	return 0;
 }
@@ -1614,7 +1657,7 @@ Complete:
 	if (error || async_error)
 		return error;
 
-	dpm_async_suspend_parent(dev, async_suspend_late);
+	dpm_async_suspend_superior(dev, async_suspend_late);
 
 	return 0;
 }
@@ -1906,7 +1949,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (error || async_error)
 		return error;
 
-	dpm_async_suspend_parent(dev, async_suspend);
+	dpm_async_suspend_superior(dev, async_suspend);
 
 	return 0;
 }
@@ -2129,7 +2172,6 @@ int dpm_prepare(pm_message_t state)
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
-	might_sleep();
 
 	/*
	 * Give a chance for the known devices to complete their probes, before
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c55a7c70bc1a..05ff3d2209e6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
 
 typedef int (*pm_callback_t)(struct device *);
 
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+	return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+					       size_t cb_offset)
+{
+	if (dev->driver && dev->driver->pm)
+		return get_callback_ptr(dev->driver->pm, cb_offset);
+
+	return NULL;
+}
+
 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 {
-	pm_callback_t cb;
 	const struct dev_pm_ops *ops;
+	pm_callback_t cb = NULL;
 
 	if (dev->pm_domain)
 		ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
 		ops = NULL;
 
 	if (ops)
-		cb = *(pm_callback_t *)((void *)ops + cb_offset);
-	else
-		cb = NULL;
+		cb = get_callback_ptr(ops, cb_offset);
 
-	if (!cb && dev->driver && dev->driver->pm)
-		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+	if (!cb)
+		cb = __rpm_get_driver_callback(dev, cb_offset);
 
 	return cb;
 }
@@ -1827,7 +1839,7 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.needs_force_resume = 0;
+	dev->power.needs_force_resume = false;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
@@ -1854,6 +1866,11 @@ void pm_runtime_reinit(struct device *dev)
 			pm_runtime_put(dev->parent);
 		}
 	}
+	/*
+	 * Clear power.needs_force_resume in case it has been set by
+	 * pm_runtime_force_suspend() invoked from a driver remove callback.
+	 */
+	dev->power.needs_force_resume = false;
 }
 
 /**
@@ -1941,13 +1958,23 @@ void pm_runtime_drop_link(struct device_link *link)
 	pm_request_idle(link->supplier);
 }
 
-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
 {
-	return atomic_read(&dev->power.usage_count) <= 1 &&
-		(atomic_read(&dev->power.child_count) == 0 ||
-		 dev->power.ignore_children);
+	/*
+	 * Setting power.strict_midlayer means that the middle layer
+	 * code does not want its runtime PM callbacks to be invoked via
+	 * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+	 * return a direct pointer to the driver callback in that case.
+	 */
+	if (dev_pm_strict_midlayer_is_set(dev))
+		return __rpm_get_driver_callback(dev, cb_offset);
+
+	return __rpm_get_callback(dev, cb_offset);
 }
 
+#define GET_CALLBACK(dev, callback) \
+	get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
 /**
  * pm_runtime_force_suspend - Force a device into suspend state if needed.
  * @dev: Device to suspend.
@@ -1964,10 +1991,6 @@ bool pm_runtime_need_not_resume(struct device *dev)
  * sure the device is put into low power state and it should only be used during
  * system-wide PM transitions to sleep states. It assumes that the analogous
  * pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
  */
 int pm_runtime_force_suspend(struct device *dev)
 {
@@ -1975,10 +1998,10 @@ int pm_runtime_force_suspend(struct device *dev)
 	int ret;
 
 	pm_runtime_disable(dev);
-	if (pm_runtime_status_suspended(dev))
+	if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
 		return 0;
 
-	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+	callback = GET_CALLBACK(dev, runtime_suspend);
 
 	dev_pm_enable_wake_irq_check(dev, true);
 	ret = callback ? callback(dev) : 0;
@@ -1990,15 +2013,16 @@ int pm_runtime_force_suspend(struct device *dev)
 	/*
 	 * If the device can stay in suspend after the system-wide transition
 	 * to the working state that will follow, drop the children counter of
-	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
-	 * function will be called again for it in the meantime.
+	 * its parent and the usage counters of its suppliers. Otherwise, set
+	 * power.needs_force_resume to let pm_runtime_force_resume() know that
+	 * the device needs to be taken care of and to prevent this function
+	 * from handling the device again in case the device is passed to it
+	 * once more subsequently.
 	 */
-	if (pm_runtime_need_not_resume(dev)) {
+	if (pm_runtime_need_not_resume(dev))
 		pm_runtime_set_suspended(dev);
-	} else {
-		__update_runtime_status(dev, RPM_SUSPENDED);
-		dev->power.needs_force_resume = 1;
-	}
+	else
+		dev->power.needs_force_resume = true;
 
 	return 0;
 
@@ -2009,33 +2033,37 @@ err:
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
 
+#ifdef CONFIG_PM_SLEEP
+
 /**
  * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
+ *
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
  *
- * Typically this function may be invoked from a system resume callback.
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
  */
 int pm_runtime_force_resume(struct device *dev)
 {
 	int (*callback)(struct device *);
 	int ret = 0;
 
-	if (!dev->power.needs_force_resume)
+	if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+	    pm_runtime_status_suspended(dev)))
 		goto out;
 
-	/*
-	 * The value of the parent's children counter is correct already, so
-	 * just update the status of the device.
-	 */
-	__update_runtime_status(dev, RPM_ACTIVE);
-
-	callback = RPM_GET_CALLBACK(dev, runtime_resume);
+	callback = GET_CALLBACK(dev, runtime_resume);
 
 	dev_pm_disable_wake_irq_check(dev, false);
 	ret = callback ? callback(dev) : 0;
@@ -2046,9 +2074,30 @@ int pm_runtime_force_resume(struct device *dev)
 	}
 
 	pm_runtime_mark_last_busy(dev);
+
 out:
-	dev->power.needs_force_resume = 0;
+	/*
	 * The smart_suspend flag can be cleared here because it is not going
+	 * to be necessary until the next system-wide suspend transition that
+	 * will update it again.
+	 */
+	dev->power.smart_suspend = false;
+	/*
+	 * Also clear needs_force_resume to make this function skip devices that
+	 * have been seen by it once.
+	 */
+	dev->power.needs_force_resume = false;
+
 	pm_runtime_enable(dev);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+	return atomic_read(&dev->power.usage_count) <= 1 &&
+		(atomic_read(&dev->power.child_count) == 0 ||
+		 dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
```
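
The new dpm_async_resume_subordinate() and dpm_async_suspend_superior() helpers act on device links, so the ordering they enforce only applies to links that drivers actually create. As a minimal sketch of where such links come from (example_bind(), consumer_dev and supplier_dev are hypothetical placeholders, not part of this commit; device_link_add() and the DL_FLAG_* flags are the real driver-core API):

```c
#include <linux/device.h>

/*
 * Hypothetical helper: link a consumer device to its supplier so the PM
 * core orders their transitions. With the changes above, async suspend
 * starts a supplier only after its consumers are done, and async resume
 * starts consumers only after their supplier has resumed.
 */
static int example_bind(struct device *consumer_dev,
			struct device *supplier_dev)
{
	struct device_link *link;

	/* DL_FLAG_PM_RUNTIME also couples the runtime PM usage counters. */
	link = device_link_add(consumer_dev, supplier_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);

	return link ? 0 : -ENODEV;
}
```

Links in the DL_STATE_DORMANT state are skipped, which is why both helpers test link->status under the device links read lock before queuing the linked device.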
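
The pm_runtime_force_suspend()/pm_runtime_force_resume() changes affect drivers that reuse their runtime PM callbacks for system sleep. A sketch of that common pairing, with a hypothetical "foo" driver (the foo_* callbacks are placeholders; SYSTEM_SLEEP_PM_OPS() and RUNTIME_PM_OPS() are the standard kernel macros):

```c
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver callbacks; hardware accesses are elided. */
static int foo_runtime_suspend(struct device *dev)
{
	/* Put the device into a low-power state here. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Bring the device back to full power here. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* Route system sleep through the force-suspend/resume helpers. */
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
```

DEFINE_RUNTIME_DEV_PM_OPS() expands to this same pairing. With the needs_force_resume bookkeeping above, pm_runtime_force_suspend() can be called repeatedly for such a device without invoking ->runtime_suspend() again, and pm_runtime_force_resume() now also brings back devices whose smart_suspend flag is set while their runtime PM status is "active".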
