Diffstat (limited to 'drivers/base/power/main.c')
| -rw-r--r-- | drivers/base/power/main.c | 370 |
1 file changed, 304 insertions(+), 66 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65..bf412961a93 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
 
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
 {
         dev->power.is_prepared = false;
         dev->power.is_suspended = false;
+        dev->power.is_noirq_suspended = false;
+        dev->power.is_late_suspended = false;
         init_completion(&dev->power.completion);
         complete_all(&dev->power.completion);
         dev->power.wakeup = NULL;
@@ -211,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                         error, (unsigned long long)nsecs >> 10);
         }
-
-        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
-                error);
 }
 
 /**
@@ -384,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
         calltime = initcall_debug_start(dev);
 
         pm_dev_dbg(dev, state, info);
+        trace_device_pm_callback_start(dev, info, state.event);
         error = cb(dev);
+        trace_device_pm_callback_end(dev, error);
         suspend_report_result(cb, error);
 
         initcall_debug_report(dev, calltime, error, state, info);
@@ -467,7 +469,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         char *info = NULL;
@@ -476,9 +478,14 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
-        if (dev->power.syscore)
+        if (dev->power.syscore || dev->power.direct_complete)
+                goto Out;
+
+        if (!dev->power.is_noirq_suspended)
                 goto Out;
 
+        dpm_wait(dev->parent, async);
+
         if (dev->pm_domain) {
                 info = "noirq power domain ";
                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
         }
 
         error = dpm_run_callback(callback, dev, state, info);
+        dev->power.is_noirq_suspended = false;
 
  Out:
+        complete_all(&dev->power.completion);
         TRACE_RESUME(error);
         return error;
 }
 
+static bool is_async(struct device *dev)
+{
+        return dev->power.async_suspend && pm_async_enabled
+                && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+        struct device *dev = (struct device *)data;
+        int error;
+
+        error = device_resume_noirq(dev, pm_transition, true);
+        if (error)
+                pm_dev_err(dev, pm_transition, " async", error);
+
+        put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -514,32 +541,53 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+        struct device *dev;
         ktime_t starttime = ktime_get();
 
+        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
         mutex_lock(&dpm_list_mtx);
-        while (!list_empty(&dpm_noirq_list)) {
-                struct device *dev = to_device(dpm_noirq_list.next);
-                int error;
+        pm_transition = state;
 
+        /*
+         * Advanced the async threads upfront,
+         * in case the starting of async threads is
+         * delayed by non-async resuming devices.
+         */
+        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+                reinit_completion(&dev->power.completion);
+                if (is_async(dev)) {
+                        get_device(dev);
+                        async_schedule(async_resume_noirq, dev);
+                }
+        }
+
+        while (!list_empty(&dpm_noirq_list)) {
+                dev = to_device(dpm_noirq_list.next);
                 get_device(dev);
                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_resume_noirq(dev, state);
-                if (error) {
-                        suspend_stats.failed_resume_noirq++;
-                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-                        dpm_save_failed_dev(dev_name(dev));
-                        pm_dev_err(dev, state, " noirq", error);
+                if (!is_async(dev)) {
+                        int error;
+
+                        error = device_resume_noirq(dev, state, false);
+                        if (error) {
+                                suspend_stats.failed_resume_noirq++;
+                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+                                dpm_save_failed_dev(dev_name(dev));
+                                pm_dev_err(dev, state, " noirq", error);
+                        }
                 }
 
                 mutex_lock(&dpm_list_mtx);
                 put_device(dev);
         }
         mutex_unlock(&dpm_list_mtx);
+        async_synchronize_full();
         dpm_show_time(starttime, state, "noirq");
         resume_device_irqs();
         cpuidle_resume();
+        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 }
 
 /**
@@ -549,7 +597,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         char *info = NULL;
@@ -558,9 +606,14 @@ static int device_resume_early(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
-        if (dev->power.syscore)
+        if (dev->power.syscore || dev->power.direct_complete)
                 goto Out;
 
+        if (!dev->power.is_late_suspended)
+                goto Out;
+
+        dpm_wait(dev->parent, async);
+
         if (dev->pm_domain) {
                 info = "early power domain ";
                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,44 +634,78 @@ static int device_resume_early(struct device *dev, pm_message_t state)
         }
 
         error = dpm_run_callback(callback, dev, state, info);
+        dev->power.is_late_suspended = false;
 
  Out:
         TRACE_RESUME(error);
 
         pm_runtime_enable(dev);
+        complete_all(&dev->power.completion);
         return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+        struct device *dev = (struct device *)data;
+        int error;
+
+        error = device_resume_early(dev, pm_transition, true);
+        if (error)
+                pm_dev_err(dev, pm_transition, " async", error);
+
+        put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+        struct device *dev;
         ktime_t starttime = ktime_get();
 
+        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
         mutex_lock(&dpm_list_mtx);
-        while (!list_empty(&dpm_late_early_list)) {
-                struct device *dev = to_device(dpm_late_early_list.next);
-                int error;
+        pm_transition = state;
 
+        /*
+         * Advanced the async threads upfront,
+         * in case the starting of async threads is
+         * delayed by non-async resuming devices.
+         */
+        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+                reinit_completion(&dev->power.completion);
+                if (is_async(dev)) {
+                        get_device(dev);
+                        async_schedule(async_resume_early, dev);
+                }
+        }
+
+        while (!list_empty(&dpm_late_early_list)) {
+                dev = to_device(dpm_late_early_list.next);
                 get_device(dev);
                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_resume_early(dev, state);
-                if (error) {
-                        suspend_stats.failed_resume_early++;
-                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-                        dpm_save_failed_dev(dev_name(dev));
-                        pm_dev_err(dev, state, " early", error);
-                }
+                if (!is_async(dev)) {
+                        int error;
+                        error = device_resume_early(dev, state, false);
+                        if (error) {
+                                suspend_stats.failed_resume_early++;
+                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+                                dpm_save_failed_dev(dev_name(dev));
+                                pm_dev_err(dev, state, " early", error);
+                        }
+                }
 
                 mutex_lock(&dpm_list_mtx);
                 put_device(dev);
         }
         mutex_unlock(&dpm_list_mtx);
+        async_synchronize_full();
         dpm_show_time(starttime, state, "early");
+        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 }
 
 /**
@@ -651,6 +738,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
         if (dev->power.syscore)
                 goto Complete;
 
+        if (dev->power.direct_complete) {
+                /* Match the pm_runtime_disable() in __device_suspend(). */
+                pm_runtime_enable(dev);
+                goto Complete;
+        }
+
         dpm_wait(dev->parent, async);
         dpm_watchdog_set(&wd, dev);
         device_lock(dev);
@@ -732,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie)
         put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-        return dev->power.async_suspend && pm_async_enabled
-                && !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -750,6 +837,7 @@ void dpm_resume(pm_message_t state)
         struct device *dev;
         ktime_t starttime = ktime_get();
 
+        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
         might_sleep();
 
         mutex_lock(&dpm_list_mtx);
@@ -789,6 +877,9 @@ void dpm_resume(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         async_synchronize_full();
         dpm_show_time(starttime, state, NULL);
+
+        cpufreq_resume();
+        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
 
 /**
@@ -827,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)
 
         if (callback) {
                 pm_dev_dbg(dev, state, info);
+                trace_device_pm_callback_start(dev, info, state.event);
                 callback(dev);
+                trace_device_pm_callback_end(dev, 0);
         }
 
         device_unlock(dev);
@@ -846,6 +939,7 @@ void dpm_complete(pm_message_t state)
 {
         struct list_head list;
 
+        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
         might_sleep();
 
         INIT_LIST_HEAD(&list);
@@ -865,6 +959,7 @@ void dpm_complete(pm_message_t state)
         }
         list_splice(&list, &dpm_list);
         mutex_unlock(&dpm_list_mtx);
+        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
 }
 
 /**
@@ -913,13 +1008,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         char *info = NULL;
+        int error = 0;
 
-        if (dev->power.syscore)
-                return 0;
+        if (async_error)
+                goto Complete;
+
+        if (pm_wakeup_pending()) {
+                async_error = -EBUSY;
+                goto Complete;
+        }
+
+        if (dev->power.syscore || dev->power.direct_complete)
+                goto Complete;
+
+        dpm_wait_for_children(dev, async);
 
         if (dev->pm_domain) {
                 info = "noirq power domain ";
@@ -940,7 +1046,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
                 callback = pm_noirq_op(dev->driver->pm, state);
         }
 
-        return dpm_run_callback(callback, dev, state, info);
+        error = dpm_run_callback(callback, dev, state, info);
+        if (!error)
+                dev->power.is_noirq_suspended = true;
+        else
+                async_error = error;
+
+Complete:
+        complete_all(&dev->power.completion);
+        return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+        struct device *dev = (struct device *)data;
+        int error;
+
+        error = __device_suspend_noirq(dev, pm_transition, true);
+        if (error) {
+                dpm_save_failed_dev(dev_name(dev));
+                pm_dev_err(dev, pm_transition, " async", error);
+        }
+
+        put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+        reinit_completion(&dev->power.completion);
+
+        if (pm_async_enabled && dev->power.async_suspend) {
+                get_device(dev);
+                async_schedule(async_suspend_noirq, dev);
+                return 0;
+        }
+        return __device_suspend_noirq(dev, pm_transition, false);
 }
 
 /**
@@ -955,22 +1095,24 @@ static int dpm_suspend_noirq(pm_message_t state)
         ktime_t starttime = ktime_get();
         int error = 0;
 
+        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
         cpuidle_pause();
         suspend_device_irqs();
         mutex_lock(&dpm_list_mtx);
+        pm_transition = state;
+        async_error = 0;
+
         while (!list_empty(&dpm_late_early_list)) {
                 struct device *dev = to_device(dpm_late_early_list.prev);
 
                 get_device(dev);
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_suspend_noirq(dev, state);
+                error = device_suspend_noirq(dev);
 
                 mutex_lock(&dpm_list_mtx);
                 if (error) {
                         pm_dev_err(dev, state, " noirq", error);
-                        suspend_stats.failed_suspend_noirq++;
-                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                         dpm_save_failed_dev(dev_name(dev));
                         put_device(dev);
                         break;
@@ -979,16 +1121,22 @@ static int dpm_suspend_noirq(pm_message_t state)
                 list_move(&dev->power.entry, &dpm_noirq_list);
                 put_device(dev);
 
-                if (pm_wakeup_pending()) {
-                        error = -EBUSY;
+                if (async_error)
                         break;
-                }
         }
         mutex_unlock(&dpm_list_mtx);
-        if (error)
+        async_synchronize_full();
+        if (!error)
+                error = async_error;
+
+        if (error) {
+                suspend_stats.failed_suspend_noirq++;
+                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                 dpm_resume_noirq(resume_event(state));
-        else
+        } else {
                 dpm_show_time(starttime, state, "noirq");
+        }
+        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
         return error;
 }
 
@@ -999,15 +1147,26 @@ static int dpm_suspend_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
         pm_callback_t callback = NULL;
         char *info = NULL;
+        int error = 0;
 
         __pm_runtime_disable(dev, false);
 
-        if (dev->power.syscore)
-                return 0;
+        if (async_error)
+                goto Complete;
+
+        if (pm_wakeup_pending()) {
+                async_error = -EBUSY;
+                goto Complete;
+        }
+
+        if (dev->power.syscore || dev->power.direct_complete)
+                goto Complete;
+
+        dpm_wait_for_children(dev, async);
 
         if (dev->pm_domain) {
                 info = "late power domain ";
@@ -1028,7 +1187,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
                 callback = pm_late_early_op(dev->driver->pm, state);
         }
 
-        return dpm_run_callback(callback, dev, state, info);
+        error = dpm_run_callback(callback, dev, state, info);
+        if (!error)
+                dev->power.is_late_suspended = true;
+        else
+                async_error = error;
+
+Complete:
+        complete_all(&dev->power.completion);
+        return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+        struct device *dev = (struct device *)data;
+        int error;
+
+        error = __device_suspend_late(dev, pm_transition, true);
+        if (error) {
+                dpm_save_failed_dev(dev_name(dev));
+                pm_dev_err(dev, pm_transition, " async", error);
+        }
+        put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+        reinit_completion(&dev->power.completion);
+
+        if (pm_async_enabled && dev->power.async_suspend) {
+                get_device(dev);
+                async_schedule(async_suspend_late, dev);
+                return 0;
+        }
+
+        return __device_suspend_late(dev, pm_transition, false);
 }
 
 /**
@@ -1040,20 +1233,22 @@ static int dpm_suspend_late(pm_message_t state)
         ktime_t starttime = ktime_get();
         int error = 0;
 
+        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
         mutex_lock(&dpm_list_mtx);
+        pm_transition = state;
+        async_error = 0;
+
         while (!list_empty(&dpm_suspended_list)) {
                 struct device *dev = to_device(dpm_suspended_list.prev);
 
                 get_device(dev);
                 mutex_unlock(&dpm_list_mtx);
 
-                error = device_suspend_late(dev, state);
+                error = device_suspend_late(dev);
 
                 mutex_lock(&dpm_list_mtx);
                 if (error) {
                         pm_dev_err(dev, state, " late", error);
-                        suspend_stats.failed_suspend_late++;
-                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                         dpm_save_failed_dev(dev_name(dev));
                         put_device(dev);
                         break;
@@ -1062,17 +1257,19 @@ static int dpm_suspend_late(pm_message_t state)
                 list_move(&dev->power.entry, &dpm_late_early_list);
                 put_device(dev);
 
-                if (pm_wakeup_pending()) {
-                        error = -EBUSY;
+                if (async_error)
                         break;
-                }
         }
         mutex_unlock(&dpm_list_mtx);
-        if (error)
+        async_synchronize_full();
+        if (error) {
+                suspend_stats.failed_suspend_late++;
+                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                 dpm_resume_early(resume_event(state));
-        else
+        } else {
                 dpm_show_time(starttime, state, "late");
-
+        }
+        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
         return error;
 }
 
@@ -1111,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
         calltime = initcall_debug_start(dev);
 
+        trace_device_pm_callback_start(dev, info, state.event);
         error = cb(dev, state);
+        trace_device_pm_callback_end(dev, error);
         suspend_report_result(cb, error);
 
         initcall_debug_report(dev, calltime, error, state, info);
 
@@ -1154,6 +1353,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
         if (dev->power.syscore)
                 goto Complete;
 
+        if (dev->power.direct_complete) {
+                if (pm_runtime_status_suspended(dev)) {
+                        pm_runtime_disable(dev);
+                        if (pm_runtime_suspended_if_enabled(dev))
+                                goto Complete;
+
+                        pm_runtime_enable(dev);
+                }
+                dev->power.direct_complete = false;
+        }
+
         dpm_watchdog_set(&wd, dev);
         device_lock(dev);
 
@@ -1204,10 +1414,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
  End:
         if (!error) {
+                struct device *parent = dev->parent;
+
                 dev->power.is_suspended = true;
-                if (dev->power.wakeup_path
-                    && dev->parent && !dev->parent->power.ignore_children)
-                        dev->parent->power.wakeup_path = true;
+                if (parent) {
+                        spin_lock_irq(&parent->power.lock);
+
+                        dev->parent->power.direct_complete = false;
+                        if (dev->power.wakeup_path
+                            && !dev->parent->power.ignore_children)
+                                dev->parent->power.wakeup_path = true;
+
+                        spin_unlock_irq(&parent->power.lock);
+                }
         }
 
         device_unlock(dev);
@@ -1257,8 +1476,11 @@ int dpm_suspend(pm_message_t state)
         ktime_t starttime = ktime_get();
         int error = 0;
 
+        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
         might_sleep();
 
+        cpufreq_suspend();
+
         mutex_lock(&dpm_list_mtx);
         pm_transition = state;
         async_error = 0;
@@ -1292,6 +1514,7 @@ int dpm_suspend(pm_message_t state)
                 dpm_save_failed_step(SUSPEND_SUSPEND);
         } else
                 dpm_show_time(starttime, state, NULL);
+        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
         return error;
 }
 
@@ -1307,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
 {
         int (*callback)(struct device *) = NULL;
         char *info = NULL;
-        int error = 0;
+        int ret = 0;
 
         if (dev->power.syscore)
                 return 0;
@@ -1344,16 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
         }
 
         if (callback) {
-                error = callback(dev);
-                suspend_report_result(callback, error);
+                trace_device_pm_callback_start(dev, info, state.event);
+                ret = callback(dev);
+                trace_device_pm_callback_end(dev, ret);
         }
 
         device_unlock(dev);
 
-        if (error)
+        if (ret < 0) {
+                suspend_report_result(callback, ret);
                 pm_runtime_put(dev);
-
-        return error;
+                return ret;
+        }
+        /*
+         * A positive return value from ->prepare() means "this device appears
+         * to be runtime-suspended and its state is fine, so if it really is
+         * runtime-suspended, you can leave it in that state provided that you
+         * will do the same thing with all of its descendants".  This only
+         * applies to suspend transitions, however.
+         */
+        spin_lock_irq(&dev->power.lock);
+        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+        spin_unlock_irq(&dev->power.lock);
+        return 0;
 }
 
 /**
@@ -1366,6 +1602,7 @@ int dpm_prepare(pm_message_t state)
 {
         int error = 0;
 
+        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
         might_sleep();
 
         mutex_lock(&dpm_list_mtx);
@@ -1396,6 +1633,7 @@ int dpm_prepare(pm_message_t state)
                 put_device(dev);
         }
         mutex_unlock(&dpm_list_mtx);
+        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
         return error;
 }
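
The two-pass scheduling used by dpm_resume_noirq() and dpm_resume_early() above is easier to see outside the kernel: async-capable entries are kicked off up front so they are not serialized behind synchronous ones, then the synchronous entries run inline, and finally everything is waited on. Below is a minimal userspace sketch of that pattern; the entry list and names are invented, and pthread_create()/pthread_join() stand in for async_schedule()/async_synchronize_full().

/*
 * Userspace analog (not kernel code) of the "advance the async threads
 * upfront" pattern above. Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
        const char *name;
        bool async;              /* models dev->power.async_suspend */
        pthread_t thread;
};

static void *resume_one(void *arg)
{
        struct entry *e = arg;
        printf("resuming %s\n", e->name);
        return NULL;
}

int main(void)
{
        struct entry list[] = {
                { "dev-a", true }, { "dev-b", false }, { "dev-c", true },
        };
        int n = sizeof(list) / sizeof(list[0]);

        /* Pass 1: start async entries first (async_schedule()). */
        for (int i = 0; i < n; i++)
                if (list[i].async)
                        pthread_create(&list[i].thread, NULL,
                                       resume_one, &list[i]);

        /* Pass 2: run synchronous entries inline. */
        for (int i = 0; i < n; i++)
                if (!list[i].async)
                        resume_one(&list[i]);

        /* Wait for all async work (async_synchronize_full()). */
        for (int i = 0; i < n; i++)
                if (list[i].async)
                        pthread_join(list[i].thread, NULL);
        return 0;
}

The kernel version additionally orders each device against its parent or children through dev->power.completion (the dpm_wait()/dpm_wait_for_children() and complete_all() calls in the diff), which this sketch omits.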
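Note that is_async() only selects devices whose drivers have opted in. A minimal sketch of that opt-in, assuming a hypothetical platform driver "foo": device_enable_async_suspend() from <linux/pm.h> sets dev->power.async_suspend, and the other half of the check, pm_async_enabled, is the global /sys/power/pm_async toggle.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
        /*
         * Opt this device in to the async suspend/resume paths added
         * above; ordering against its parent and children is still
         * enforced through power.completion.
         */
        device_enable_async_suspend(&pdev->dev);
        return 0;
}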
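The direct_complete machinery set up in device_prepare() is driven by a driver's ->prepare() callback returning a positive value, as the new comment in the diff explains. A common shape for such a callback, sketched here with hypothetical "foo" names, simply reports whether the device is already runtime-suspended:

#include <linux/pm_runtime.h>

static int foo_prepare(struct device *dev)
{
        /*
         * Positive return: the device looks runtime-suspended and may be
         * left that way. device_prepare() above then sets
         * power.direct_complete, and the remaining suspend/resume
         * callbacks are skipped for this device, provided all of its
         * descendants report the same.
         */
        return pm_runtime_suspended(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
        .prepare = foo_prepare,
};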
