Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--	drivers/base/power/main.c	1351
1 file changed, 977 insertions(+), 374 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 31b526661ec..bf412961a93 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -8,7 +8,7 @@   *   *   * The driver model core calls device_pm_add() when a device is registered. - * This will intialize the embedded device_pm_info object in the device + * This will initialize the embedded device_pm_info object in the device   * and add it to the list of power-controlled devices. sysfs entries for   * controlling device power management will also be added.   * @@ -19,6 +19,7 @@  #include <linux/device.h>  #include <linux/kallsyms.h> +#include <linux/export.h>  #include <linux/mutex.h>  #include <linux/pm.h>  #include <linux/pm_runtime.h> @@ -26,10 +27,17 @@  #include <linux/interrupt.h>  #include <linux/sched.h>  #include <linux/async.h> +#include <linux/suspend.h> +#include <trace/events/power.h> +#include <linux/cpufreq.h> +#include <linux/cpuidle.h> +#include <linux/timer.h>  #include "../base.h"  #include "power.h" +typedef int (*pm_callback_t)(struct device *); +  /*   * The entries in the dpm_list list are in a depth first order, simply   * because children are guaranteed to be discovered after parents, and @@ -41,30 +49,55 @@   */  LIST_HEAD(dpm_list); +static LIST_HEAD(dpm_prepared_list); +static LIST_HEAD(dpm_suspended_list); +static LIST_HEAD(dpm_late_early_list); +static LIST_HEAD(dpm_noirq_list); +struct suspend_stats suspend_stats;  static DEFINE_MUTEX(dpm_list_mtx);  static pm_message_t pm_transition; -/* - * Set once the preparation of devices for a PM transition has started, reset - * before starting to resume devices.  Protected by dpm_list_mtx. - */ -static bool transition_started; -  static int async_error; +static char *pm_verb(int event) +{ +	switch (event) { +	case PM_EVENT_SUSPEND: +		return "suspend"; +	case PM_EVENT_RESUME: +		return "resume"; +	case PM_EVENT_FREEZE: +		return "freeze"; +	case PM_EVENT_QUIESCE: +		return "quiesce"; +	case PM_EVENT_HIBERNATE: +		return "hibernate"; +	case PM_EVENT_THAW: +		return "thaw"; +	case PM_EVENT_RESTORE: +		return "restore"; +	case PM_EVENT_RECOVER: +		return "recover"; +	default: +		return "(unknown PM event)"; +	} +} +  /** - * device_pm_init - Initialize the PM-related part of a device object. + * device_pm_sleep_init - Initialize system suspend-related device fields.   * @dev: Device object being initialized.   */ -void device_pm_init(struct device *dev) +void device_pm_sleep_init(struct device *dev)  { -	dev->power.status = DPM_ON; +	dev->power.is_prepared = false; +	dev->power.is_suspended = false; +	dev->power.is_noirq_suspended = false; +	dev->power.is_late_suspended = false;  	init_completion(&dev->power.completion);  	complete_all(&dev->power.completion);  	dev->power.wakeup = NULL; -	spin_lock_init(&dev->power.lock); -	pm_runtime_init(dev); +	INIT_LIST_HEAD(&dev->power.entry);  }  /** @@ -90,22 +123,11 @@ void device_pm_unlock(void)  void device_pm_add(struct device *dev)  {  	pr_debug("PM: Adding info for %s:%s\n", -		 dev->bus ? dev->bus->name : "No Bus", -		 kobject_name(&dev->kobj)); +		 dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev));  	mutex_lock(&dpm_list_mtx); -	if (dev->parent) { -		if (dev->parent->power.status >= DPM_SUSPENDING) -			dev_warn(dev, "parent %s should not be sleeping\n", -				 dev_name(dev->parent)); -	} else if (transition_started) { -		/* -		 * We refuse to register parentless devices while a PM -		 * transition is in progress in order to avoid leaving them -		 * unhandled down the road -		 */ -		dev_WARN(dev, "Parentless device registered during a PM transaction\n"); -	} - +	if (dev->parent && dev->parent->power.is_prepared) +		dev_warn(dev, "parent %s should not be sleeping\n", +			dev_name(dev->parent));  	list_add_tail(&dev->power.entry, &dpm_list);  	mutex_unlock(&dpm_list_mtx);  } @@ -117,8 +139,7 @@ void device_pm_add(struct device *dev)  void device_pm_remove(struct device *dev)  {  	pr_debug("PM: Removing info for %s:%s\n", -		 dev->bus ? dev->bus->name : "No Bus", -		 kobject_name(&dev->kobj)); +		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));  	complete_all(&dev->power.completion);  	mutex_lock(&dpm_list_mtx);  	list_del_init(&dev->power.entry); @@ -135,10 +156,8 @@ void device_pm_remove(struct device *dev)  void device_pm_move_before(struct device *deva, struct device *devb)  {  	pr_debug("PM: Moving %s:%s before %s:%s\n", -		 deva->bus ? deva->bus->name : "No Bus", -		 kobject_name(&deva->kobj), -		 devb->bus ? devb->bus->name : "No Bus", -		 kobject_name(&devb->kobj)); +		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), +		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));  	/* Delete deva from dpm_list and reinsert before devb. */  	list_move_tail(&deva->power.entry, &devb->power.entry);  } @@ -151,10 +170,8 @@ void device_pm_move_before(struct device *deva, struct device *devb)  void device_pm_move_after(struct device *deva, struct device *devb)  {  	pr_debug("PM: Moving %s:%s after %s:%s\n", -		 deva->bus ? deva->bus->name : "No Bus", -		 kobject_name(&deva->kobj), -		 devb->bus ? devb->bus->name : "No Bus", -		 kobject_name(&devb->kobj)); +		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva), +		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));  	/* Delete deva from dpm_list and reinsert after devb. */  	list_move(&deva->power.entry, &devb->power.entry);  } @@ -166,8 +183,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)  void device_pm_move_last(struct device *dev)  {  	pr_debug("PM: Moving %s:%s to end of list\n", -		 dev->bus ? dev->bus->name : "No Bus", -		 kobject_name(&dev->kobj)); +		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));  	list_move_tail(&dev->power.entry, &dpm_list);  } @@ -175,9 +191,10 @@ static ktime_t initcall_debug_start(struct device *dev)  {  	ktime_t calltime = ktime_set(0, 0); -	if (initcall_debug) { -		pr_info("calling  %s+ @ %i\n", -				dev_name(dev), task_pid_nr(current)); +	if (pm_print_times_enabled) { +		pr_info("calling  %s+ @ %i, parent: %s\n", +			dev_name(dev), task_pid_nr(current), +			dev->parent ? 
dev_name(dev->parent) : "none");  		calltime = ktime_get();  	} @@ -185,15 +202,17 @@ static ktime_t initcall_debug_start(struct device *dev)  }  static void initcall_debug_report(struct device *dev, ktime_t calltime, -				  int error) +				  int error, pm_message_t state, char *info)  { -	ktime_t delta, rettime; +	ktime_t rettime; +	s64 nsecs; -	if (initcall_debug) { -		rettime = ktime_get(); -		delta = ktime_sub(rettime, calltime); +	rettime = ktime_get(); +	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); + +	if (pm_print_times_enabled) {  		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), -			error, (unsigned long long)ktime_to_ns(delta) >> 10); +			error, (unsigned long long)nsecs >> 10);  	}  } @@ -223,175 +242,103 @@ static void dpm_wait_for_children(struct device *dev, bool async)  }  /** - * pm_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_op - Return the PM operation appropriate for given PM event.   * @ops: PM operations to choose from.   * @state: PM transition of the system being carried out.   */ -static int pm_op(struct device *dev, -		 const struct dev_pm_ops *ops, -		 pm_message_t state) +static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)  { -	int error = 0; -	ktime_t calltime; - -	calltime = initcall_debug_start(dev); -  	switch (state.event) {  #ifdef CONFIG_SUSPEND  	case PM_EVENT_SUSPEND: -		if (ops->suspend) { -			error = ops->suspend(dev); -			suspend_report_result(ops->suspend, error); -		} -		break; +		return ops->suspend;  	case PM_EVENT_RESUME: -		if (ops->resume) { -			error = ops->resume(dev); -			suspend_report_result(ops->resume, error); -		} -		break; +		return ops->resume;  #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS  	case PM_EVENT_FREEZE:  	case PM_EVENT_QUIESCE: -		if (ops->freeze) { -			error = ops->freeze(dev); -			suspend_report_result(ops->freeze, error); -		} -		break; +		return ops->freeze;  	case PM_EVENT_HIBERNATE: -		if (ops->poweroff) { -			error = ops->poweroff(dev); -			suspend_report_result(ops->poweroff, error); -		} -		break; +		return ops->poweroff;  	case PM_EVENT_THAW:  	case PM_EVENT_RECOVER: -		if (ops->thaw) { -			error = ops->thaw(dev); -			suspend_report_result(ops->thaw, error); -		} +		return ops->thaw;  		break;  	case PM_EVENT_RESTORE: -		if (ops->restore) { -			error = ops->restore(dev); -			suspend_report_result(ops->restore, error); -		} -		break; -#endif /* CONFIG_HIBERNATION */ -	default: -		error = -EINVAL; +		return ops->restore; +#endif /* CONFIG_HIBERNATE_CALLBACKS */  	} -	initcall_debug_report(dev, calltime, error); - -	return error; +	return NULL;  }  /** - * pm_noirq_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_late_early_op - Return the PM operation appropriate for given PM event.   * @ops: PM operations to choose from.   * @state: PM transition of the system being carried out.   * - * The driver of @dev will not receive interrupts while this function is being - * executed. + * Runtime PM is disabled for @dev while this function is being executed.   
*/ -static int pm_noirq_op(struct device *dev, -			const struct dev_pm_ops *ops, -			pm_message_t state) +static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, +				      pm_message_t state)  { -	int error = 0; -	ktime_t calltime, delta, rettime; - -	if (initcall_debug) { -		pr_info("calling  %s+ @ %i, parent: %s\n", -				dev_name(dev), task_pid_nr(current), -				dev->parent ? dev_name(dev->parent) : "none"); -		calltime = ktime_get(); -	} -  	switch (state.event) {  #ifdef CONFIG_SUSPEND  	case PM_EVENT_SUSPEND: -		if (ops->suspend_noirq) { -			error = ops->suspend_noirq(dev); -			suspend_report_result(ops->suspend_noirq, error); -		} -		break; +		return ops->suspend_late;  	case PM_EVENT_RESUME: -		if (ops->resume_noirq) { -			error = ops->resume_noirq(dev); -			suspend_report_result(ops->resume_noirq, error); -		} -		break; +		return ops->resume_early;  #endif /* CONFIG_SUSPEND */ -#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_HIBERNATE_CALLBACKS  	case PM_EVENT_FREEZE:  	case PM_EVENT_QUIESCE: -		if (ops->freeze_noirq) { -			error = ops->freeze_noirq(dev); -			suspend_report_result(ops->freeze_noirq, error); -		} -		break; +		return ops->freeze_late;  	case PM_EVENT_HIBERNATE: -		if (ops->poweroff_noirq) { -			error = ops->poweroff_noirq(dev); -			suspend_report_result(ops->poweroff_noirq, error); -		} -		break; +		return ops->poweroff_late;  	case PM_EVENT_THAW:  	case PM_EVENT_RECOVER: -		if (ops->thaw_noirq) { -			error = ops->thaw_noirq(dev); -			suspend_report_result(ops->thaw_noirq, error); -		} -		break; +		return ops->thaw_early;  	case PM_EVENT_RESTORE: -		if (ops->restore_noirq) { -			error = ops->restore_noirq(dev); -			suspend_report_result(ops->restore_noirq, error); -		} -		break; -#endif /* CONFIG_HIBERNATION */ -	default: -		error = -EINVAL; +		return ops->restore_early; +#endif /* CONFIG_HIBERNATE_CALLBACKS */  	} -	if (initcall_debug) { -		rettime = ktime_get(); -		delta = ktime_sub(rettime, calltime); -		printk("initcall %s_i+ returned %d after %Ld usecs\n", -			dev_name(dev), error, -			(unsigned long long)ktime_to_ns(delta) >> 10); -	} - -	return error; +	return NULL;  } -static char *pm_verb(int event) +/** + * pm_noirq_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. 
+ */ +static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)  { -	switch (event) { +	switch (state.event) { +#ifdef CONFIG_SUSPEND  	case PM_EVENT_SUSPEND: -		return "suspend"; +		return ops->suspend_noirq;  	case PM_EVENT_RESUME: -		return "resume"; +		return ops->resume_noirq; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS  	case PM_EVENT_FREEZE: -		return "freeze";  	case PM_EVENT_QUIESCE: -		return "quiesce"; +		return ops->freeze_noirq;  	case PM_EVENT_HIBERNATE: -		return "hibernate"; +		return ops->poweroff_noirq;  	case PM_EVENT_THAW: -		return "thaw"; -	case PM_EVENT_RESTORE: -		return "restore";  	case PM_EVENT_RECOVER: -		return "recover"; -	default: -		return "(unknown PM event)"; +		return ops->thaw_noirq; +	case PM_EVENT_RESTORE: +		return ops->restore_noirq; +#endif /* CONFIG_HIBERNATE_CALLBACKS */  	} + +	return NULL;  }  static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) @@ -405,7 +352,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,  			int error)  {  	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", -		kobject_name(&dev->kobj), pm_verb(state.event), info, error); +		dev_name(dev), pm_verb(state.event), info, error);  }  static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) @@ -425,6 +372,93 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)  		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);  } +static int dpm_run_callback(pm_callback_t cb, struct device *dev, +			    pm_message_t state, char *info) +{ +	ktime_t calltime; +	int error; + +	if (!cb) +		return 0; + +	calltime = initcall_debug_start(dev); + +	pm_dev_dbg(dev, state, info); +	trace_device_pm_callback_start(dev, info, state.event); +	error = cb(dev); +	trace_device_pm_callback_end(dev, error); +	suspend_report_result(cb, error); + +	initcall_debug_report(dev, calltime, error, state, info); + +	return error; +} + +#ifdef CONFIG_DPM_WATCHDOG +struct dpm_watchdog { +	struct device		*dev; +	struct task_struct	*tsk; +	struct timer_list	timer; +}; + +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ +	struct dpm_watchdog wd + +/** + * dpm_watchdog_handler - Driver suspend / resume watchdog handler. + * @data: Watchdog object address. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so panic() to + * capture a crash-dump in pstore. + */ +static void dpm_watchdog_handler(unsigned long data) +{ +	struct dpm_watchdog *wd = (void *)data; + +	dev_emerg(wd->dev, "**** DPM device timeout ****\n"); +	show_stack(wd->tsk, NULL); +	panic("%s %s: unrecoverable failure\n", +		dev_driver_string(wd->dev), dev_name(wd->dev)); +} + +/** + * dpm_watchdog_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) +{ +	struct timer_list *timer = &wd->timer; + +	wd->dev = dev; +	wd->tsk = current; + +	init_timer_on_stack(timer); +	/* use same timeout value for both suspend and resume */ +	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; +	timer->function = dpm_watchdog_handler; +	timer->data = (unsigned long)wd; +	add_timer(timer); +} + +/** + * dpm_watchdog_clear - Disable suspend/resume watchdog. + * @wd: Watchdog to disable. 
+ */ +static void dpm_watchdog_clear(struct dpm_watchdog *wd) +{ +	struct timer_list *timer = &wd->timer; + +	del_timer_sync(timer); +	destroy_timer_on_stack(timer); +} +#else +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) +#define dpm_watchdog_set(x, y) +#define dpm_watchdog_clear(x) +#endif +  /*------------------------- Resume routines -------------------------*/  /** @@ -435,86 +469,256 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)   * The driver of @dev will not receive interrupts while this function is being   * executed.   */ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)  { +	pm_callback_t callback = NULL; +	char *info = NULL;  	int error = 0;  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); -	if (dev->bus && dev->bus->pm) { -		pm_dev_dbg(dev, state, "EARLY "); -		error = pm_noirq_op(dev, dev->bus->pm, state); -		if (error) -			goto End; -	} +	if (dev->power.syscore || dev->power.direct_complete) +		goto Out; -	if (dev->type && dev->type->pm) { -		pm_dev_dbg(dev, state, "EARLY type "); -		error = pm_noirq_op(dev, dev->type->pm, state); -		if (error) -			goto End; +	if (!dev->power.is_noirq_suspended) +		goto Out; + +	dpm_wait(dev->parent, async); + +	if (dev->pm_domain) { +		info = "noirq power domain "; +		callback = pm_noirq_op(&dev->pm_domain->ops, state); +	} else if (dev->type && dev->type->pm) { +		info = "noirq type "; +		callback = pm_noirq_op(dev->type->pm, state); +	} else if (dev->class && dev->class->pm) { +		info = "noirq class "; +		callback = pm_noirq_op(dev->class->pm, state); +	} else if (dev->bus && dev->bus->pm) { +		info = "noirq bus "; +		callback = pm_noirq_op(dev->bus->pm, state);  	} -	if (dev->class && dev->class->pm) { -		pm_dev_dbg(dev, state, "EARLY class "); -		error = pm_noirq_op(dev, dev->class->pm, state); +	if (!callback && dev->driver && dev->driver->pm) { +		info = "noirq driver "; +		callback = pm_noirq_op(dev->driver->pm, state);  	} -End: +	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_noirq_suspended = false; + + Out: +	complete_all(&dev->power.completion);  	TRACE_RESUME(error);  	return error;  } +static bool is_async(struct device *dev) +{ +	return dev->power.async_suspend && pm_async_enabled +		&& !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_noirq(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} +  /** - * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.   * @state: PM transition of the system being carried out.   * - * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and + * Call the "noirq" resume handlers for all devices in dpm_noirq_list and   * enable device drivers to receive interrupts.   
*/ -void dpm_resume_noirq(pm_message_t state) +static void dpm_resume_noirq(pm_message_t state)  {  	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);  	mutex_lock(&dpm_list_mtx); -	transition_started = false; -	list_for_each_entry(dev, &dpm_list, power.entry) -		if (dev->power.status > DPM_OFF) { +	pm_transition = state; + +	/* +	 * Advanced the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_noirq_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_noirq, dev); +		} +	} + +	while (!list_empty(&dpm_noirq_list)) { +		dev = to_device(dpm_noirq_list.next); +		get_device(dev); +		list_move_tail(&dev->power.entry, &dpm_late_early_list); +		mutex_unlock(&dpm_list_mtx); + +		if (!is_async(dev)) {  			int error; -			dev->power.status = DPM_OFF; -			error = device_resume_noirq(dev, state); -			if (error) -				pm_dev_err(dev, state, " early", error); +			error = device_resume_noirq(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_noirq++; +				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " noirq", error); +			}  		} + +		mutex_lock(&dpm_list_mtx); +		put_device(dev); +	}  	mutex_unlock(&dpm_list_mtx); -	dpm_show_time(starttime, state, "early"); +	async_synchronize_full(); +	dpm_show_time(starttime, state, "noirq");  	resume_device_irqs(); +	cpuidle_resume(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);  } -EXPORT_SYMBOL_GPL(dpm_resume_noirq);  /** - * legacy_resume - Execute a legacy (bus or class) resume callback for device. - * @dev: Device to resume. - * @cb: Resume callback to execute. + * device_resume_early - Execute an "early resume" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed.   
*/ -static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) +static int device_resume_early(struct device *dev, pm_message_t state, bool async)  { -	int error; -	ktime_t calltime; +	pm_callback_t callback = NULL; +	char *info = NULL; +	int error = 0; -	calltime = initcall_debug_start(dev); +	TRACE_DEVICE(dev); +	TRACE_RESUME(0); -	error = cb(dev); -	suspend_report_result(cb, error); +	if (dev->power.syscore || dev->power.direct_complete) +		goto Out; -	initcall_debug_report(dev, calltime, error); +	if (!dev->power.is_late_suspended) +		goto Out; + +	dpm_wait(dev->parent, async); +	if (dev->pm_domain) { +		info = "early power domain "; +		callback = pm_late_early_op(&dev->pm_domain->ops, state); +	} else if (dev->type && dev->type->pm) { +		info = "early type "; +		callback = pm_late_early_op(dev->type->pm, state); +	} else if (dev->class && dev->class->pm) { +		info = "early class "; +		callback = pm_late_early_op(dev->class->pm, state); +	} else if (dev->bus && dev->bus->pm) { +		info = "early bus "; +		callback = pm_late_early_op(dev->bus->pm, state); +	} + +	if (!callback && dev->driver && dev->driver->pm) { +		info = "early driver "; +		callback = pm_late_early_op(dev->driver->pm, state); +	} + +	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_late_suspended = false; + + Out: +	TRACE_RESUME(error); + +	pm_runtime_enable(dev); +	complete_all(&dev->power.completion);  	return error;  } +static void async_resume_early(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_early(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} + +/** + * dpm_resume_early - Execute "early resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + */ +static void dpm_resume_early(pm_message_t state) +{ +	struct device *dev; +	ktime_t starttime = ktime_get(); + +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); +	mutex_lock(&dpm_list_mtx); +	pm_transition = state; + +	/* +	 * Advanced the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_late_early_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_early, dev); +		} +	} + +	while (!list_empty(&dpm_late_early_list)) { +		dev = to_device(dpm_late_early_list.next); +		get_device(dev); +		list_move_tail(&dev->power.entry, &dpm_suspended_list); +		mutex_unlock(&dpm_list_mtx); + +		if (!is_async(dev)) { +			int error; + +			error = device_resume_early(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_early++; +				dpm_save_failed_step(SUSPEND_RESUME_EARLY); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " early", error); +			} +		} +		mutex_lock(&dpm_list_mtx); +		put_device(dev); +	} +	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full(); +	dpm_show_time(starttime, state, "early"); +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); +} + +/** + * dpm_resume_start - Execute "noirq" and "early" device callbacks. + * @state: PM transition of the system being carried out. + */ +void dpm_resume_start(pm_message_t state) +{ +	dpm_resume_noirq(state); +	dpm_resume_early(state); +} +EXPORT_SYMBOL_GPL(dpm_resume_start); +  /**   * device_resume - Execute "resume" callbacks for given device.   
* @dev: Device to handle. @@ -523,51 +727,90 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))   */  static int device_resume(struct device *dev, pm_message_t state, bool async)  { +	pm_callback_t callback = NULL; +	char *info = NULL;  	int error = 0; +	DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); +	if (dev->power.syscore) +		goto Complete; + +	if (dev->power.direct_complete) { +		/* Match the pm_runtime_disable() in __device_suspend(). */ +		pm_runtime_enable(dev); +		goto Complete; +	} +  	dpm_wait(dev->parent, async); +	dpm_watchdog_set(&wd, dev);  	device_lock(dev); -	dev->power.status = DPM_RESUMING; +	/* +	 * This is a fib.  But we'll allow new children to be added below +	 * a resumed device, even if the device hasn't been completed yet. +	 */ +	dev->power.is_prepared = false; -	if (dev->bus) { -		if (dev->bus->pm) { -			pm_dev_dbg(dev, state, ""); -			error = pm_op(dev, dev->bus->pm, state); -		} else if (dev->bus->resume) { -			pm_dev_dbg(dev, state, "legacy "); -			error = legacy_resume(dev, dev->bus->resume); -		} -		if (error) -			goto End; +	if (!dev->power.is_suspended) +		goto Unlock; + +	if (dev->pm_domain) { +		info = "power domain "; +		callback = pm_op(&dev->pm_domain->ops, state); +		goto Driver;  	} -	if (dev->type) { -		if (dev->type->pm) { -			pm_dev_dbg(dev, state, "type "); -			error = pm_op(dev, dev->type->pm, state); -		} -		if (error) -			goto End; +	if (dev->type && dev->type->pm) { +		info = "type "; +		callback = pm_op(dev->type->pm, state); +		goto Driver;  	}  	if (dev->class) {  		if (dev->class->pm) { -			pm_dev_dbg(dev, state, "class "); -			error = pm_op(dev, dev->class->pm, state); +			info = "class "; +			callback = pm_op(dev->class->pm, state); +			goto Driver;  		} else if (dev->class->resume) { -			pm_dev_dbg(dev, state, "legacy class "); -			error = legacy_resume(dev, dev->class->resume); +			info = "legacy class "; +			callback = dev->class->resume; +			goto End;  		}  	} + +	if (dev->bus) { +		if (dev->bus->pm) { +			info = "bus "; +			callback = pm_op(dev->bus->pm, state); +		} else if (dev->bus->resume) { +			info = "legacy bus "; +			callback = dev->bus->resume; +			goto End; +		} +	} + + Driver: +	if (!callback && dev->driver && dev->driver->pm) { +		info = "driver "; +		callback = pm_op(dev->driver->pm, state); +	} +   End: +	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_suspended = false; + + Unlock:  	device_unlock(dev); +	dpm_watchdog_clear(&wd); + + Complete:  	complete_all(&dev->power.completion);  	TRACE_RESUME(error); +  	return error;  } @@ -582,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie)  	put_device(dev);  } -static bool is_async(struct device *dev) -{ -	return dev->power.async_suspend && pm_async_enabled -		&& !pm_trace_is_enabled(); -} -  /**   * dpm_resume - Execute "resume" callbacks for non-sysdev devices.   * @state: PM transition of the system being carried out. @@ -595,53 +832,54 @@ static bool is_async(struct device *dev)   * Execute the appropriate "resume" callback for all devices whose status   * indicates that they are suspended.   
*/ -static void dpm_resume(pm_message_t state) +void dpm_resume(pm_message_t state)  { -	struct list_head list;  	struct device *dev;  	ktime_t starttime = ktime_get(); -	INIT_LIST_HEAD(&list); +	trace_suspend_resume(TPS("dpm_resume"), state.event, true); +	might_sleep(); +  	mutex_lock(&dpm_list_mtx);  	pm_transition = state;  	async_error = 0; -	list_for_each_entry(dev, &dpm_list, power.entry) { -		if (dev->power.status < DPM_OFF) -			continue; - -		INIT_COMPLETION(dev->power.completion); +	list_for_each_entry(dev, &dpm_suspended_list, power.entry) { +		reinit_completion(&dev->power.completion);  		if (is_async(dev)) {  			get_device(dev);  			async_schedule(async_resume, dev);  		}  	} -	while (!list_empty(&dpm_list)) { -		dev = to_device(dpm_list.next); +	while (!list_empty(&dpm_suspended_list)) { +		dev = to_device(dpm_suspended_list.next);  		get_device(dev); -		if (dev->power.status >= DPM_OFF && !is_async(dev)) { +		if (!is_async(dev)) {  			int error;  			mutex_unlock(&dpm_list_mtx);  			error = device_resume(dev, state, false); +			if (error) { +				suspend_stats.failed_resume++; +				dpm_save_failed_step(SUSPEND_RESUME); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, "", error); +			}  			mutex_lock(&dpm_list_mtx); -			if (error) -				pm_dev_err(dev, state, "", error); -		} else if (dev->power.status == DPM_SUSPENDING) { -			/* Allow new children of the device to be registered */ -			dev->power.status = DPM_RESUMING;  		}  		if (!list_empty(&dev->power.entry)) -			list_move_tail(&dev->power.entry, &list); +			list_move_tail(&dev->power.entry, &dpm_prepared_list);  		put_device(dev);  	} -	list_splice(&list, &dpm_list);  	mutex_unlock(&dpm_list_mtx);  	async_synchronize_full();  	dpm_show_time(starttime, state, NULL); + +	cpufreq_resume(); +	trace_suspend_resume(TPS("dpm_resume"), state.event, false);  }  /** @@ -651,24 +889,43 @@ static void dpm_resume(pm_message_t state)   */  static void device_complete(struct device *dev, pm_message_t state)  { +	void (*callback)(struct device *) = NULL; +	char *info = NULL; + +	if (dev->power.syscore) +		return; +  	device_lock(dev); -	if (dev->class && dev->class->pm && dev->class->pm->complete) { -		pm_dev_dbg(dev, state, "completing class "); -		dev->class->pm->complete(dev); +	if (dev->pm_domain) { +		info = "completing power domain "; +		callback = dev->pm_domain->ops.complete; +	} else if (dev->type && dev->type->pm) { +		info = "completing type "; +		callback = dev->type->pm->complete; +	} else if (dev->class && dev->class->pm) { +		info = "completing class "; +		callback = dev->class->pm->complete; +	} else if (dev->bus && dev->bus->pm) { +		info = "completing bus "; +		callback = dev->bus->pm->complete;  	} -	if (dev->type && dev->type->pm && dev->type->pm->complete) { -		pm_dev_dbg(dev, state, "completing type "); -		dev->type->pm->complete(dev); +	if (!callback && dev->driver && dev->driver->pm) { +		info = "completing driver "; +		callback = dev->driver->pm->complete;  	} -	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) { -		pm_dev_dbg(dev, state, "completing "); -		dev->bus->pm->complete(dev); +	if (callback) { +		pm_dev_dbg(dev, state, info); +		trace_device_pm_callback_start(dev, info, state.event); +		callback(dev); +		trace_device_pm_callback_end(dev, 0);  	}  	device_unlock(dev); + +	pm_runtime_put(dev);  }  /** @@ -678,32 +935,31 @@ static void device_complete(struct device *dev, pm_message_t state)   * Execute the ->complete() callbacks for all devices whose PM status is not   * DPM_ON (this 
allows new devices to be registered).   */ -static void dpm_complete(pm_message_t state) +void dpm_complete(pm_message_t state)  {  	struct list_head list; +	trace_suspend_resume(TPS("dpm_complete"), state.event, true); +	might_sleep(); +  	INIT_LIST_HEAD(&list);  	mutex_lock(&dpm_list_mtx); -	transition_started = false; -	while (!list_empty(&dpm_list)) { -		struct device *dev = to_device(dpm_list.prev); +	while (!list_empty(&dpm_prepared_list)) { +		struct device *dev = to_device(dpm_prepared_list.prev);  		get_device(dev); -		if (dev->power.status > DPM_ON) { -			dev->power.status = DPM_ON; -			mutex_unlock(&dpm_list_mtx); +		dev->power.is_prepared = false; +		list_move(&dev->power.entry, &list); +		mutex_unlock(&dpm_list_mtx); -			device_complete(dev, state); -			pm_runtime_put_sync(dev); +		device_complete(dev, state); -			mutex_lock(&dpm_list_mtx); -		} -		if (!list_empty(&dev->power.entry)) -			list_move(&dev->power.entry, &list); +		mutex_lock(&dpm_list_mtx);  		put_device(dev);  	}  	list_splice(&list, &dpm_list);  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_complete"), state.event, false);  }  /** @@ -715,7 +971,6 @@ static void dpm_complete(pm_message_t state)   */  void dpm_resume_end(pm_message_t state)  { -	might_sleep();  	dpm_resume(state);  	dpm_complete(state);  } @@ -753,64 +1008,290 @@ static pm_message_t resume_event(pm_message_t sleep_state)   * The driver of @dev will not receive interrupts while this function is being   * executed.   */ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)  { +	pm_callback_t callback = NULL; +	char *info = NULL;  	int error = 0; -	if (dev->class && dev->class->pm) { -		pm_dev_dbg(dev, state, "LATE class "); -		error = pm_noirq_op(dev, dev->class->pm, state); -		if (error) -			goto End; +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete;  	} -	if (dev->type && dev->type->pm) { -		pm_dev_dbg(dev, state, "LATE type "); -		error = pm_noirq_op(dev, dev->type->pm, state); -		if (error) -			goto End; +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async); + +	if (dev->pm_domain) { +		info = "noirq power domain "; +		callback = pm_noirq_op(&dev->pm_domain->ops, state); +	} else if (dev->type && dev->type->pm) { +		info = "noirq type "; +		callback = pm_noirq_op(dev->type->pm, state); +	} else if (dev->class && dev->class->pm) { +		info = "noirq class "; +		callback = pm_noirq_op(dev->class->pm, state); +	} else if (dev->bus && dev->bus->pm) { +		info = "noirq bus "; +		callback = pm_noirq_op(dev->bus->pm, state);  	} -	if (dev->bus && dev->bus->pm) { -		pm_dev_dbg(dev, state, "LATE "); -		error = pm_noirq_op(dev, dev->bus->pm, state); +	if (!callback && dev->driver && dev->driver->pm) { +		info = "noirq driver "; +		callback = pm_noirq_op(dev->driver->pm, state);  	} -End: +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_noirq_suspended = true; +	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion);  	return error;  } +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_noirq(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} + +	put_device(dev); +} + 
+static int device_suspend_noirq(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_noirq, dev); +		return 0; +	} +	return __device_suspend_noirq(dev, pm_transition, false); +} +  /** - * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.   * @state: PM transition of the system being carried out.   *   * Prevent device drivers from receiving interrupts and call the "noirq" suspend   * handlers for all non-sysdev devices.   */ -int dpm_suspend_noirq(pm_message_t state) +static int dpm_suspend_noirq(pm_message_t state)  { -	struct device *dev;  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); +	cpuidle_pause();  	suspend_device_irqs();  	mutex_lock(&dpm_list_mtx); -	list_for_each_entry_reverse(dev, &dpm_list, power.entry) { -		error = device_suspend_noirq(dev, state); +	pm_transition = state; +	async_error = 0; + +	while (!list_empty(&dpm_late_early_list)) { +		struct device *dev = to_device(dpm_late_early_list.prev); + +		get_device(dev); +		mutex_unlock(&dpm_list_mtx); + +		error = device_suspend_noirq(dev); + +		mutex_lock(&dpm_list_mtx);  		if (error) { -			pm_dev_err(dev, state, " late", error); +			pm_dev_err(dev, state, " noirq", error); +			dpm_save_failed_dev(dev_name(dev)); +			put_device(dev);  			break;  		} -		dev->power.status = DPM_OFF_IRQ; +		if (!list_empty(&dev->power.entry)) +			list_move(&dev->power.entry, &dpm_noirq_list); +		put_device(dev); + +		if (async_error) +			break;  	}  	mutex_unlock(&dpm_list_mtx); -	if (error) +	async_synchronize_full(); +	if (!error) +		error = async_error; + +	if (error) { +		suspend_stats.failed_suspend_noirq++; +		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);  		dpm_resume_noirq(resume_event(state)); +	} else { +		dpm_show_time(starttime, state, "noirq"); +	} +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); +	return error; +} + +/** + * device_suspend_late - Execute a "late suspend" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed. 
+ */ +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) +{ +	pm_callback_t callback = NULL; +	char *info = NULL; +	int error = 0; + +	__pm_runtime_disable(dev, false); + +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async); + +	if (dev->pm_domain) { +		info = "late power domain "; +		callback = pm_late_early_op(&dev->pm_domain->ops, state); +	} else if (dev->type && dev->type->pm) { +		info = "late type "; +		callback = pm_late_early_op(dev->type->pm, state); +	} else if (dev->class && dev->class->pm) { +		info = "late class "; +		callback = pm_late_early_op(dev->class->pm, state); +	} else if (dev->bus && dev->bus->pm) { +		info = "late bus "; +		callback = pm_late_early_op(dev->bus->pm, state); +	} + +	if (!callback && dev->driver && dev->driver->pm) { +		info = "late driver "; +		callback = pm_late_early_op(dev->driver->pm, state); +	} + +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_late_suspended = true;  	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion); +	return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_late(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} +	put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_late, dev); +		return 0; +	} + +	return __device_suspend_late(dev, pm_transition, false); +} + +/** + * dpm_suspend_late - Execute "late suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + */ +static int dpm_suspend_late(pm_message_t state) +{ +	ktime_t starttime = ktime_get(); +	int error = 0; + +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); +	mutex_lock(&dpm_list_mtx); +	pm_transition = state; +	async_error = 0; + +	while (!list_empty(&dpm_suspended_list)) { +		struct device *dev = to_device(dpm_suspended_list.prev); + +		get_device(dev); +		mutex_unlock(&dpm_list_mtx); + +		error = device_suspend_late(dev); + +		mutex_lock(&dpm_list_mtx); +		if (error) { +			pm_dev_err(dev, state, " late", error); +			dpm_save_failed_dev(dev_name(dev)); +			put_device(dev); +			break; +		} +		if (!list_empty(&dev->power.entry)) +			list_move(&dev->power.entry, &dpm_late_early_list); +		put_device(dev); + +		if (async_error) +			break; +	} +	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full(); +	if (error) { +		suspend_stats.failed_suspend_late++; +		dpm_save_failed_step(SUSPEND_SUSPEND_LATE); +		dpm_resume_early(resume_event(state)); +	} else {  		dpm_show_time(starttime, state, "late"); +	} +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);  	return error;  } -EXPORT_SYMBOL_GPL(dpm_suspend_noirq); + +/** + * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. + * @state: PM transition of the system being carried out. 
+ */ +int dpm_suspend_end(pm_message_t state) +{ +	int error = dpm_suspend_late(state); +	if (error) +		return error; + +	error = dpm_suspend_noirq(state); +	if (error) { +		dpm_resume_early(resume_event(state)); +		return error; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(dpm_suspend_end);  /**   * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. @@ -819,17 +1300,20 @@ EXPORT_SYMBOL_GPL(dpm_suspend_noirq);   * @cb: Suspend callback to execute.   */  static int legacy_suspend(struct device *dev, pm_message_t state, -			  int (*cb)(struct device *dev, pm_message_t state)) +			  int (*cb)(struct device *dev, pm_message_t state), +			  char *info)  {  	int error;  	ktime_t calltime;  	calltime = initcall_debug_start(dev); +	trace_device_pm_callback_start(dev, info, state.event);  	error = cb(dev, state); +	trace_device_pm_callback_end(dev, error);  	suspend_report_result(cb, error); -	initcall_debug_report(dev, calltime, error); +	initcall_debug_report(dev, calltime, error, state, info);  	return error;  } @@ -842,52 +1326,114 @@ static int legacy_suspend(struct device *dev, pm_message_t state,   */  static int __device_suspend(struct device *dev, pm_message_t state, bool async)  { +	pm_callback_t callback = NULL; +	char *info = NULL;  	int error = 0; +	DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	dpm_wait_for_children(dev, async); -	device_lock(dev);  	if (async_error) -		goto End; +		goto Complete; + +	/* +	 * If a device configured to wake up the system from sleep states +	 * has been suspended at run time and there's a resume request pending +	 * for it, this is equivalent to the device signaling wakeup, so the +	 * system suspend operation should be aborted. +	 */ +	if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) +		pm_wakeup_event(dev, 0); + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore) +		goto Complete; + +	if (dev->power.direct_complete) { +		if (pm_runtime_status_suspended(dev)) { +			pm_runtime_disable(dev); +			if (pm_runtime_suspended_if_enabled(dev)) +				goto Complete; + +			pm_runtime_enable(dev); +		} +		dev->power.direct_complete = false; +	} + +	dpm_watchdog_set(&wd, dev); +	device_lock(dev); + +	if (dev->pm_domain) { +		info = "power domain "; +		callback = pm_op(&dev->pm_domain->ops, state); +		goto Run; +	} + +	if (dev->type && dev->type->pm) { +		info = "type "; +		callback = pm_op(dev->type->pm, state); +		goto Run; +	}  	if (dev->class) {  		if (dev->class->pm) { -			pm_dev_dbg(dev, state, "class "); -			error = pm_op(dev, dev->class->pm, state); +			info = "class "; +			callback = pm_op(dev->class->pm, state); +			goto Run;  		} else if (dev->class->suspend) {  			pm_dev_dbg(dev, state, "legacy class "); -			error = legacy_suspend(dev, state, dev->class->suspend); -		} -		if (error) +			error = legacy_suspend(dev, state, dev->class->suspend, +						"legacy class ");  			goto End; -	} - -	if (dev->type) { -		if (dev->type->pm) { -			pm_dev_dbg(dev, state, "type "); -			error = pm_op(dev, dev->type->pm, state);  		} -		if (error) -			goto End;  	}  	if (dev->bus) {  		if (dev->bus->pm) { -			pm_dev_dbg(dev, state, ""); -			error = pm_op(dev, dev->bus->pm, state); +			info = "bus "; +			callback = pm_op(dev->bus->pm, state);  		} else if (dev->bus->suspend) { -			pm_dev_dbg(dev, state, "legacy "); -			error = legacy_suspend(dev, state, dev->bus->suspend); +			pm_dev_dbg(dev, state, "legacy bus "); +			error = legacy_suspend(dev, state, dev->bus->suspend, +						"legacy bus "); +			goto End; 
 		}  	} -	if (!error) -		dev->power.status = DPM_OFF; + Run: +	if (!callback && dev->driver && dev->driver->pm) { +		info = "driver "; +		callback = pm_op(dev->driver->pm, state); +	} + +	error = dpm_run_callback(callback, dev, state, info);   End: +	if (!error) { +		struct device *parent = dev->parent; + +		dev->power.is_suspended = true; +		if (parent) { +			spin_lock_irq(&parent->power.lock); + +			dev->parent->power.direct_complete = false; +			if (dev->power.wakeup_path +			    && !dev->parent->power.ignore_children) +				dev->parent->power.wakeup_path = true; + +			spin_unlock_irq(&parent->power.lock); +		} +	} +  	device_unlock(dev); -	complete_all(&dev->power.completion); +	dpm_watchdog_clear(&wd); + Complete: +	complete_all(&dev->power.completion);  	if (error)  		async_error = error; @@ -900,15 +1446,17 @@ static void async_suspend(void *data, async_cookie_t cookie)  	int error;  	error = __device_suspend(dev, pm_transition, true); -	if (error) +	if (error) { +		dpm_save_failed_dev(dev_name(dev));  		pm_dev_err(dev, pm_transition, " async", error); +	}  	put_device(dev);  }  static int device_suspend(struct device *dev)  { -	INIT_COMPLETION(dev->power.completion); +	reinit_completion(&dev->power.completion);  	if (pm_async_enabled && dev->power.async_suspend) {  		get_device(dev); @@ -923,18 +1471,21 @@ static int device_suspend(struct device *dev)   * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.   * @state: PM transition of the system being carried out.   */ -static int dpm_suspend(pm_message_t state) +int dpm_suspend(pm_message_t state)  { -	struct list_head list;  	ktime_t starttime = ktime_get();  	int error = 0; -	INIT_LIST_HEAD(&list); +	trace_suspend_resume(TPS("dpm_suspend"), state.event, true); +	might_sleep(); + +	cpufreq_suspend(); +  	mutex_lock(&dpm_list_mtx);  	pm_transition = state;  	async_error = 0; -	while (!list_empty(&dpm_list)) { -		struct device *dev = to_device(dpm_list.prev); +	while (!list_empty(&dpm_prepared_list)) { +		struct device *dev = to_device(dpm_prepared_list.prev);  		get_device(dev);  		mutex_unlock(&dpm_list_mtx); @@ -944,22 +1495,26 @@ static int dpm_suspend(pm_message_t state)  		mutex_lock(&dpm_list_mtx);  		if (error) {  			pm_dev_err(dev, state, "", error); +			dpm_save_failed_dev(dev_name(dev));  			put_device(dev);  			break;  		}  		if (!list_empty(&dev->power.entry)) -			list_move(&dev->power.entry, &list); +			list_move(&dev->power.entry, &dpm_suspended_list);  		put_device(dev);  		if (async_error)  			break;  	} -	list_splice(&list, dpm_list.prev);  	mutex_unlock(&dpm_list_mtx);  	async_synchronize_full();  	if (!error)  		error = async_error; -	if (!error) +	if (error) { +		suspend_stats.failed_suspend++; +		dpm_save_failed_step(SUSPEND_SUSPEND); +	} else  		dpm_show_time(starttime, state, NULL); +	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);  	return error;  } @@ -973,35 +1528,68 @@ static int dpm_suspend(pm_message_t state)   */  static int device_prepare(struct device *dev, pm_message_t state)  { -	int error = 0; +	int (*callback)(struct device *) = NULL; +	char *info = NULL; +	int ret = 0; + +	if (dev->power.syscore) +		return 0; + +	/* +	 * If a device's parent goes into runtime suspend at the wrong time, +	 * it won't be possible to resume the device.  To prevent this we +	 * block runtime suspend here, during the prepare phase, and allow +	 * it again during the complete phase. 
+	 */ +	pm_runtime_get_noresume(dev);  	device_lock(dev); -	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { -		pm_dev_dbg(dev, state, "preparing "); -		error = dev->bus->pm->prepare(dev); -		suspend_report_result(dev->bus->pm->prepare, error); -		if (error) -			goto End; +	dev->power.wakeup_path = device_may_wakeup(dev); + +	if (dev->pm_domain) { +		info = "preparing power domain "; +		callback = dev->pm_domain->ops.prepare; +	} else if (dev->type && dev->type->pm) { +		info = "preparing type "; +		callback = dev->type->pm->prepare; +	} else if (dev->class && dev->class->pm) { +		info = "preparing class "; +		callback = dev->class->pm->prepare; +	} else if (dev->bus && dev->bus->pm) { +		info = "preparing bus "; +		callback = dev->bus->pm->prepare;  	} -	if (dev->type && dev->type->pm && dev->type->pm->prepare) { -		pm_dev_dbg(dev, state, "preparing type "); -		error = dev->type->pm->prepare(dev); -		suspend_report_result(dev->type->pm->prepare, error); -		if (error) -			goto End; +	if (!callback && dev->driver && dev->driver->pm) { +		info = "preparing driver "; +		callback = dev->driver->pm->prepare;  	} -	if (dev->class && dev->class->pm && dev->class->pm->prepare) { -		pm_dev_dbg(dev, state, "preparing class "); -		error = dev->class->pm->prepare(dev); -		suspend_report_result(dev->class->pm->prepare, error); +	if (callback) { +		trace_device_pm_callback_start(dev, info, state.event); +		ret = callback(dev); +		trace_device_pm_callback_end(dev, ret);  	} - End: +  	device_unlock(dev); -	return error; +	if (ret < 0) { +		suspend_report_result(callback, ret); +		pm_runtime_put(dev); +		return ret; +	} +	/* +	 * A positive return value from ->prepare() means "this device appears +	 * to be runtime-suspended and its state is fine, so if it really is +	 * runtime-suspended, you can leave it in that state provided that you +	 * will do the same thing with all of its descendants".  This only +	 * applies to suspend transitions, however. +	 */ +	spin_lock_irq(&dev->power.lock); +	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; +	spin_unlock_irq(&dev->power.lock); +	return 0;  }  /** @@ -1010,51 +1598,42 @@ static int device_prepare(struct device *dev, pm_message_t state)   *   * Execute the ->prepare() callback(s) for all devices.   */ -static int dpm_prepare(pm_message_t state) +int dpm_prepare(pm_message_t state)  { -	struct list_head list;  	int error = 0; -	INIT_LIST_HEAD(&list); +	trace_suspend_resume(TPS("dpm_prepare"), state.event, true); +	might_sleep(); +  	mutex_lock(&dpm_list_mtx); -	transition_started = true;  	while (!list_empty(&dpm_list)) {  		struct device *dev = to_device(dpm_list.next);  		get_device(dev); -		dev->power.status = DPM_PREPARING;  		mutex_unlock(&dpm_list_mtx); -		pm_runtime_get_noresume(dev); -		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { -			/* Wake-up requested during system sleep transition. 
*/ -			pm_runtime_put_sync(dev); -			error = -EBUSY; -		} else { -			error = device_prepare(dev, state); -		} +		error = device_prepare(dev, state);  		mutex_lock(&dpm_list_mtx);  		if (error) { -			dev->power.status = DPM_ON;  			if (error == -EAGAIN) {  				put_device(dev);  				error = 0;  				continue;  			} -			printk(KERN_ERR "PM: Failed to prepare device %s " -				"for power transition: error %d\n", -				kobject_name(&dev->kobj), error); +			printk(KERN_INFO "PM: Device %s not prepared " +				"for power transition: code %d\n", +				dev_name(dev), error);  			put_device(dev);  			break;  		} -		dev->power.status = DPM_SUSPENDING; +		dev->power.is_prepared = true;  		if (!list_empty(&dev->power.entry)) -			list_move_tail(&dev->power.entry, &list); +			list_move_tail(&dev->power.entry, &dpm_prepared_list);  		put_device(dev);  	} -	list_splice(&list, &dpm_list);  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);  	return error;  } @@ -1069,9 +1648,11 @@ int dpm_suspend_start(pm_message_t state)  {  	int error; -	might_sleep();  	error = dpm_prepare(state); -	if (!error) +	if (error) { +		suspend_stats.failed_prepare++; +		dpm_save_failed_step(SUSPEND_PREPARE); +	} else  		error = dpm_suspend(state);  	return error;  } @@ -1095,3 +1676,25 @@ int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)  	return async_error;  }  EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); + +/** + * dpm_for_each_dev - device iterator. + * @data: data for the callback. + * @fn: function to be called for each device. + * + * Iterate over devices in dpm_list, and call @fn for each device, + * passing it @data. + */ +void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) +{ +	struct device *dev; + +	if (!fn) +		return; + +	device_pm_lock(); +	list_for_each_entry(dev, &dpm_list, power.entry) +		fn(dev, data); +	device_pm_unlock(); +} +EXPORT_SYMBOL_GPL(dpm_for_each_dev);
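
The suspend and resume paths in this patch resolve their callbacks in a fixed order: pm_domain first, then type, class, and bus, with the driver's own dev_pm_ops as a final fallback. As a minimal sketch, not part of this patch and with all foo_* names hypothetical, a platform driver supplies the system-sleep callbacks like this; depending on the bus, the core reaches them either through the bus's dev_pm_ops or through the driver-fallback branch added here:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical driver callbacks; a nonzero return aborts the transition. */
static int foo_suspend(struct device *dev)
{
	/* quiesce the device before the "late" and "noirq" phases run */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reinitialize; runs after the "noirq" and "early" resume phases */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

With ->prepare() now allowed to return a positive value, such a driver could additionally opt into the direct_complete path introduced above, but plain suspend/resume callbacks remain the common case.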

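The dpm_for_each_dev() helper exported at the end of the patch walks dpm_list under device_pm_lock() and calls the supplied function once per device. A hypothetical caller, with count_dev() and dpm_count_devices() as illustrative names rather than kernel code, could count the devices currently under PM control:

#include <linux/device.h>
#include <linux/pm.h>

static void count_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;	/* invoked once per device on dpm_list */
}

static unsigned int dpm_count_devices(void)
{
	unsigned int count = 0;

	/* dpm_for_each_dev() takes device_pm_lock(), so it may sleep */
	dpm_for_each_dev(&count, count_dev);
	return count;
}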