Diffstat (limited to 'drivers/base/power')
-rw-r--r--   drivers/base/power/Makefile          |   3
-rw-r--r--   drivers/base/power/clock_ops.c       |  31
-rw-r--r--   drivers/base/power/common.c          |   1
-rw-r--r--   drivers/base/power/domain.c          |  18
-rw-r--r--   drivers/base/power/domain_governor.c |   1
-rw-r--r--   drivers/base/power/generic_ops.c     |   6
-rw-r--r--   drivers/base/power/main.c            | 446
-rw-r--r--   drivers/base/power/opp.c             | 218
-rw-r--r--   drivers/base/power/power.h           |   4
-rw-r--r--   drivers/base/power/qos.c             | 220
-rw-r--r--   drivers/base/power/runtime.c         | 169
-rw-r--r--   drivers/base/power/sysfs.c           |  97
-rw-r--r--   drivers/base/power/wakeup.c          |   6
13 files changed, 875 insertions, 345 deletions
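The opp.c hunks below rename the OPP helpers from opp_* to dev_pm_opp_* and move the public header to <linux/pm_opp.h>. As a rough illustration of the renamed API from a consumer's point of view, here is a minimal sketch of looking up an OPP under RCU, following the locking rules stated in the kernel-doc in this patch; the function name, device pointer, target rate, and error handling are hypothetical and not part of the patch itself.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/*
	 * Hypothetical consumer: find the ceiling OPP for a requested rate.
	 * The RCU read lock must be held across dev_pm_opp_find_freq_ceil()
	 * and dev_pm_opp_get_voltage(), per the kernel-doc in opp.c.
	 */
	static int example_set_rate(struct device *dev, unsigned long target_hz)
	{
		struct dev_pm_opp *opp;
		unsigned long freq = target_hz;
		unsigned long u_volt;

		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			return PTR_ERR(opp);
		}
		u_volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();

		dev_dbg(dev, "using OPP %lu Hz at %lu uV\n", freq, u_volt);
		/* program the clock/regulator here (not shown) */
		return 0;
	}
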
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c..1cb8544598d 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o runtime.o  obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME)	+= runtime.o  obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o  obj-$(CONFIG_PM_OPP)	+= opp.o  obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o domain_governor.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 9d8fde70939..b99e6c06ee6 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/device.h>  #include <linux/io.h> @@ -33,6 +32,21 @@ struct pm_clock_entry {  };  /** + * pm_clk_enable - Enable a clock, reporting any errors + * @dev: The device for the given clock + * @clk: The clock being enabled. + */ +static inline int __pm_clk_enable(struct device *dev, struct clk *clk) +{ +	int ret = clk_enable(clk); +	if (ret) +		dev_err(dev, "%s: failed to enable clk %p, error %d\n", +			__func__, clk, ret); + +	return ret; +} + +/**   * pm_clk_acquire - Acquire a device clock.   * @dev: Device whose clock is to be acquired.   * @ce: PM clock entry corresponding to the clock. @@ -43,6 +57,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)  	if (IS_ERR(ce->clk)) {  		ce->status = PCE_STATUS_ERROR;  	} else { +		clk_prepare(ce->clk);  		ce->status = PCE_STATUS_ACQUIRED;  		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);  	} @@ -99,10 +114,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)  	if (ce->status < PCE_STATUS_ERROR) {  		if (ce->status == PCE_STATUS_ENABLED) -			clk_disable_unprepare(ce->clk); +			clk_disable(ce->clk); -		if (ce->status >= PCE_STATUS_ACQUIRED) +		if (ce->status >= PCE_STATUS_ACQUIRED) { +			clk_unprepare(ce->clk);  			clk_put(ce->clk); +		}  	}  	kfree(ce->con_id); @@ -249,6 +266,7 @@ int pm_clk_resume(struct device *dev)  	struct pm_subsys_data *psd = dev_to_psd(dev);  	struct pm_clock_entry *ce;  	unsigned long flags; +	int ret;  	dev_dbg(dev, "%s()\n", __func__); @@ -259,8 +277,9 @@ int pm_clk_resume(struct device *dev)  	list_for_each_entry(ce, &psd->clock_list, node) {  		if (ce->status < PCE_STATUS_ERROR) { -			clk_enable(ce->clk); -			ce->status = PCE_STATUS_ENABLED; +			ret = __pm_clk_enable(dev, ce->clk); +			if (!ret) +				ce->status = PCE_STATUS_ENABLED;  		}  	} @@ -376,7 +395,7 @@ int pm_clk_resume(struct device *dev)  	spin_lock_irqsave(&psd->lock, flags);  	list_for_each_entry(ce, &psd->clock_list, node) -		clk_enable(ce->clk); +		__pm_clk_enable(dev, ce->clk);  	spin_unlock_irqrestore(&psd->lock, flags); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 5da91404130..df2e5eeaeb0 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/device.h>  #include <linux/export.h> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index bfb8955c406..eee55c1e5fd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   
*/ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/io.h>  #include <linux/pm_runtime.h> @@ -42,7 +41,7 @@  	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\  	if (!__retval && __elapsed > __td->field) {				\  		__td->field = __elapsed;					\ -		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\ +		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\  			__elapsed);						\  		genpd->max_off_time_changed = true;				\  		__td->constraint_changed = true;				\ @@ -106,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)  static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)  {  	atomic_inc(&genpd->sd_count); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  }  static void genpd_acquire_lock(struct generic_pm_domain *genpd) @@ -706,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev)  	return 0;  } +static bool pd_ignore_unused; +static int __init pd_ignore_unused_setup(char *__unused) +{ +	pd_ignore_unused = true; +	return 1; +} +__setup("pd_ignore_unused", pd_ignore_unused_setup); +  /**   * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.   */ @@ -713,6 +720,11 @@ void pm_genpd_poweroff_unused(void)  {  	struct generic_pm_domain *genpd; +	if (pd_ignore_unused) { +		pr_warn("genpd: Not disabling unused power domains\n"); +		return; +	} +  	mutex_lock(&gpd_list_lock);  	list_for_each_entry(genpd, &gpd_list, gpd_list_node) diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 28dee3053f1..a089e3bcdfb 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/pm_domain.h>  #include <linux/pm_qos.h> diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 5ee030a864f..96a92db83ca 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -10,7 +10,7 @@  #include <linux/pm_runtime.h>  #include <linux/export.h> -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM  /**   * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.   * @dev: Device to suspend. @@ -48,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev)  	return ret;  }  EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */  #ifdef CONFIG_PM_SLEEP  /** @@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)  EXPORT_SYMBOL_GPL(pm_generic_restore);  /** - * pm_generic_complete - Generic routine competing a device power transition. + * pm_generic_complete - Generic routine completing a device power transition.   * @dev: Device to handle.   *   * Complete a device power transition during a system-wide power transition. 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9f098a82cf0..bf412961a93 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -29,7 +29,10 @@  #include <linux/async.h>  #include <linux/suspend.h>  #include <trace/events/power.h> +#include <linux/cpufreq.h>  #include <linux/cpuidle.h> +#include <linux/timer.h> +  #include "../base.h"  #include "power.h" @@ -89,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)  {  	dev->power.is_prepared = false;  	dev->power.is_suspended = false; +	dev->power.is_noirq_suspended = false; +	dev->power.is_late_suspended = false;  	init_completion(&dev->power.completion);  	complete_all(&dev->power.completion);  	dev->power.wakeup = NULL; @@ -209,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,  		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),  			error, (unsigned long long)nsecs >> 10);  	} - -	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event), -				    error);  }  /** @@ -382,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,  	calltime = initcall_debug_start(dev);  	pm_dev_dbg(dev, state, info); +	trace_device_pm_callback_start(dev, info, state.event);  	error = cb(dev); +	trace_device_pm_callback_end(dev, error);  	suspend_report_result(cb, error);  	initcall_debug_report(dev, calltime, error, state, info); @@ -390,6 +394,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,  	return error;  } +#ifdef CONFIG_DPM_WATCHDOG +struct dpm_watchdog { +	struct device		*dev; +	struct task_struct	*tsk; +	struct timer_list	timer; +}; + +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ +	struct dpm_watchdog wd + +/** + * dpm_watchdog_handler - Driver suspend / resume watchdog handler. + * @data: Watchdog object address. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so panic() to + * capture a crash-dump in pstore. + */ +static void dpm_watchdog_handler(unsigned long data) +{ +	struct dpm_watchdog *wd = (void *)data; + +	dev_emerg(wd->dev, "**** DPM device timeout ****\n"); +	show_stack(wd->tsk, NULL); +	panic("%s %s: unrecoverable failure\n", +		dev_driver_string(wd->dev), dev_name(wd->dev)); +} + +/** + * dpm_watchdog_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) +{ +	struct timer_list *timer = &wd->timer; + +	wd->dev = dev; +	wd->tsk = current; + +	init_timer_on_stack(timer); +	/* use same timeout value for both suspend and resume */ +	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; +	timer->function = dpm_watchdog_handler; +	timer->data = (unsigned long)wd; +	add_timer(timer); +} + +/** + * dpm_watchdog_clear - Disable suspend/resume watchdog. + * @wd: Watchdog to disable. + */ +static void dpm_watchdog_clear(struct dpm_watchdog *wd) +{ +	struct timer_list *timer = &wd->timer; + +	del_timer_sync(timer); +	destroy_timer_on_stack(timer); +} +#else +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) +#define dpm_watchdog_set(x, y) +#define dpm_watchdog_clear(x) +#endif +  /*------------------------- Resume routines -------------------------*/  /** @@ -400,7 +469,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,   * The driver of @dev will not receive interrupts while this function is being   * executed.   
*/ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; @@ -409,9 +478,14 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); -	if (dev->power.syscore) +	if (dev->power.syscore || dev->power.direct_complete)  		goto Out; +	if (!dev->power.is_noirq_suspended) +		goto Out; + +	dpm_wait(dev->parent, async); +  	if (dev->pm_domain) {  		info = "noirq power domain ";  		callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -432,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)  	}  	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_noirq_suspended = false;   Out: +	complete_all(&dev->power.completion);  	TRACE_RESUME(error);  	return error;  } +static bool is_async(struct device *dev) +{ +	return dev->power.async_suspend && pm_async_enabled +		&& !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_noirq(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} +  /**   * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.   * @state: PM transition of the system being carried out. @@ -447,32 +541,53 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)   */  static void dpm_resume_noirq(pm_message_t state)  { +	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);  	mutex_lock(&dpm_list_mtx); -	while (!list_empty(&dpm_noirq_list)) { -		struct device *dev = to_device(dpm_noirq_list.next); -		int error; +	pm_transition = state; + +	/* +	 * Advanced the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_noirq_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_noirq, dev); +		} +	} +	while (!list_empty(&dpm_noirq_list)) { +		dev = to_device(dpm_noirq_list.next);  		get_device(dev);  		list_move_tail(&dev->power.entry, &dpm_late_early_list);  		mutex_unlock(&dpm_list_mtx); -		error = device_resume_noirq(dev, state); -		if (error) { -			suspend_stats.failed_resume_noirq++; -			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); -			dpm_save_failed_dev(dev_name(dev)); -			pm_dev_err(dev, state, " noirq", error); +		if (!is_async(dev)) { +			int error; + +			error = device_resume_noirq(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_noirq++; +				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " noirq", error); +			}  		}  		mutex_lock(&dpm_list_mtx);  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full();  	dpm_show_time(starttime, state, "noirq");  	resume_device_irqs();  	cpuidle_resume(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);  }  /** @@ -482,7 +597,7 @@ static void dpm_resume_noirq(pm_message_t state)   *   * Runtime PM is disabled for @dev while this function is being executed.   
*/ -static int device_resume_early(struct device *dev, pm_message_t state) +static int device_resume_early(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; @@ -491,9 +606,14 @@ static int device_resume_early(struct device *dev, pm_message_t state)  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); -	if (dev->power.syscore) +	if (dev->power.syscore || dev->power.direct_complete) +		goto Out; + +	if (!dev->power.is_late_suspended)  		goto Out; +	dpm_wait(dev->parent, async); +  	if (dev->pm_domain) {  		info = "early power domain ";  		callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -514,44 +634,78 @@ static int device_resume_early(struct device *dev, pm_message_t state)  	}  	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_late_suspended = false;   Out:  	TRACE_RESUME(error);  	pm_runtime_enable(dev); +	complete_all(&dev->power.completion);  	return error;  } +static void async_resume_early(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_early(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} +  /**   * dpm_resume_early - Execute "early resume" callbacks for all devices.   * @state: PM transition of the system being carried out.   */  static void dpm_resume_early(pm_message_t state)  { +	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);  	mutex_lock(&dpm_list_mtx); -	while (!list_empty(&dpm_late_early_list)) { -		struct device *dev = to_device(dpm_late_early_list.next); -		int error; +	pm_transition = state; +	/* +	 * Advanced the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_late_early_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_early, dev); +		} +	} + +	while (!list_empty(&dpm_late_early_list)) { +		dev = to_device(dpm_late_early_list.next);  		get_device(dev);  		list_move_tail(&dev->power.entry, &dpm_suspended_list);  		mutex_unlock(&dpm_list_mtx); -		error = device_resume_early(dev, state); -		if (error) { -			suspend_stats.failed_resume_early++; -			dpm_save_failed_step(SUSPEND_RESUME_EARLY); -			dpm_save_failed_dev(dev_name(dev)); -			pm_dev_err(dev, state, " early", error); -		} +		if (!is_async(dev)) { +			int error; +			error = device_resume_early(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_early++; +				dpm_save_failed_step(SUSPEND_RESUME_EARLY); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " early", error); +			} +		}  		mutex_lock(&dpm_list_mtx);  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full();  	dpm_show_time(starttime, state, "early"); +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);  }  /** @@ -576,6 +730,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)  	pm_callback_t callback = NULL;  	char *info = NULL;  	int error = 0; +	DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); @@ -583,7 +738,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)  	if (dev->power.syscore)  		goto Complete; +	if (dev->power.direct_complete) { +		/* Match the pm_runtime_disable() in __device_suspend(). 
*/ +		pm_runtime_enable(dev); +		goto Complete; +	} +  	dpm_wait(dev->parent, async); +	dpm_watchdog_set(&wd, dev);  	device_lock(dev);  	/* @@ -642,6 +804,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)   Unlock:  	device_unlock(dev); +	dpm_watchdog_clear(&wd);   Complete:  	complete_all(&dev->power.completion); @@ -662,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie)  	put_device(dev);  } -static bool is_async(struct device *dev) -{ -	return dev->power.async_suspend && pm_async_enabled -		&& !pm_trace_is_enabled(); -} -  /**   * dpm_resume - Execute "resume" callbacks for non-sysdev devices.   * @state: PM transition of the system being carried out. @@ -680,6 +837,7 @@ void dpm_resume(pm_message_t state)  	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume"), state.event, true);  	might_sleep();  	mutex_lock(&dpm_list_mtx); @@ -687,7 +845,7 @@ void dpm_resume(pm_message_t state)  	async_error = 0;  	list_for_each_entry(dev, &dpm_suspended_list, power.entry) { -		INIT_COMPLETION(dev->power.completion); +		reinit_completion(&dev->power.completion);  		if (is_async(dev)) {  			get_device(dev);  			async_schedule(async_resume, dev); @@ -719,6 +877,9 @@ void dpm_resume(pm_message_t state)  	mutex_unlock(&dpm_list_mtx);  	async_synchronize_full();  	dpm_show_time(starttime, state, NULL); + +	cpufreq_resume(); +	trace_suspend_resume(TPS("dpm_resume"), state.event, false);  }  /** @@ -757,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)  	if (callback) {  		pm_dev_dbg(dev, state, info); +		trace_device_pm_callback_start(dev, info, state.event);  		callback(dev); +		trace_device_pm_callback_end(dev, 0);  	}  	device_unlock(dev); @@ -776,6 +939,7 @@ void dpm_complete(pm_message_t state)  {  	struct list_head list; +	trace_suspend_resume(TPS("dpm_complete"), state.event, true);  	might_sleep();  	INIT_LIST_HEAD(&list); @@ -795,6 +959,7 @@ void dpm_complete(pm_message_t state)  	}  	list_splice(&list, &dpm_list);  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_complete"), state.event, false);  }  /** @@ -843,13 +1008,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)   * The driver of @dev will not receive interrupts while this function is being   * executed.   
*/ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; +	int error = 0; -	if (dev->power.syscore) -		return 0; +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async);  	if (dev->pm_domain) {  		info = "noirq power domain "; @@ -870,7 +1046,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)  		callback = pm_noirq_op(dev->driver->pm, state);  	} -	return dpm_run_callback(callback, dev, state, info); +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_noirq_suspended = true; +	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion); +	return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_noirq(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} + +	put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_noirq, dev); +		return 0; +	} +	return __device_suspend_noirq(dev, pm_transition, false);  }  /** @@ -885,22 +1095,24 @@ static int dpm_suspend_noirq(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);  	cpuidle_pause();  	suspend_device_irqs();  	mutex_lock(&dpm_list_mtx); +	pm_transition = state; +	async_error = 0; +  	while (!list_empty(&dpm_late_early_list)) {  		struct device *dev = to_device(dpm_late_early_list.prev);  		get_device(dev);  		mutex_unlock(&dpm_list_mtx); -		error = device_suspend_noirq(dev, state); +		error = device_suspend_noirq(dev);  		mutex_lock(&dpm_list_mtx);  		if (error) {  			pm_dev_err(dev, state, " noirq", error); -			suspend_stats.failed_suspend_noirq++; -			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);  			dpm_save_failed_dev(dev_name(dev));  			put_device(dev);  			break; @@ -909,16 +1121,22 @@ static int dpm_suspend_noirq(pm_message_t state)  			list_move(&dev->power.entry, &dpm_noirq_list);  		put_device(dev); -		if (pm_wakeup_pending()) { -			error = -EBUSY; +		if (async_error)  			break; -		}  	}  	mutex_unlock(&dpm_list_mtx); -	if (error) +	async_synchronize_full(); +	if (!error) +		error = async_error; + +	if (error) { +		suspend_stats.failed_suspend_noirq++; +		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);  		dpm_resume_noirq(resume_event(state)); -	else +	} else {  		dpm_show_time(starttime, state, "noirq"); +	} +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);  	return error;  } @@ -929,15 +1147,26 @@ static int dpm_suspend_noirq(pm_message_t state)   *   * Runtime PM is disabled for @dev while this function is being executed.   
*/ -static int device_suspend_late(struct device *dev, pm_message_t state) +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; +	int error = 0;  	__pm_runtime_disable(dev, false); -	if (dev->power.syscore) -		return 0; +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async);  	if (dev->pm_domain) {  		info = "late power domain "; @@ -958,7 +1187,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)  		callback = pm_late_early_op(dev->driver->pm, state);  	} -	return dpm_run_callback(callback, dev, state, info); +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_late_suspended = true; +	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion); +	return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_late(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} +	put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_late, dev); +		return 0; +	} + +	return __device_suspend_late(dev, pm_transition, false);  }  /** @@ -970,20 +1233,22 @@ static int dpm_suspend_late(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);  	mutex_lock(&dpm_list_mtx); +	pm_transition = state; +	async_error = 0; +  	while (!list_empty(&dpm_suspended_list)) {  		struct device *dev = to_device(dpm_suspended_list.prev);  		get_device(dev);  		mutex_unlock(&dpm_list_mtx); -		error = device_suspend_late(dev, state); +		error = device_suspend_late(dev);  		mutex_lock(&dpm_list_mtx);  		if (error) {  			pm_dev_err(dev, state, " late", error); -			suspend_stats.failed_suspend_late++; -			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);  			dpm_save_failed_dev(dev_name(dev));  			put_device(dev);  			break; @@ -992,17 +1257,19 @@ static int dpm_suspend_late(pm_message_t state)  			list_move(&dev->power.entry, &dpm_late_early_list);  		put_device(dev); -		if (pm_wakeup_pending()) { -			error = -EBUSY; +		if (async_error)  			break; -		}  	}  	mutex_unlock(&dpm_list_mtx); -	if (error) +	async_synchronize_full(); +	if (error) { +		suspend_stats.failed_suspend_late++; +		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);  		dpm_resume_early(resume_event(state)); -	else +	} else {  		dpm_show_time(starttime, state, "late"); - +	} +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);  	return error;  } @@ -1041,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state,  	calltime = initcall_debug_start(dev); +	trace_device_pm_callback_start(dev, info, state.event);  	error = cb(dev, state); +	trace_device_pm_callback_end(dev, error);  	suspend_report_result(cb, error);  	initcall_debug_report(dev, calltime, error, state, info); @@ -1060,6 +1329,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)  	pm_callback_t callback = NULL;  	char *info = NULL;  	int error = 0; +	
DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	dpm_wait_for_children(dev, async); @@ -1083,6 +1353,18 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)  	if (dev->power.syscore)  		goto Complete; +	if (dev->power.direct_complete) { +		if (pm_runtime_status_suspended(dev)) { +			pm_runtime_disable(dev); +			if (pm_runtime_suspended_if_enabled(dev)) +				goto Complete; + +			pm_runtime_enable(dev); +		} +		dev->power.direct_complete = false; +	} + +	dpm_watchdog_set(&wd, dev);  	device_lock(dev);  	if (dev->pm_domain) { @@ -1132,13 +1414,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)   End:  	if (!error) { +		struct device *parent = dev->parent; +  		dev->power.is_suspended = true; -		if (dev->power.wakeup_path -		    && dev->parent && !dev->parent->power.ignore_children) -			dev->parent->power.wakeup_path = true; +		if (parent) { +			spin_lock_irq(&parent->power.lock); + +			dev->parent->power.direct_complete = false; +			if (dev->power.wakeup_path +			    && !dev->parent->power.ignore_children) +				dev->parent->power.wakeup_path = true; + +			spin_unlock_irq(&parent->power.lock); +		}  	}  	device_unlock(dev); +	dpm_watchdog_clear(&wd);   Complete:  	complete_all(&dev->power.completion); @@ -1164,7 +1456,7 @@ static void async_suspend(void *data, async_cookie_t cookie)  static int device_suspend(struct device *dev)  { -	INIT_COMPLETION(dev->power.completion); +	reinit_completion(&dev->power.completion);  	if (pm_async_enabled && dev->power.async_suspend) {  		get_device(dev); @@ -1184,8 +1476,11 @@ int dpm_suspend(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);  	might_sleep(); +	cpufreq_suspend(); +  	mutex_lock(&dpm_list_mtx);  	pm_transition = state;  	async_error = 0; @@ -1219,6 +1514,7 @@ int dpm_suspend(pm_message_t state)  		dpm_save_failed_step(SUSPEND_SUSPEND);  	} else  		dpm_show_time(starttime, state, NULL); +	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);  	return error;  } @@ -1234,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state)  {  	int (*callback)(struct device *) = NULL;  	char *info = NULL; -	int error = 0; +	int ret = 0;  	if (dev->power.syscore)  		return 0; @@ -1271,13 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state)  	}  	if (callback) { -		error = callback(dev); -		suspend_report_result(callback, error); +		trace_device_pm_callback_start(dev, info, state.event); +		ret = callback(dev); +		trace_device_pm_callback_end(dev, ret);  	}  	device_unlock(dev); -	return error; +	if (ret < 0) { +		suspend_report_result(callback, ret); +		pm_runtime_put(dev); +		return ret; +	} +	/* +	 * A positive return value from ->prepare() means "this device appears +	 * to be runtime-suspended and its state is fine, so if it really is +	 * runtime-suspended, you can leave it in that state provided that you +	 * will do the same thing with all of its descendants".  This only +	 * applies to suspend transitions, however. 
+	 */ +	spin_lock_irq(&dev->power.lock); +	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; +	spin_unlock_irq(&dev->power.lock); +	return 0;  }  /** @@ -1290,6 +1602,7 @@ int dpm_prepare(pm_message_t state)  {  	int error = 0; +	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);  	might_sleep();  	mutex_lock(&dpm_list_mtx); @@ -1320,6 +1633,7 @@ int dpm_prepare(pm_message_t state)  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);  	return error;  } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index ef89897c604..89ced955faf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -14,14 +14,12 @@  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/err.h> -#include <linux/init.h>  #include <linux/slab.h> -#include <linux/cpufreq.h>  #include <linux/device.h>  #include <linux/list.h>  #include <linux/rculist.h>  #include <linux/rcupdate.h> -#include <linux/opp.h> +#include <linux/pm_opp.h>  #include <linux/of.h>  #include <linux/export.h> @@ -42,7 +40,7 @@   */  /** - * struct opp - Generic OPP description structure + * struct dev_pm_opp - Generic OPP description structure   * @node:	opp list node. The nodes are maintained throughout the lifetime   *		of boot. It is expected only an optimal set of OPPs are   *		added to the library by the SoC framework. @@ -59,7 +57,7 @@   *   * This structure stores the OPP information for a given device.   */ -struct opp { +struct dev_pm_opp {  	struct list_head node;  	bool available; @@ -136,7 +134,7 @@ static struct device_opp *find_device_opp(struct device *dev)  }  /** - * opp_get_voltage() - Gets the voltage corresponding to an available opp + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp   * @opp:	opp for which voltage has to be returned for   *   * Return voltage in micro volt corresponding to the opp, else @@ -150,9 +148,9 @@ static struct device_opp *find_device_opp(struct device *dev)   * prior to unlocking with rcu_read_unlock() to maintain the integrity of the   * pointer.   */ -unsigned long opp_get_voltage(struct opp *opp) +unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)  { -	struct opp *tmp_opp; +	struct dev_pm_opp *tmp_opp;  	unsigned long v = 0;  	tmp_opp = rcu_dereference(opp); @@ -163,10 +161,10 @@ unsigned long opp_get_voltage(struct opp *opp)  	return v;  } -EXPORT_SYMBOL_GPL(opp_get_voltage); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);  /** - * opp_get_freq() - Gets the frequency corresponding to an available opp + * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp   * @opp:	opp for which frequency has to be returned for   *   * Return frequency in hertz corresponding to the opp, else @@ -180,9 +178,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);   * prior to unlocking with rcu_read_unlock() to maintain the integrity of the   * pointer.   
*/ -unsigned long opp_get_freq(struct opp *opp) +unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)  { -	struct opp *tmp_opp; +	struct dev_pm_opp *tmp_opp;  	unsigned long f = 0;  	tmp_opp = rcu_dereference(opp); @@ -193,10 +191,10 @@ unsigned long opp_get_freq(struct opp *opp)  	return f;  } -EXPORT_SYMBOL_GPL(opp_get_freq); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);  /** - * opp_get_opp_count() - Get number of opps available in the opp list + * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list   * @dev:	device for which we do this operation   *   * This function returns the number of available opps if there are any, @@ -206,10 +204,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);   * internally references two RCU protected structures: device_opp and opp which   * are safe as long as we are under a common RCU locked section.   */ -int opp_get_opp_count(struct device *dev) +int dev_pm_opp_get_opp_count(struct device *dev)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp; +	struct dev_pm_opp *temp_opp;  	int count = 0;  	dev_opp = find_device_opp(dev); @@ -226,10 +224,10 @@ int opp_get_opp_count(struct device *dev)  	return count;  } -EXPORT_SYMBOL_GPL(opp_get_opp_count); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);  /** - * opp_find_freq_exact() - search for an exact frequency + * dev_pm_opp_find_freq_exact() - search for an exact frequency   * @dev:		device for which we do this operation   * @freq:		frequency to search for   * @available:		true/false - match for available opp @@ -254,11 +252,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   */ -struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, -				bool available) +struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, +					      unsigned long freq, +					      bool available)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	dev_opp = find_device_opp(dev);  	if (IS_ERR(dev_opp)) { @@ -277,10 +276,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_exact); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);  /** - * opp_find_freq_ceil() - Search for an rounded ceil freq + * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq   * @dev:	device for which we do this operation   * @freq:	Start frequency   * @@ -300,10 +299,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   
*/ -struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, +					     unsigned long *freq)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	if (!dev || !freq) {  		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -324,10 +324,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_ceil); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);  /** - * opp_find_freq_floor() - Search for a rounded floor freq + * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq   * @dev:	device for which we do this operation   * @freq:	Start frequency   * @@ -347,10 +347,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   */ -struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, +					      unsigned long *freq)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	if (!dev || !freq) {  		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -375,32 +376,39 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_floor); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);  /** - * opp_add()  - Add an OPP table from a table definitions + * dev_pm_opp_add()  - Add an OPP table from a table definitions   * @dev:	device for which we do this operation   * @freq:	Frequency in Hz for this OPP   * @u_volt:	Voltage in uVolts for this OPP   *   * This function adds an opp definition to the opp list and returns status.   * The opp is made available by default and it can be controlled using - * opp_enable/disable functions. + * dev_pm_opp_enable/disable functions.   *   * Locking: The internal device_opp and opp structures are RCU protected.   * Hence this function internally uses RCU updater strategy with mutex locks   * to keep the integrity of the internal data structures. Callers should ensure   * that this function is *NOT* called under RCU protection or in contexts where   * mutex cannot be locked. 
+ * + * Return: + * 0:		On success OR + *		Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST:	Freq are same and volt are different OR + *		Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM:	Memory allocation failure   */ -int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  {  	struct device_opp *dev_opp = NULL; -	struct opp *opp, *new_opp; +	struct dev_pm_opp *opp, *new_opp;  	struct list_head *head;  	/* allocate new OPP node */ -	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); +	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);  	if (!new_opp) {  		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);  		return -ENOMEM; @@ -441,15 +449,31 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  	new_opp->u_volt = u_volt;  	new_opp->available = true; -	/* Insert new OPP in order of increasing frequency */ +	/* +	 * Insert new OPP in order of increasing frequency +	 * and discard if already present +	 */  	head = &dev_opp->opp_list;  	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { -		if (new_opp->rate < opp->rate) +		if (new_opp->rate <= opp->rate)  			break;  		else  			head = &opp->node;  	} +	/* Duplicate OPPs ? */ +	if (new_opp->rate == opp->rate) { +		int ret = opp->available && new_opp->u_volt == opp->u_volt ? +			0 : -EEXIST; + +		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", +			 __func__, opp->rate, opp->u_volt, opp->available, +			 new_opp->rate, new_opp->u_volt, new_opp->available); +		mutex_unlock(&dev_opp_list_lock); +		kfree(new_opp); +		return ret; +	} +  	list_add_rcu(&new_opp->node, head);  	mutex_unlock(&dev_opp_list_lock); @@ -460,7 +484,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);  	return 0;  } -EXPORT_SYMBOL_GPL(opp_add); +EXPORT_SYMBOL_GPL(dev_pm_opp_add);  /**   * opp_set_availability() - helper to set the availability of an opp @@ -485,11 +509,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,  		bool availability_req)  {  	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); -	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); +	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);  	int r = 0;  	/* keep the node allocated */ -	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); +	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);  	if (!new_opp) {  		dev_warn(dev, "%s: Unable to create OPP\n", __func__);  		return -ENOMEM; @@ -552,13 +576,13 @@ unlock:  }  /** - * opp_enable() - Enable a specific OPP + * dev_pm_opp_enable() - Enable a specific OPP   * @dev:	device for which we do this operation   * @freq:	OPP frequency to enable   *   * Enables a provided opp. If the operation is valid, this returns 0, else the   * corresponding error value. It is meant to be used for users an OPP available - * after being temporarily made unavailable with opp_disable. + * after being temporarily made unavailable with dev_pm_opp_disable.   *   * Locking: The internal device_opp and opp structures are RCU protected.   
* Hence this function indirectly uses RCU and mutex locks to keep the @@ -566,21 +590,21 @@ unlock:   * this function is *NOT* called under RCU protection or in contexts where   * mutex locking or synchronize_rcu() blocking calls cannot be used.   */ -int opp_enable(struct device *dev, unsigned long freq) +int dev_pm_opp_enable(struct device *dev, unsigned long freq)  {  	return opp_set_availability(dev, freq, true);  } -EXPORT_SYMBOL_GPL(opp_enable); +EXPORT_SYMBOL_GPL(dev_pm_opp_enable);  /** - * opp_disable() - Disable a specific OPP + * dev_pm_opp_disable() - Disable a specific OPP   * @dev:	device for which we do this operation   * @freq:	OPP frequency to disable   *   * Disables a provided opp. If the operation is valid, this returns   * 0, else the corresponding error value. It is meant to be a temporary   * control by users to make this OPP not available until the circumstances are - * right to make it available again (with a call to opp_enable). + * right to make it available again (with a call to dev_pm_opp_enable).   *   * Locking: The internal device_opp and opp structures are RCU protected.   * Hence this function indirectly uses RCU and mutex locks to keep the @@ -588,107 +612,17 @@ EXPORT_SYMBOL_GPL(opp_enable);   * this function is *NOT* called under RCU protection or in contexts where   * mutex locking or synchronize_rcu() blocking calls cannot be used.   */ -int opp_disable(struct device *dev, unsigned long freq) +int dev_pm_opp_disable(struct device *dev, unsigned long freq)  {  	return opp_set_availability(dev, freq, false);  } -EXPORT_SYMBOL_GPL(opp_disable); - -#ifdef CONFIG_CPU_FREQ -/** - * opp_init_cpufreq_table() - create a cpufreq table for a device - * @dev:	device for which we do this operation - * @table:	Cpufreq table returned back to caller - * - * Generate a cpufreq table for a provided device- this assumes that the - * opp list is already initialized and ready for usage. - * - * This function allocates required memory for the cpufreq table. It is - * expected that the caller does the required maintenance such as freeing - * the table as required. - * - * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM - * if no memory available for the operation (table is not populated), returns 0 - * if successful and table is populated. - * - * WARNING: It is  important for the callers to ensure refreshing their copy of - * the table if any of the mentioned functions have been invoked in the interim. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * To simplify the logic, we pretend we are updater and hold relevant mutex here - * Callers should ensure that this function is *NOT* called under RCU protection - * or in contexts where mutex locking cannot be used. 
- */ -int opp_init_cpufreq_table(struct device *dev, -			    struct cpufreq_frequency_table **table) -{ -	struct device_opp *dev_opp; -	struct opp *opp; -	struct cpufreq_frequency_table *freq_table; -	int i = 0; - -	/* Pretend as if I am an updater */ -	mutex_lock(&dev_opp_list_lock); - -	dev_opp = find_device_opp(dev); -	if (IS_ERR(dev_opp)) { -		int r = PTR_ERR(dev_opp); -		mutex_unlock(&dev_opp_list_lock); -		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); -		return r; -	} - -	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * -			     (opp_get_opp_count(dev) + 1), GFP_KERNEL); -	if (!freq_table) { -		mutex_unlock(&dev_opp_list_lock); -		dev_warn(dev, "%s: Unable to allocate frequency table\n", -			__func__); -		return -ENOMEM; -	} - -	list_for_each_entry(opp, &dev_opp->opp_list, node) { -		if (opp->available) { -			freq_table[i].driver_data = i; -			freq_table[i].frequency = opp->rate / 1000; -			i++; -		} -	} -	mutex_unlock(&dev_opp_list_lock); - -	freq_table[i].driver_data = i; -	freq_table[i].frequency = CPUFREQ_TABLE_END; - -	*table = &freq_table[0]; - -	return 0; -} -EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); - -/** - * opp_free_cpufreq_table() - free the cpufreq table - * @dev:	device for which we do this operation - * @table:	table to free - * - * Free up the table allocated by opp_init_cpufreq_table - */ -void opp_free_cpufreq_table(struct device *dev, -				struct cpufreq_frequency_table **table) -{ -	if (!table) -		return; - -	kfree(*table); -	*table = NULL; -} -EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); -#endif		/* CONFIG_CPU_FREQ */ +EXPORT_SYMBOL_GPL(dev_pm_opp_disable);  /** - * opp_get_notifier() - find notifier_head of the device with opp + * dev_pm_opp_get_notifier() - find notifier_head of the device with opp   * @dev:	device pointer used to lookup device OPPs.   */ -struct srcu_notifier_head *opp_get_notifier(struct device *dev) +struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)  {  	struct device_opp *dev_opp = find_device_opp(dev); @@ -732,11 +666,9 @@ int of_init_opp_table(struct device *dev)  		unsigned long freq = be32_to_cpup(val++) * 1000;  		unsigned long volt = be32_to_cpup(val++); -		if (opp_add(dev, freq, volt)) { +		if (dev_pm_opp_add(dev, freq, volt))  			dev_warn(dev, "%s: Failed to add OPP %ld\n",  				 __func__, freq); -			continue; -		}  		nr -= 2;  	} diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226ec49..a21223d9592 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);  extern void rpm_sysfs_remove(struct device *dev);  extern int wakeup_sysfs_add(struct device *dev);  extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);  extern int pm_qos_sysfs_add_flags(struct device *dev);  extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a9e5d..36b9eb4862c 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);  s32 __dev_pm_qos_read_value(struct device *dev)  {  	return IS_ERR_OR_NULL(dev->power.qos) ? 
-		0 : pm_qos_read_value(&dev->power.qos->latency); +		0 : pm_qos_read_value(&dev->power.qos->resume_latency);  }  /** @@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,  	int ret;  	switch(req->type) { -	case DEV_PM_QOS_LATENCY: -		ret = pm_qos_update_target(&qos->latency, &req->data.pnode, -					   action, value); +	case DEV_PM_QOS_RESUME_LATENCY: +		ret = pm_qos_update_target(&qos->resume_latency, +					   &req->data.pnode, action, value);  		if (ret) { -			value = pm_qos_read_value(&qos->latency); +			value = pm_qos_read_value(&qos->resume_latency);  			blocking_notifier_call_chain(&dev_pm_notifiers,  						     (unsigned long)value,  						     req);  		}  		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		ret = pm_qos_update_target(&qos->latency_tolerance, +					   &req->data.pnode, action, value); +		if (ret) { +			value = pm_qos_read_value(&qos->latency_tolerance); +			req->dev->power.set_latency_tolerance(req->dev, value); +		} +		break;  	case DEV_PM_QOS_FLAGS:  		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,  					  action, value); @@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)  	}  	BLOCKING_INIT_NOTIFIER_HEAD(n); -	c = &qos->latency; +	c = &qos->resume_latency;  	plist_head_init(&c->list); -	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; -	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; +	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; +	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; +	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;  	c->type = PM_QOS_MIN;  	c->notifiers = n; +	c = &qos->latency_tolerance; +	plist_head_init(&c->list); +	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; +	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; +	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; +	c->type = PM_QOS_MIN; +  	INIT_LIST_HEAD(&qos->flags.list);  	spin_lock_irq(&dev->power.lock); @@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  	 * If the device's PM QoS resume latency limit or PM QoS flags have been  	 * exposed to user space, they have to be hidden at this point.  	 */ -	pm_qos_sysfs_remove_latency(dev); +	pm_qos_sysfs_remove_resume_latency(dev);  	pm_qos_sysfs_remove_flags(dev);  	mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  		goto out;  	/* Flush the constraints lists for the device. 
*/ -	c = &qos->latency; +	c = &qos->resume_latency;  	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {  		/*  		 * Update constraints list and call the notification @@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);  		memset(req, 0, sizeof(*req));  	} +	c = &qos->latency_tolerance; +	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { +		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); +		memset(req, 0, sizeof(*req)); +	}  	f = &qos->flags;  	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {  		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  	mutex_unlock(&dev_pm_qos_sysfs_mtx);  } +static bool dev_pm_qos_invalid_request(struct device *dev, +				       struct dev_pm_qos_request *req) +{ +	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE +			&& !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, +				    struct dev_pm_qos_request *req, +				    enum dev_pm_qos_req_type type, s32 value) +{ +	int ret = 0; + +	if (!dev || dev_pm_qos_invalid_request(dev, req)) +		return -EINVAL; + +	if (WARN(dev_pm_qos_request_active(req), +		 "%s() called for already added request\n", __func__)) +		return -EINVAL; + +	if (IS_ERR(dev->power.qos)) +		ret = -ENODEV; +	else if (!dev->power.qos) +		ret = dev_pm_qos_constraints_allocate(dev); + +	trace_dev_pm_qos_add_request(dev_name(dev), type, value); +	if (!ret) { +		req->dev = dev; +		req->type = type; +		ret = apply_constraint(req, PM_QOS_ADD_REQ, value); +	} +	return ret; +} +  /**   * dev_pm_qos_add_request - inserts new qos request into the list   * @dev: target device for the constraint @@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,  			   enum dev_pm_qos_req_type type, s32 value)  { -	int ret = 0; - -	if (!dev || !req) /*guard against callers passing in null */ -		return -EINVAL; - -	if (WARN(dev_pm_qos_request_active(req), -		 "%s() called for already added request\n", __func__)) -		return -EINVAL; +	int ret;  	mutex_lock(&dev_pm_qos_mtx); - -	if (IS_ERR(dev->power.qos)) -		ret = -ENODEV; -	else if (!dev->power.qos) -		ret = dev_pm_qos_constraints_allocate(dev); - -	trace_dev_pm_qos_add_request(dev_name(dev), type, value); -	if (!ret) { -		req->dev = dev; -		req->type = type; -		ret = apply_constraint(req, PM_QOS_ADD_REQ, value); -	} - +	ret = __dev_pm_qos_add_request(dev, req, type, value);  	mutex_unlock(&dev_pm_qos_mtx); -  	return ret;  }  EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,  		return -ENODEV;  	switch(req->type) { -	case DEV_PM_QOS_LATENCY: +	case DEV_PM_QOS_RESUME_LATENCY: +	case DEV_PM_QOS_LATENCY_TOLERANCE:  		curr_value = req->data.pnode.prio;  		break;  	case DEV_PM_QOS_FLAGS: @@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)  		ret = dev_pm_qos_constraints_allocate(dev);  	if (!ret) -		ret = blocking_notifier_chain_register( -				dev->power.qos->latency.notifiers, notifier); +		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, +						       notifier);  	mutex_unlock(&dev_pm_qos_mtx);  	return ret; @@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,  	/* 
Silently return if the constraints object is not present. */  	if (!IS_ERR_OR_NULL(dev->power.qos)) -		retval = blocking_notifier_chain_unregister( -				dev->power.qos->latency.notifiers, -				notifier); +		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, +							    notifier);  	mutex_unlock(&dev_pm_qos_mtx);  	return retval; @@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);   * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.   * @dev: Device whose ancestor to add the request for.   * @req: Pointer to the preallocated handle. + * @type: Type of the request.   * @value: Constraint latency value.   */  int dev_pm_qos_add_ancestor_request(struct device *dev, -				    struct dev_pm_qos_request *req, s32 value) +				    struct dev_pm_qos_request *req, +				    enum dev_pm_qos_req_type type, s32 value)  {  	struct device *ancestor = dev->parent;  	int ret = -ENODEV; -	while (ancestor && !ancestor->power.ignore_children) -		ancestor = ancestor->parent; +	switch (type) { +	case DEV_PM_QOS_RESUME_LATENCY: +		while (ancestor && !ancestor->power.ignore_children) +			ancestor = ancestor->parent; +		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		while (ancestor && !ancestor->power.set_latency_tolerance) +			ancestor = ancestor->parent; + +		break; +	default: +		ancestor = NULL; +	}  	if (ancestor) -		ret = dev_pm_qos_add_request(ancestor, req, -					     DEV_PM_QOS_LATENCY, value); +		ret = dev_pm_qos_add_request(ancestor, req, type, value);  	if (ret < 0)  		req->dev = NULL; @@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,  	struct dev_pm_qos_request *req = NULL;  	switch(type) { -	case DEV_PM_QOS_LATENCY: -		req = dev->power.qos->latency_req; -		dev->power.qos->latency_req = NULL; +	case DEV_PM_QOS_RESUME_LATENCY: +		req = dev->power.qos->resume_latency_req; +		dev->power.qos->resume_latency_req = NULL; +		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		req = dev->power.qos->latency_tolerance_req; +		dev->power.qos->latency_tolerance_req = NULL;  		break;  	case DEV_PM_QOS_FLAGS:  		req = dev->power.qos->flags_req; @@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  	if (!req)  		return -ENOMEM; -	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); +	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);  	if (ret < 0) {  		kfree(req);  		return ret; @@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  	if (IS_ERR_OR_NULL(dev->power.qos))  		ret = -ENODEV; -	else if (dev->power.qos->latency_req) +	else if (dev->power.qos->resume_latency_req)  		ret = -EEXIST;  	if (ret < 0) { @@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  		mutex_unlock(&dev_pm_qos_mtx);  		goto out;  	} -	dev->power.qos->latency_req = req; +	dev->power.qos->resume_latency_req = req;  	mutex_unlock(&dev_pm_qos_mtx); -	ret = pm_qos_sysfs_add_latency(dev); +	ret = pm_qos_sysfs_add_resume_latency(dev);  	if (ret) -		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); +		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);   out:  	mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);  static void __dev_pm_qos_hide_latency_limit(struct device *dev)  { -	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) -		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); +	if 
(!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) +		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);  }  /** @@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)  {  	mutex_lock(&dev_pm_qos_sysfs_mtx); -	pm_qos_sysfs_remove_latency(dev); +	pm_qos_sysfs_remove_resume_latency(dev);  	mutex_lock(&dev_pm_qos_mtx);  	__dev_pm_qos_hide_latency_limit(dev); @@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)  	pm_runtime_put(dev);  	return ret;  } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ +	s32 ret; + +	mutex_lock(&dev_pm_qos_mtx); +	ret = IS_ERR_OR_NULL(dev->power.qos) +		|| !dev->power.qos->latency_tolerance_req ? +			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : +			dev->power.qos->latency_tolerance_req->data.pnode.prio; +	mutex_unlock(&dev_pm_qos_mtx); +	return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). + */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ +	int ret; + +	mutex_lock(&dev_pm_qos_mtx); + +	if (IS_ERR_OR_NULL(dev->power.qos) +	    || !dev->power.qos->latency_tolerance_req) { +		struct dev_pm_qos_request *req; + +		if (val < 0) { +			ret = -EINVAL; +			goto out; +		} +		req = kzalloc(sizeof(*req), GFP_KERNEL); +		if (!req) { +			ret = -ENOMEM; +			goto out; +		} +		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); +		if (ret < 0) { +			kfree(req); +			goto out; +		} +		dev->power.qos->latency_tolerance_req = req; +	} else { +		if (val < 0) { +			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); +			ret = 0; +		} else { +			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); +		} +	} + + out: +	mutex_unlock(&dev_pm_qos_mtx); +	return ret; +}  #else /* !CONFIG_PM_RUNTIME */  static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}  static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 268a3509757..67c7938e430 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -13,6 +13,43 @@  #include <trace/events/rpm.h>  #include "power.h" +#define RPM_GET_CALLBACK(dev, cb)				\ +({								\ +	int (*__rpm_cb)(struct device *__d);			\ +								\ +	if (dev->pm_domain)					\ +		__rpm_cb = dev->pm_domain->ops.cb;		\ +	else if (dev->type && dev->type->pm)			\ +		__rpm_cb = dev->type->pm->cb;			\ +	else if (dev->class && dev->class->pm)			\ +		__rpm_cb = dev->class->pm->cb;			\ +	else if (dev->bus && dev->bus->pm)			\ +		__rpm_cb = dev->bus->pm->cb;			\ +	else							\ +		__rpm_cb = NULL;				\ +								\ +	if (!__rpm_cb && dev->driver && dev->driver->pm)	\ +		__rpm_cb = dev->driver->pm->cb;			\ +								\ +	__rpm_cb;						\ +}) + +static int (*rpm_get_suspend_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_suspend); +} + +static int (*rpm_get_resume_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_resume); +} + +#ifdef CONFIG_PM_RUNTIME +static int (*rpm_get_idle_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_idle); +} +  
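The RPM_GET_CALLBACK() macro introduced here replaces the callback-selection ladders that were previously open-coded in rpm_idle(), rpm_suspend() and rpm_resume() (see the hunks below). The lookup order is: PM domain first, then device type, class and bus, with the driver's own dev_pm_ops consulted only if none of those layers supplies a callback. A minimal sketch of what RPM_GET_CALLBACK(dev, runtime_suspend) effectively expands to (the function name is illustrative, not part of the patch):

#include <linux/device.h>

/* Illustrative expansion of RPM_GET_CALLBACK(dev, runtime_suspend). */
static int (*example_get_suspend_cb(struct device *dev))(struct device *)
{
	int (*cb)(struct device *);

	if (dev->pm_domain)			/* 1. PM domain wins */
		cb = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)	/* 2. then device type */
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)	/* 3. then class */
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)	/* 4. then bus */
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	/* 5. fall back to the driver's own ops only if nothing above set one */
	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb;
}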
static int rpm_resume(struct device *dev, int rpmflags);  static int rpm_suspend(struct device *dev, int rpmflags); @@ -258,7 +295,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)   * Check if the device's runtime PM status allows it to be suspended.  If   * another idle notification has been started earlier, return immediately.  If   * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise - * run the ->runtime_idle() callback directly. + * run the ->runtime_idle() callback directly. If the ->runtime_idle callback + * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.   *   * This function must be called under dev->power.lock with interrupts disabled.   */ @@ -309,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)  	dev->power.idle_notification = true; -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_idle; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_idle; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_idle; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_idle; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_idle; +	callback = rpm_get_idle_cb(dev);  	if (callback)  		retval = __rpm_callback(callback, dev); @@ -331,7 +357,7 @@ static int rpm_idle(struct device *dev, int rpmflags)   out:  	trace_rpm_return_int(dev, _THIS_IP_, retval); -	return retval ? retval : rpm_suspend(dev, rpmflags); +	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);  }  /** @@ -491,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)  	__update_runtime_status(dev, RPM_SUSPENDING); -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_suspend; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_suspend; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_suspend; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_suspend; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_suspend; +	callback = rpm_get_suspend_cb(dev);  	retval = rpm_callback(callback, dev);  	if (retval) @@ -723,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)  	__update_runtime_status(dev, RPM_RESUMING); -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_resume; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_resume; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_resume; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_resume; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_resume; +	callback = rpm_get_resume_cb(dev);  	retval = rpm_callback(callback, dev);  	if (retval) { @@ -1129,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);   * @dev: Device to handle.   * @check_resume: If set, check if there's a resume request for the device.   * - * Increment power.disable_depth for the device and if was zero previously, + * Increment power.disable_depth for the device and if it was zero previously,   * cancel all pending runtime PM requests for the device and wait for all   * operations in progress to complete.  The device can be either active or   * suspended after its runtime PM has been disabled. 
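As the updated kerneldoc above notes, rpm_idle() now finishes with rpm_suspend(dev, rpmflags | RPM_AUTO) when the ->runtime_idle() callback is missing or returns 0, so the device's autosuspend delay is honored instead of the device being suspended immediately. A hypothetical driver callback relying on that behavior (the foo_* names and the dma_busy check are illustrative, not from this patch):

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct foo_priv {			/* illustrative private driver data */
	bool dma_busy;
};

static int foo_runtime_idle(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->dma_busy)
		return -EBUSY;		/* non-zero return vetoes the suspend */

	/*
	 * Returning 0 lets the core proceed with rpm_suspend(dev, RPM_AUTO),
	 * so the autosuspend delay configured for the device is respected.
	 */
	return 0;
}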
@@ -1400,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)  	if (dev->power.irq_safe && dev->parent)  		pm_runtime_put(dev->parent);  } +#endif + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we safely can check the device's runtime PM status and + * if it is active, invoke it's .runtime_suspend callback to bring it into + * suspend state. Keep runtime PM disabled to preserve the state unless we + * encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into low power state. + */ +int pm_runtime_force_suspend(struct device *dev) +{ +	int (*callback)(struct device *); +	int ret = 0; + +	pm_runtime_disable(dev); + +	/* +	 * Note that pm_runtime_status_suspended() returns false while +	 * !CONFIG_PM_RUNTIME, which means the device will be put into low +	 * power state. +	 */ +	if (pm_runtime_status_suspended(dev)) +		return 0; + +	callback = rpm_get_suspend_cb(dev); + +	if (!callback) { +		ret = -ENOSYS; +		goto err; +	} + +	ret = callback(dev); +	if (ret) +		goto err; + +	pm_runtime_set_suspended(dev); +	return 0; +err: +	pm_runtime_enable(dev); +	return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state. + * @dev: Device to resume. + * + * Prior invoking this function we expect the user to have brought the device + * into low power state by a call to pm_runtime_force_suspend(). Here we reverse + * those actions and brings the device into full power. We update the runtime PM + * status and re-enables runtime PM. + * + * Typically this function may be invoked from a system resume callback to make + * sure the device is put into full power state. + */ +int pm_runtime_force_resume(struct device *dev) +{ +	int (*callback)(struct device *); +	int ret = 0; + +	callback = rpm_get_resume_cb(dev); + +	if (!callback) { +		ret = -ENOSYS; +		goto out; +	} + +	ret = callback(dev); +	if (ret) +		goto out; + +	pm_runtime_set_active(dev); +	pm_runtime_mark_last_busy(dev); +out: +	pm_runtime_enable(dev); +	return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089ade5c..95b181d1ca6 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,  static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,  		autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, -				   struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, +					  struct device_attribute *attr, +					  char *buf)  { -	return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); +	return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));  } -static ssize_t pm_qos_latency_store(struct device *dev, -				    struct device_attribute *attr, -				    const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct device *dev, +					   struct device_attribute *attr, +					   const char *buf, size_t n)  {  	s32 value;  	int ret; @@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,  	if (value < 0)  		return -EINVAL; -	ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); +	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, +					value);  	return ret < 0 ? 
ret : n;  }  static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, -		   pm_qos_latency_show, pm_qos_latency_store); +		   pm_qos_resume_latency_show, pm_qos_resume_latency_store); + +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, +					     struct device_attribute *attr, +					     char *buf) +{ +	s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + +	if (value < 0) +		return sprintf(buf, "auto\n"); +	else if (value == PM_QOS_LATENCY_ANY) +		return sprintf(buf, "any\n"); + +	return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, +					      struct device_attribute *attr, +					      const char *buf, size_t n) +{ +	s32 value; +	int ret; + +	if (kstrtos32(buf, 0, &value)) { +		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) +			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; +		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) +			value = PM_QOS_LATENCY_ANY; +	} +	ret = dev_pm_qos_update_user_latency_tolerance(dev, value); +	return ret < 0 ? ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, +		   pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);  static ssize_t pm_qos_no_power_off_show(struct device *dev,  					struct device_attribute *attr, @@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {  	.attrs	= runtime_attrs,  }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = {  #ifdef CONFIG_PM_RUNTIME  	&dev_attr_pm_qos_resume_latency_us.attr,  #endif /* CONFIG_PM_RUNTIME */  	NULL,  }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { +	.name	= power_group_name, +	.attrs	= pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME +	&dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ +	NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = {  	.name	= power_group_name, -	.attrs	= pm_qos_latency_attrs, +	.attrs	= pm_qos_latency_tolerance_attrs,  };  static struct attribute *pm_qos_flags_attrs[] = { @@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)  		if (rc)  			goto err_out;  	} -  	if (device_can_wakeup(dev)) {  		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); -		if (rc) { -			if (pm_runtime_callbacks_present(dev)) -				sysfs_unmerge_group(&dev->kobj, -						    &pm_runtime_attr_group); -			goto err_out; -		} +		if (rc) +			goto err_runtime; +	} +	if (dev->power.set_latency_tolerance) { +		rc = sysfs_merge_group(&dev->kobj, +				       &pm_qos_latency_tolerance_attr_group); +		if (rc) +			goto err_wakeup;  	}  	return 0; + err_wakeup: +	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: +	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);   err_out:  	sysfs_remove_group(&dev->kobj, &pm_attr_group);  	return rc; @@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)  	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);  } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev)  { -	return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); +	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);  } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev)  { -	sysfs_unmerge_group(&dev->kobj, 
&pm_qos_latency_attr_group); +	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);  }  int pm_qos_sysfs_add_flags(struct device *dev) @@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)  void dpm_sysfs_remove(struct device *dev)  { +	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);  	dev_pm_qos_constraints_destroy(dev);  	rpm_sysfs_remove(dev);  	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 2d56f4113ae..eb1bd2ecad8 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)  {  	int ret = 0; +	if (!dev) +		return -EINVAL; +  	if (enable) {  		device_set_wakeup_capable(dev, true);  		ret = device_wakeup_enable(dev);  	} else { +		if (dev->power.can_wakeup) +			device_wakeup_disable(dev); +  		device_set_wakeup_capable(dev, false);  	}  | 
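As their kerneldoc suggests, the new pm_runtime_force_suspend() and pm_runtime_force_resume() helpers added above are intended to be called from a driver's system sleep callbacks, reusing the runtime PM callbacks to enter and leave low power states across system suspend. A hypothetical hook-up, with all foo_* names being illustrative:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)
{
	/* Runtime PM is disabled; an already runtime-suspended device is left alone. */
	return pm_runtime_force_suspend(dev);
}

static int foo_resume(struct device *dev)
{
	/* Bring the device back to full power and re-enable runtime PM. */
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	/* the driver's runtime PM callbacks would be filled in as usual */
};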

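For completeness: the pm_qos_latency_tolerance_us attribute added above is only merged by dpm_sysfs_add() for devices whose power.set_latency_tolerance pointer is set, and writes of "auto" and "any" are translated to PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT and PM_QOS_LATENCY_ANY before dev_pm_qos_update_user_latency_tolerance() runs. A hypothetical provider of that hook, assuming a void (*)(struct device *, s32) signature and that the PM QoS core invokes it when the effective tolerance changes (everything named foo_* is illustrative):

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical hardware hook, not part of this patch. */
static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) {
		/* "auto": let firmware/hardware pick the latency tolerance */
	} else if (val == PM_QOS_LATENCY_ANY) {
		/* "any": no latency concern, deepest power states allowed */
	} else {
		/* program an explicit tolerance of 'val' microseconds */
	}
}

/* Illustrative wiring, e.g. from bus or PM domain setup code: */
static void foo_enable_latency_tolerance(struct device *dev)
{
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
}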