Diffstat (limited to 'drivers/base/power/runtime.c')
-rw-r--r--  drivers/base/power/runtime.c | 533
1 file changed, 409 insertions(+), 124 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 02c652be83e..67c7938e430 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -8,9 +8,48 @@
*/
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
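The RPM_GET_CALLBACK() macro above fixes the lookup order for runtime PM callbacks: a PM domain wins over the device type, which wins over the class and then the bus, and the driver's own dev_pm_ops is consulted only when none of those provide one. A minimal sketch (the foo_* names are hypothetical) of a driver whose callbacks would be found by the final fallback step:

static int foo_runtime_suspend(struct device *dev)
{
        /* Put the hardware into a low-power state. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* Bring the hardware back to full power. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

The table would then be assigned to the driver's .driver.pm field, the last place the macro looks.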
@@ -28,13 +67,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
void update_pm_runtime_accounting(struct device *dev)
{
unsigned long now = jiffies;
- int delta;
+ unsigned long delta;
delta = now - dev->power.accounting_timestamp;
- if (delta < 0)
- delta = 0;
-
dev->power.accounting_timestamp = now;
if (dev->power.disable_depth > 0)
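Dropping the signed delta and the negative-delta check relies on unsigned wraparound being well defined in C: now - dev->power.accounting_timestamp yields the correct elapsed tick count even if jiffies wrapped in between. A worked example, assuming 32-bit unsigned arithmetic:

/*
 * timestamp taken just before a wrap:  0xfffffff0
 * now, read just after the wrap:       0x00000010
 * 0x00000010 - 0xfffffff0 == 0x20 (mod 2^32), the true 32-tick delta;
 * with a signed int the same subtraction could come out negative.
 */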
@@ -125,6 +161,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True to set the flag, false to clear it.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't have the flag set.
+ *
+ * The function should only be called by block device or network
+ * device drivers to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If memory allocation with GFP_KERNEL is called inside the runtime
+ * resume/suspend callback of any one of the device's ancestors (or the
+ * block device itself), a deadlock may be triggered inside the
+ * memory allocation, since it might not complete until the block
+ * device becomes active and the involved page I/O finishes. This
+ * situation was first pointed out by Alan Stern. Network devices
+ * are involved in iSCSI-type situations.
+ *
+ * dev_hotplug_mutex is held in the function to handle the hotplug
+ * race, because pm_runtime_set_memalloc_noio() may be called from
+ * an async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected (block/network) device.
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * No need to enable ancestors any more if the device
+ * was already enabled.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+ * Clear the parent device's flag only if none of its
+ * children has the flag set, because an ancestor's
+ * flag may have been set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
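A hedged sketch of the intended call pattern, per the rule above that the calls sit between device_add() and device_del(); foo_add_disk() and foo_del_disk() are hypothetical block-driver helpers:

static int foo_add_disk(struct device *dev)
{
        int ret;

        ret = device_add(dev);
        if (ret)
                return ret;
        /* Mark the whole ancestor path before any page I/O is queued. */
        pm_runtime_set_memalloc_noio(dev, true);
        return 0;
}

static void foo_del_disk(struct device *dev)
{
        /* Ancestors keep the flag if sibling devices still need it. */
        pm_runtime_set_memalloc_noio(dev, false);
        device_del(dev);
}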
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
@@ -135,8 +241,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
if (dev->power.runtime_error)
retval = -EINVAL;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
else if (!pm_children_suspended(dev))
retval = -EBUSY;
@@ -147,6 +254,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
+ else if (__dev_pm_qos_read_value(dev) < 0)
+ retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -154,14 +263,40 @@ static int rpm_check_suspend_allowed(struct device *dev)
}
/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval;
+
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = cb(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ return retval;
+}
+
+/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -170,6 +305,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -195,11 +331,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks) {
- /* Assume ->runtime_idle() callback would have suspended. */
- retval = rpm_suspend(dev, rpmflags);
+ if (dev->power.no_callbacks)
goto out;
- }
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
@@ -208,33 +341,23 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- goto out;
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
+ return 0;
}
dev->power.idle_notification = true;
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
- callback = dev->bus->pm->runtime_idle;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else
- callback = NULL;
+ callback = rpm_get_idle_cb(dev);
- if (callback) {
- spin_unlock_irq(&dev->power.lock);
-
- callback(dev);
-
- spin_lock_irq(&dev->power.lock);
- }
+ if (callback)
+ retval = __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
- return retval;
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
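With this rework, rpm_idle() treats a missing ->runtime_idle callback, or one that returns 0, as permission to go ahead and attempt an autosuspend via rpm_suspend(dev, rpmflags | RPM_AUTO). A sketch of a callback honoring that contract (struct foo_priv and its dma_in_flight field are hypothetical):

/* Return 0 to let the core try an autosuspend, or a negative errno
 * such as -EBUSY to veto it for now. */
static int foo_runtime_idle(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        return priv->dma_in_flight ? -EBUSY : 0;
}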
/**
@@ -243,36 +366,53 @@ static int rpm_idle(struct device *dev, int rpmflags)
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
if (!cb)
return -ENOSYS;
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
- retval = cb(dev);
+ /*
+ * A deadlock might occur if a memory allocation with
+ * GFP_KERNEL happens inside the runtime_suspend or
+ * runtime_resume callback of a block device's
+ * ancestor, or of the block device itself. A network
+ * device might be thought of as part of an iSCSI block
+ * device, so network devices and their ancestors should
+ * be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
- spin_lock_irq(&dev->power.lock);
dev->power.runtime_error = retval;
-
- return retval;
+ return retval != -EACCES ? retval : -EIO;
}
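The memalloc_noio_save()/memalloc_noio_restore() pair scopes the current task so that allocations implicitly behave as GFP_NOIO, which is what keeps the callback from recursing into page I/O on the device being suspended. A minimal sketch of the save/restore pattern in isolation (foo_alloc is hypothetical):

static void *foo_alloc(size_t size)
{
        unsigned int noio_flag;
        void *buf;

        noio_flag = memalloc_noio_save();
        /* This allocation cannot start block I/O, despite GFP_KERNEL. */
        buf = kmalloc(size, GFP_KERNEL);
        memalloc_noio_restore(noio_flag);

        return buf;
}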
/**
- * rpm_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
- * another suspend has been started earlier, either return immediately or wait
- * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
- * pending idle notification. If the RPM_ASYNC flag is set then queue a
- * suspend request; otherwise run the ->runtime_suspend() callback directly.
- * If a deferred resume was requested while the callback was running then carry
- * it out; otherwise send an idle notification for the device (if the suspend
- * failed) or for its parent (if the suspend succeeded).
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly. If the
+ * callback succeeded and a deferred resume was requested while it was
+ * running, carry out the resume; otherwise send an idle notification
+ * for the device's parent (provided that neither ignore_children of
+ * parent->power nor irq_safe of dev->power is set).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -283,7 +423,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -335,6 +475,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -352,7 +501,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto repeat;
}
- dev->power.deferred_resume = false;
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
@@ -369,63 +517,76 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- callback = dev->bus->pm->runtime_suspend;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else
- callback = NULL;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = 0;
- if (retval == -EAGAIN || retval == -EBUSY)
- dev->power.runtime_error = 0;
- else
- pm_runtime_cancel_pending(dev);
- } else {
+ if (retval)
+ goto fail;
+
no_callback:
- __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_deactivate_timer(dev);
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
- if (dev->parent) {
- parent = dev->parent;
- atomic_add_unless(&parent->power.child_count, -1, 0);
- }
+ if (dev->parent) {
+ parent = dev->parent;
+ atomic_add_unless(&parent->power.child_count, -1, 0);
}
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
}
- if (parent && !parent->power.ignore_children) {
- spin_unlock_irq(&dev->power.lock);
+ /* Maybe the parent is now able to suspend. */
+ if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
- pm_request_idle(parent);
+ spin_lock(&parent->power.lock);
+ rpm_idle(parent, RPM_ASYNC);
+ spin_unlock(&parent->power.lock);
- spin_lock_irq(&dev->power.lock);
+ spin_lock(&dev->power.lock);
}
out:
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
+
+ fail:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.deferred_resume = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ goto out;
}
/**
- * rpm_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
- * earlier, either return imediately or wait for it to finish, depending on the
+ * earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
* parallel with this function, either tell the other process to resume after
* suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
@@ -442,13 +603,16 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
retval = -EINVAL;
+ else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+ && dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
+ retval = -EACCES;
if (retval)
goto out;
@@ -479,6 +643,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -509,6 +682,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
|| dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
+ retval = 1;
goto no_callback; /* Assume success. */
}
spin_unlock(&dev->parent->power.lock);
@@ -527,17 +701,20 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (!parent && dev->parent) {
/*
- * Increment the parent's resume counter and resume it if
- * necessary.
+ * Increment the parent's usage counter and resume it if
+ * necessary. Not needed if dev is irq-safe; then the
+ * parent is permanently resumed.
*/
parent = dev->parent;
+ if (dev->power.irq_safe)
+ goto skip_parent;
spin_unlock(&dev->power.lock);
pm_runtime_get_noresume(parent);
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's run-time PM is disabled or it
+ * We can resume if the parent's runtime PM is disabled or it
* is set to ignore children.
*/
if (!parent->power.disable_depth
@@ -553,20 +730,14 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
goto repeat;
}
+ skip_parent:
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- callback = dev->bus->pm->runtime_resume;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else
- callback = NULL;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -580,11 +751,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
wake_up_all(&dev->power.wait_queue);
- if (!retval)
+ if (retval >= 0)
rpm_idle(dev, RPM_ASYNC);
out:
- if (parent) {
+ if (parent && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);
pm_runtime_put(parent);
@@ -592,17 +763,17 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
/**
- * pm_runtime_work - Universal run-time PM work function.
+ * pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate run-time PM function.
+ * is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
@@ -701,7 +872,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
- * __pm_runtime_idle - Entry point for run-time idle operations.
+ * __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
@@ -709,13 +880,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
* return immediately if it is larger than zero. Then carry out an idle
* notification, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -730,7 +904,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
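These entry points back the pm_runtime_get_*()/pm_runtime_put_*() helpers in pm_runtime.h via the RPM_GET_PUT flag. A hedged sketch of the usual driver I/O-path pairing (foo_do_io is hypothetical):

static int foo_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume if suspended */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* rebalance the usage count */
                return ret;
        }

        /* ... program the hardware ... */

        pm_runtime_put(dev);    /* queues an idle notification at count 0 */
        return 0;
}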
/**
- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
@@ -738,13 +912,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* return immediately if it is larger than zero. Then carry out a suspend,
* either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -759,20 +936,23 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
- * __pm_runtime_resume - Entry point for run-time resume operations.
+ * __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
@@ -785,11 +965,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * __pm_runtime_set_status - Set run-time PM status of a device.
+ * __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
- * @status: New run-time PM status of the device.
+ * @status: New runtime PM status of the device.
*
- * If run-time PM of the device is disabled or its power.runtime_error field is
+ * If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
@@ -835,7 +1015,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
/*
* It is invalid to put an active child under a parent that is
- * not active, has run-time PM enabled and the
+ * not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth
@@ -869,7 +1049,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
- * run-time PM operations involving the device in progress to complete.
+ * runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
@@ -917,7 +1097,7 @@ static void __pm_runtime_barrier(struct device *dev)
* Prevent the device from being suspended by incrementing its usage counter and
* if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all run-time PM operations involving the device in
+ * from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
@@ -947,18 +1127,18 @@ int pm_runtime_barrier(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
- * __pm_runtime_disable - Disable run-time PM of a device.
+ * __pm_runtime_disable - Disable runtime PM of a device.
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
- * Increment power.disable_depth for the device and if was zero previously,
- * cancel all pending run-time PM requests for the device and wait for all
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
- * suspended after its run-time PM has been disabled.
+ * suspended after its runtime PM has been disabled.
*
* If @check_resume is set and there's a resume request pending when
* __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its run-time PM.
+ * function will wake up the device before disabling its runtime PM.
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
@@ -971,7 +1151,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/*
* Wake up the device if there's a resume request pending, because that
- * means there probably is some I/O to process and disabling run-time PM
+ * means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending
@@ -996,7 +1176,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
- * pm_runtime_enable - Enable run-time PM of a device.
+ * pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
@@ -1015,7 +1195,7 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
- * pm_runtime_forbid - Block run-time PM of a device.
+ * pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1038,7 +1218,7 @@ void pm_runtime_forbid(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
- * pm_runtime_allow - Unblock run-time PM of a device.
+ * pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1059,13 +1239,12 @@ void pm_runtime_allow(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no run-time PM
- * callbacks of its own. The run-time sysfs attributes will be removed.
- *
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
@@ -1078,6 +1257,27 @@ void pm_runtime_no_callbacks(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle.
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled. It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 1;
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
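A hedged sketch of the intended call site, assuming a hypothetical probe routine for a device whose callbacks never sleep:

static int foo_atomic_probe(struct device *dev)
{
        /* Callbacks are safe with the spinlock held and IRQs off,
         * so runtime PM calls may now come from atomic context;
         * the parent stays pinned active from this point on. */
        pm_runtime_irq_safe(dev);
        pm_runtime_enable(dev);
        return 0;
}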
+/**
* update_autosuspend - Handle a change to a device's autosuspend settings.
* @dev: Device to handle.
* @old_delay: The former autosuspend_delay value.
@@ -1120,8 +1320,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
- * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
- * changes the other way, allow run-time suspends.
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
@@ -1141,7 +1341,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
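For context, a hedged sketch of how a driver typically pairs the two autosuspend knobs; the foo_* helper names and the 2000 ms delay are illustrative only:

static void foo_setup_autosuspend(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* milliseconds */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}

static void foo_io_done(struct device *dev)
{
        /* Restart the delay, then drop the reference without an
         * immediate suspend; the suspend happens after the delay. */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}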
@@ -1158,7 +1358,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
/**
- * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
@@ -1199,4 +1399,89 @@ void pm_runtime_remove(struct device *dev)
/* Change the status back to 'suspended' to match the initial status. */
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
+ if (dev->power.irq_safe && dev->parent)
+ pm_runtime_put(dev->parent);
+}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and,
+ * if it is active, invoke its ->runtime_suspend callback to bring it into a
+ * suspended state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to make
+ * sure the device is put into a low-power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+ * Note that pm_runtime_status_suspended() returns false when
+ * CONFIG_PM_RUNTIME is unset, which means the device will be put
+ * into a low-power state.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the device
+ * into a low-power state by a call to pm_runtime_force_suspend(). Here we
+ * reverse those actions and bring the device back to full power. We also update
+ * the runtime PM status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to make
+ * sure the device is put back into a full-power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
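These helpers are commonly wired straight into a driver's system sleep callbacks so the runtime PM path is reused for system-wide transitions; a hedged sketch, reusing the hypothetical foo_* runtime callbacks from the earlier example:

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};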