Diffstat (limited to 'drivers/base/power/domain.c')
 drivers/base/power/domain.c | 622
 1 file changed, 499 insertions(+), 123 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 83aa694a8ef..eee55c1e5fd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,7 +6,6 @@ * This file is released under the GPLv2. */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> @@ -42,7 +41,7 @@ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ if (!__retval && __elapsed > __td->field) { \ __td->field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ genpd->max_off_time_changed = true; \ __td->constraint_changed = true; \ @@ -53,6 +52,24 @@ static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); +static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name) +{ + struct generic_pm_domain *genpd = NULL, *gpd; + + if (IS_ERR_OR_NULL(domain_name)) + return NULL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (!strcmp(gpd->name, domain_name)) { + genpd = gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + return genpd; +} + #ifdef CONFIG_PM struct generic_pm_domain *dev_to_genpd(struct device *dev) @@ -75,19 +92,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) start_latency_ns, "start"); } -static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, - save_state_latency_ns, "state save"); -} - -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) -{ - return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, - restore_state_latency_ns, - "state restore"); -} - static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -101,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) { atomic_inc(&genpd->sd_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static void genpd_acquire_lock(struct generic_pm_domain *genpd) @@ -139,6 +143,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd) genpd->status = GPD_STATE_ACTIVE; } +static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) +{ + s64 usecs64; + + if (!genpd->cpu_data) + return; + + usecs64 = genpd->power_on_latency_ns; + do_div(usecs64, NSEC_PER_USEC); + usecs64 += genpd->cpu_data->saved_exit_latency; + genpd->cpu_data->idle_state->exit_latency = usecs64; +} + /** * __pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. @@ -146,7 +163,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd) * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. 
*/ -int __pm_genpd_poweron(struct generic_pm_domain *genpd) +static int __pm_genpd_poweron(struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; @@ -176,6 +193,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd) return 0; } + if (genpd->cpu_data) { + cpuidle_pause_and_lock(); + genpd->cpu_data->idle_state->disabled = true; + cpuidle_resume_and_unlock(); + goto out; + } + /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles @@ -215,6 +239,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd) if (elapsed_ns > genpd->power_on_latency_ns) { genpd->power_on_latency_ns = elapsed_ns; genpd->max_off_time_changed = true; + genpd_recalc_cpu_exit_latency(genpd); if (genpd->name) pr_warning("%s: Power-on latency exceeded, " "new value %lld ns\n", genpd->name, @@ -222,6 +247,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd) } } + out: genpd_set_active(genpd); return 0; @@ -247,10 +273,41 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) return ret; } +/** + * pm_genpd_name_poweron - Restore power to a given PM domain and its masters. + * @domain_name: Name of the PM domain to power up. + */ +int pm_genpd_name_poweron(const char *domain_name) +{ + struct generic_pm_domain *genpd; + + genpd = pm_genpd_lookup_name(domain_name); + return genpd ? pm_genpd_poweron(genpd) : -EINVAL; +} + #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME +static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, start, dev); +} + +static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, + save_state_latency_ns, "state save"); +} + +static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, + restore_state_latency_ns, + "state restore"); +} + static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { @@ -275,7 +332,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, pdd = dev->power.subsys_data ? 
dev->power.subsys_data->domain_data : NULL; - if (pdd) { + if (pdd && pdd->dev) { to_gpd_data(pdd)->td.constraint_changed = true; genpd = dev_to_genpd(dev); } else { @@ -339,19 +396,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; + bool need_restore = gpd_data->need_restore; - if (!gpd_data->need_restore) - return; - + gpd_data->need_restore = false; mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); - genpd_restore_dev(genpd, dev); - genpd_stop_dev(genpd, dev); + if (need_restore) + genpd_restore_dev(genpd, dev); mutex_lock(&genpd->lock); - - gpd_data->need_restore = false; } /** @@ -378,8 +432,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) */ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { - if (!work_pending(&genpd->power_off_work)) - queue_work(pm_wq, &genpd->power_off_work); + queue_work(pm_wq, &genpd->power_off_work); } /** @@ -415,10 +468,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) return -EBUSY; not_suspended = 0; - list_for_each_entry(pdd, &genpd->dev_list, list_node) + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + enum pm_qos_flags_status stat; + + stat = dev_pm_qos_flags(pdd->dev, + PM_QOS_FLAG_NO_POWER_OFF + | PM_QOS_FLAG_REMOTE_WAKEUP); + if (stat > PM_QOS_FLAGS_NONE) + return -EBUSY; + if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) - || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) + || pdd->dev->power.irq_safe)) not_suspended++; + } if (not_suspended > genpd->in_progress) return -EBUSY; @@ -458,6 +520,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } } + if (genpd->cpu_data) { + /* + * If cpu_data is set, cpuidle should turn the domain off when + * the CPU in it is idle. In that case we don't decrement the + * subdomain counts of the master domains, so that power is not + * removed from the current domain prematurely as a result of + * cutting off the masters' power. + */ + genpd->status = GPD_STATE_POWER_OFF; + cpuidle_pause_and_lock(); + genpd->cpu_data->idle_state->disabled = false; + cpuidle_resume_and_unlock(); + goto out; + } + if (genpd->power_off) { ktime_t time_start; s64 elapsed_ns; @@ -544,9 +621,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (dev_gpd_data(dev)->always_on) - return -EBUSY; - stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; if (stop_ok && !stop_ok(dev)) return -EBUSY; @@ -595,7 +669,7 @@ static int pm_genpd_runtime_resume(struct device *dev) /* If power.irq_safe, the PM domain is never powered off. */ if (dev->power.irq_safe) - goto out; + return genpd_start_dev_no_timing(genpd, dev); mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); @@ -628,12 +702,17 @@ static int pm_genpd_runtime_resume(struct device *dev) wake_up_all(&genpd->status_wait_queue); mutex_unlock(&genpd->lock); - out: - genpd_start_dev(genpd, dev); - return 0; } +static bool pd_ignore_unused; +static int __init pd_ignore_unused_setup(char *__unused) +{ + pd_ignore_unused = true; + return 1; +} +__setup("pd_ignore_unused", pd_ignore_unused_setup); + /** * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. 
*/ @@ -641,6 +720,11 @@ void pm_genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; + if (pd_ignore_unused) { + pr_warn("genpd: Not disabling unused power domains\n"); + return; + } + mutex_lock(&gpd_list_lock); list_for_each_entry(genpd, &gpd_list, gpd_list_node) @@ -666,6 +750,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP +/** + * pm_genpd_present - Check if the given PM domain has been initialized. + * @genpd: PM domain to check. + */ +static bool pm_genpd_present(struct generic_pm_domain *genpd) +{ + struct generic_pm_domain *gpd; + + if (IS_ERR_OR_NULL(genpd)) + return false; + + list_for_each_entry(gpd, &gpd_list, gpd_list_node) + if (gpd == genpd) + return true; + + return false; +} + static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, struct device *dev) { @@ -719,9 +821,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. * - * This function is only called in "noirq" stages of system power transitions, - * so it need not acquire locks (all of the "noirq" callbacks are executed - * sequentially, so it is guaranteed that it will never run twice in parallel). + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). */ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) { @@ -746,6 +849,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) } /** + * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. + * @genpd: PM domain to power on. + * + * This function is only called in "noirq" and "syscore" stages of system power + * transitions, so it need not acquire locks (all of the "noirq" callbacks are + * executed sequentially, so it is guaranteed that it will never run twice in + * parallel). + */ +static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) +{ + struct gpd_link *link; + + if (genpd->status != GPD_STATE_POWER_OFF) + return; + + list_for_each_entry(link, &genpd->slave_links, slave_node) { + pm_genpd_sync_poweron(link->master); + genpd_sd_counter_inc(link->master); + } + + if (genpd->power_on) + genpd->power_on(genpd); + + genpd->status = GPD_STATE_ACTIVE; +} + +/** * resume_needed - Check whether to resume a device before system suspend. * @dev: Device to check. * @genpd: PM domain the device belongs to. 
@@ -802,7 +932,7 @@ static int pm_genpd_prepare(struct device *dev) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { - pm_runtime_put_sync(dev); + pm_runtime_put(dev); return -EBUSY; } @@ -843,7 +973,7 @@ static int pm_genpd_prepare(struct device *dev) pm_runtime_enable(dev); } - pm_runtime_put_sync(dev); + pm_runtime_put(dev); return ret; } @@ -906,7 +1036,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -939,7 +1069,7 @@ static int pm_genpd_resume_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on + if (genpd->suspend_power_off || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; @@ -948,7 +1078,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); genpd->suspended_count--; return genpd_start_dev(genpd, dev); @@ -1059,8 +1189,7 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? - 0 : genpd_stop_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev); } /** @@ -1080,8 +1209,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? - 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); } /** @@ -1155,8 +1283,8 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspended_count++ == 0) { /* * The boot kernel might put the domain into arbitrary state, - * so make it appear as powered off to pm_genpd_poweron(), so - * that it tries to power it on in case it was really off. + * so make it appear as powered off to pm_genpd_sync_poweron(), + * so that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; if (genpd->suspend_power_off) { @@ -1174,9 +1302,9 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - pm_genpd_poweron(genpd); + pm_genpd_sync_poweron(genpd); - return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev); } /** @@ -1211,9 +1339,34 @@ static void pm_genpd_complete(struct device *dev) pm_generic_complete(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); - pm_runtime_idle(dev); + pm_request_idle(dev); + } +} + +/** + * pm_genpd_syscore_switch - Switch power during system core suspend or resume. + * @dev: Device that normally is marked as "always on" to switch power for. + * + * This routine may only be called during the system core (syscore) suspend or + * resume phase for devices whose "always on" flags are set. 
+ */ +void pm_genpd_syscore_switch(struct device *dev, bool suspend) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd(dev); + if (!pm_genpd_present(genpd)) + return; + + if (suspend) { + genpd->suspended_count++; + pm_genpd_sync_poweroff(genpd); + } else { + pm_genpd_sync_poweron(genpd); + genpd->suspended_count--; } } +EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch); #else @@ -1235,6 +1388,27 @@ static void pm_genpd_complete(struct device *dev) #endif /* CONFIG_PM_SLEEP */ +static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + + gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); + if (!gpd_data) + return NULL; + + mutex_init(&gpd_data->lock); + gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + dev_pm_qos_add_notifier(dev, &gpd_data->nb); + return gpd_data; +} + +static void __pm_genpd_free_dev_data(struct device *dev, + struct generic_pm_domain_data *gpd_data) +{ + dev_pm_qos_remove_notifier(dev, &gpd_data->nb); + kfree(gpd_data); +} + /** * __pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. @@ -1244,7 +1418,7 @@ static void pm_genpd_complete(struct device *dev) int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td) { - struct generic_pm_domain_data *gpd_data; + struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; struct pm_domain_data *pdd; int ret = 0; @@ -1253,14 +1427,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; - gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); - if (!gpd_data) + gpd_data_new = __pm_genpd_alloc_dev_data(dev); + if (!gpd_data_new) return -ENOMEM; - mutex_init(&gpd_data->lock); - gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; - dev_pm_qos_add_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); if (genpd->prepared_count > 0) { @@ -1274,35 +1444,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, goto out; } + ret = dev_pm_get_subsys_data(dev); + if (ret) + goto out; + genpd->device_count++; genpd->max_off_time_changed = true; - dev_pm_get_subsys_data(dev); - - mutex_lock(&gpd_data->lock); spin_lock_irq(&dev->power.lock); + dev->pm_domain = &genpd->domain; - dev->power.subsys_data->domain_data = &gpd_data->base; - gpd_data->base.dev = dev; - list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); - gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; + if (dev->power.subsys_data->domain_data) { + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + } else { + gpd_data = gpd_data_new; + dev->power.subsys_data->domain_data = &gpd_data->base; + } + gpd_data->refcount++; if (td) gpd_data->td = *td; + spin_unlock_irq(&dev->power.lock); + + mutex_lock(&gpd_data->lock); + gpd_data->base.dev = dev; + list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); + gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; - spin_unlock_irq(&dev->power.lock); mutex_unlock(&gpd_data->lock); - genpd_release_lock(genpd); - - return 0; - out: genpd_release_lock(genpd); - dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - kfree(gpd_data); + if (gpd_data != gpd_data_new) + __pm_genpd_free_dev_data(dev, gpd_data_new); + return ret; } @@ -1338,6 +1515,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct 
device *dev, return __pm_genpd_add_device(genpd, dev, td); } + +/** + * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it. + * @domain_name: Name of the PM domain to add the device to. + * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. + */ +int __pm_genpd_name_add_device(const char *domain_name, struct device *dev, + struct gpd_timing_data *td) +{ + return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td); +} + /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. * @genpd: PM domain to remove the device from. @@ -1348,6 +1538,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; + bool remove = false; int ret = 0; dev_dbg(dev, "%s()\n", __func__); @@ -1368,22 +1559,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; spin_lock_irq(&dev->power.lock); + dev->pm_domain = NULL; pdd = dev->power.subsys_data->domain_data; list_del_init(&pdd->list_node); - dev->power.subsys_data->domain_data = NULL; + gpd_data = to_gpd_data(pdd); + if (--gpd_data->refcount == 0) { + dev->power.subsys_data->domain_data = NULL; + remove = true; + } + spin_unlock_irq(&dev->power.lock); - gpd_data = to_gpd_data(pdd); mutex_lock(&gpd_data->lock); pdd->dev = NULL; mutex_unlock(&gpd_data->lock); genpd_release_lock(genpd); - dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - kfree(gpd_data); dev_pm_put_subsys_data(dev); + if (remove) + __pm_genpd_free_dev_data(dev, gpd_data); + return 0; out: @@ -1393,26 +1590,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, } /** - * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. - * @dev: Device to set/unset the flag for. - * @val: The new value of the device's "always on" flag. - */ -void pm_genpd_dev_always_on(struct device *dev, bool val) -{ - struct pm_subsys_data *psd; - unsigned long flags; - - spin_lock_irqsave(&dev->power.lock, flags); - - psd = dev_to_psd(dev); - if (psd && psd->domain_data) - to_gpd_data(psd->domain_data)->always_on = val; - - spin_unlock_irqrestore(&dev->power.lock, flags); -} -EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); - -/** * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag. * @dev: Device to set/unset the flag for. * @val: The new value of the device's "need restore" flag. @@ -1443,7 +1620,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct gpd_link *link; int ret = 0; - if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) + || genpd == subdomain) return -EINVAL; start: @@ -1490,6 +1668,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, } /** + * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain. + * @master_name: Name of the master PM domain to add the subdomain to. + * @subdomain_name: Name of the subdomain to be added. 
+ */ +int pm_genpd_add_subdomain_names(const char *master_name, + const char *subdomain_name) +{ + struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd; + + if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name)) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (!master && !strcmp(gpd->name, master_name)) + master = gpd; + + if (!subdomain && !strcmp(gpd->name, subdomain_name)) + subdomain = gpd; + + if (master && subdomain) + break; + } + mutex_unlock(&gpd_list_lock); + + return pm_genpd_add_subdomain(master, subdomain); +} + +/** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. * @genpd: Master PM domain to remove the subdomain from. * @subdomain: Subdomain to be removed. @@ -1541,33 +1748,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, * @dev: Device to add the callbacks to. * @ops: Set of callbacks to add. * @td: Timing data to add to the device along with the callbacks (optional). + * + * Every call to this routine should be balanced with a call to + * __pm_genpd_remove_callbacks() and they must not be nested. */ int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, struct gpd_timing_data *td) { - struct pm_domain_data *pdd; + struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL; int ret = 0; - if (!(dev && dev->power.subsys_data && ops)) + if (!(dev && ops)) return -EINVAL; + gpd_data_new = __pm_genpd_alloc_dev_data(dev); + if (!gpd_data_new) + return -ENOMEM; + pm_runtime_disable(dev); device_pm_lock(); - pdd = dev->power.subsys_data->domain_data; - if (pdd) { - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + ret = dev_pm_get_subsys_data(dev); + if (ret) + goto out; - gpd_data->ops = *ops; - if (td) - gpd_data->td = *td; + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data->domain_data) { + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); } else { - ret = -EINVAL; + gpd_data = gpd_data_new; + dev->power.subsys_data->domain_data = &gpd_data->base; } + gpd_data->refcount++; + gpd_data->ops = *ops; + if (td) + gpd_data->td = *td; + spin_unlock_irq(&dev->power.lock); + + out: device_pm_unlock(); pm_runtime_enable(dev); + if (gpd_data != gpd_data_new) + __pm_genpd_free_dev_data(dev, gpd_data_new); + return ret; } EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); @@ -1576,10 +1802,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. * @dev: Device to remove the callbacks from. * @clear_td: If set, clear the device's timing data too. + * + * This routine can only be called after pm_genpd_add_callbacks(). 
*/ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { - struct pm_domain_data *pdd; + struct generic_pm_domain_data *gpd_data = NULL; + bool remove = false; int ret = 0; if (!(dev && dev->power.subsys_data)) @@ -1588,24 +1817,156 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) pm_runtime_disable(dev); device_pm_lock(); - pdd = dev->power.subsys_data->domain_data; - if (pdd) { - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + spin_lock_irq(&dev->power.lock); - gpd_data->ops = (struct gpd_dev_ops){ 0 }; + if (dev->power.subsys_data->domain_data) { + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + gpd_data->ops = (struct gpd_dev_ops){ NULL }; if (clear_td) gpd_data->td = (struct gpd_timing_data){ 0 }; + + if (--gpd_data->refcount == 0) { + dev->power.subsys_data->domain_data = NULL; + remove = true; + } } else { ret = -EINVAL; } + spin_unlock_irq(&dev->power.lock); + device_pm_unlock(); pm_runtime_enable(dev); - return ret; + if (ret) + return ret; + + dev_pm_put_subsys_data(dev); + if (remove) + __pm_genpd_free_dev_data(dev, gpd_data); + + return 0; } EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); +/** + * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle. + * @genpd: PM domain to be connected with cpuidle. + * @state: cpuidle state this domain can disable/enable. + * + * Make a PM domain behave as though it contained a CPU core, that is, instead + * of calling its power down routine it will enable the given cpuidle state so + * that the cpuidle subsystem can power it down (if possible and desirable). + */ +int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) +{ + struct cpuidle_driver *cpuidle_drv; + struct gpd_cpu_data *cpu_data; + struct cpuidle_state *idle_state; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd) || state < 0) + return -EINVAL; + + genpd_acquire_lock(genpd); + + if (genpd->cpu_data) { + ret = -EEXIST; + goto out; + } + cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL); + if (!cpu_data) { + ret = -ENOMEM; + goto out; + } + cpuidle_drv = cpuidle_driver_ref(); + if (!cpuidle_drv) { + ret = -ENODEV; + goto err_drv; + } + if (cpuidle_drv->state_count <= state) { + ret = -EINVAL; + goto err; + } + idle_state = &cpuidle_drv->states[state]; + if (!idle_state->disabled) { + ret = -EAGAIN; + goto err; + } + cpu_data->idle_state = idle_state; + cpu_data->saved_exit_latency = idle_state->exit_latency; + genpd->cpu_data = cpu_data; + genpd_recalc_cpu_exit_latency(genpd); + + out: + genpd_release_lock(genpd); + return ret; + + err: + cpuidle_driver_unref(); + + err_drv: + kfree(cpu_data); + goto out; +} + +/** + * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it. + * @name: Name of the domain to connect to cpuidle. + * @state: cpuidle state this domain can manipulate. + */ +int pm_genpd_name_attach_cpuidle(const char *name, int state) +{ + return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state); +} + +/** + * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain. + * @genpd: PM domain to remove the cpuidle connection from. + * + * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the + * given PM domain. 
+ */ +int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) +{ + struct gpd_cpu_data *cpu_data; + struct cpuidle_state *idle_state; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd)) + return -EINVAL; + + genpd_acquire_lock(genpd); + + cpu_data = genpd->cpu_data; + if (!cpu_data) { + ret = -ENODEV; + goto out; + } + idle_state = cpu_data->idle_state; + if (!idle_state->disabled) { + ret = -EAGAIN; + goto out; + } + idle_state->exit_latency = cpu_data->saved_exit_latency; + cpuidle_driver_unref(); + genpd->cpu_data = NULL; + kfree(cpu_data); + + out: + genpd_release_lock(genpd); + return ret; +} + +/** + * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it. + * @name: Name of the domain to disconnect cpuidle from. + */ +int pm_genpd_name_detach_cpuidle(const char *name) +{ + return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name)); +} + /* Default device callbacks for generic PM domains. */ /** @@ -1615,16 +1976,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); static int pm_genpd_default_save_state(struct device *dev) { int (*cb)(struct device *__dev); - struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.save_state; if (cb) return cb(dev); - if (drv && drv->pm && drv->pm->runtime_suspend) - return drv->pm->runtime_suspend(dev); + if (dev->type && dev->type->pm) + cb = dev->type->pm->runtime_suspend; + else if (dev->class && dev->class->pm) + cb = dev->class->pm->runtime_suspend; + else if (dev->bus && dev->bus->pm) + cb = dev->bus->pm->runtime_suspend; + else + cb = NULL; - return 0; + if (!cb && dev->driver && dev->driver->pm) + cb = dev->driver->pm->runtime_suspend; + + return cb ? cb(dev) : 0; } /** @@ -1634,16 +2003,24 @@ static int pm_genpd_default_save_state(struct device *dev) static int pm_genpd_default_restore_state(struct device *dev) { int (*cb)(struct device *__dev); - struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.restore_state; if (cb) return cb(dev); - if (drv && drv->pm && drv->pm->runtime_resume) - return drv->pm->runtime_resume(dev); + if (dev->type && dev->type->pm) + cb = dev->type->pm->runtime_resume; + else if (dev->class && dev->class->pm) + cb = dev->class->pm->runtime_resume; + else if (dev->bus && dev->bus->pm) + cb = dev->bus->pm->runtime_resume; + else + cb = NULL; - return 0; + if (!cb && dev->driver && dev->driver->pm) + cb = dev->driver->pm->runtime_resume; + + return cb ? cb(dev) : 0; } #ifdef CONFIG_PM_SLEEP @@ -1778,7 +2155,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; - genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; genpd->domain.ops.prepare = pm_genpd_prepare; genpd->domain.ops.suspend = pm_genpd_suspend; genpd->domain.ops.suspend_late = pm_genpd_suspend_late; |
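
The patch introduces name-based lookup helpers (pm_genpd_name_poweron(), pm_genpd_add_subdomain_names(), __pm_genpd_name_add_device(), pm_genpd_name_attach_cpuidle() and friends) built on the new pm_genpd_lookup_name(), so platform code can refer to domains registered elsewhere without holding a struct generic_pm_domain pointer. The following is a minimal sketch of how platform setup code might use them; the domain names "SOC-CORE" and "SOC-AUDIO" and the function soc_audio_domain_setup() are hypothetical, and both domains are assumed to have been registered beforehand with pm_genpd_init().

#include <linux/device.h>
#include <linux/pm_domain.h>

/*
 * Illustrative only: hook a device up to an already-registered
 * "SOC-AUDIO" domain and make that domain a subdomain of "SOC-CORE".
 */
static int soc_audio_domain_setup(struct device *dev)
{
	int ret;

	/* Link the audio domain under the core domain, by name. */
	ret = pm_genpd_add_subdomain_names("SOC-CORE", "SOC-AUDIO");
	if (ret)
		return ret;

	/* Add the device, with no extra timing data (td == NULL). */
	ret = __pm_genpd_name_add_device("SOC-AUDIO", dev, NULL);
	if (ret)
		return ret;

	/* Make sure the domain is powered before the device is used. */
	return pm_genpd_name_poweron("SOC-AUDIO");
}

Each helper returns 0 on success or a negative errno, so the sketch simply propagates the first failure.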

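The new cpuidle hooks let a PM domain that contains CPU cores be powered down by cpuidle rather than through its ->power_off() callback: pm_genpd_attach_cpuidle() ties the domain to a cpuidle state index, powering the domain on or off then disables or enables that state, and the domain's power_on_latency_ns is folded (in microseconds) into the state's exit_latency via genpd_recalc_cpu_exit_latency(). A hedged usage sketch follows, assuming a hypothetical "SOC-CPU" domain and state index 1.

#include <linux/pm_domain.h>

/* Illustrative only: domain name and state index are platform-specific. */
#define SOC_CPU_PD_NAME		"SOC-CPU"
#define SOC_CPU_PD_STATE	1	/* index into the cpuidle driver's states[] */

static int soc_cpu_domain_use_cpuidle(void)
{
	/*
	 * On success the domain is no longer powered off via its
	 * ->power_off() callback; pm_genpd_poweroff() enables the given
	 * cpuidle state instead and lets cpuidle cut the power.  The
	 * target state must currently be disabled, otherwise the call
	 * fails with -EAGAIN.
	 */
	return pm_genpd_name_attach_cpuidle(SOC_CPU_PD_NAME, SOC_CPU_PD_STATE);
}

static int soc_cpu_domain_stop_using_cpuidle(void)
{
	/* Only possible while the domain is on (the state is disabled). */
	return pm_genpd_name_detach_cpuidle(SOC_CPU_PD_NAME);
}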
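The patch also drops the per-device "always on" flag (pm_genpd_dev_always_on() and the related checks) and adds pm_genpd_syscore_switch(), which may only be called during the syscore suspend/resume phase, where pm_genpd_sync_poweroff() and pm_genpd_sync_poweron() can run without locking. Below is a sketch of how a platform might wire it into syscore_ops for a device that must keep running until the very last suspend stage; soc_timer_dev and the surrounding names are hypothetical.

#include <linux/device.h>
#include <linux/pm_domain.h>
#include <linux/syscore_ops.h>

/* Hypothetical always-running device (e.g. a system timer). */
static struct device *soc_timer_dev;

static int soc_timer_syscore_suspend(void)
{
	/* Power the timer's domain off at the very end of suspend. */
	pm_genpd_syscore_switch(soc_timer_dev, true);
	return 0;
}

static void soc_timer_syscore_resume(void)
{
	/* Restore power before anything else needs the timer again. */
	pm_genpd_syscore_switch(soc_timer_dev, false);
}

static struct syscore_ops soc_timer_syscore_ops = {
	.suspend = soc_timer_syscore_suspend,
	.resume  = soc_timer_syscore_resume,
};

/* Registered from platform code, e.g.: register_syscore_ops(&soc_timer_syscore_ops); */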