Diffstat (limited to 'drivers/base')
54 files changed, 6482 insertions, 2140 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index c8b453939da..23b8726962a 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -1,11 +1,10 @@ menu "Generic Driver Options" -config UEVENT_HELPER_PATH - string "path to uevent helper" - depends on HOTPLUG - default "" +config UEVENT_HELPER + bool "Support for uevent helper" + default y help - Path to uevent helper program forked by the kernel for + The uevent helper program is forked by the kernel for every uevent. Before the switch to the netlink-based uevent source, this was used to hook hotplug scripts into kernel device events. It @@ -16,14 +15,18 @@ config UEVENT_HELPER_PATH that it creates a high system load, or on smaller systems it is known to create out-of-memory situations during bootup. - To disable user space helper program execution at early boot - time specify an empty string here. This setting can be altered +config UEVENT_HELPER_PATH + string "path to uevent helper" + depends on UEVENT_HELPER + default "" + help + To disable user space helper program execution by default, + specify an empty string here. This setting can still be altered via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper later at runtime. config DEVTMPFS bool "Maintain a devtmpfs filesystem to mount at /dev" - depends on HOTPLUG help This creates a tmpfs/ramfs filesystem instance early at bootup. In this filesystem, the kernel driver core maintains device @@ -51,7 +54,7 @@ config DEVTMPFS_MOUNT with the commandline parameter: devtmpfs.mount=0|1. This option does not affect initramfs based booting, here the devtmpfs filesystem always needs to be mounted manually - after the roots is mounted. + after the rootfs is mounted. With this option enabled, it allows to bring up a system in rescue mode with init=/bin/sh, even when the /dev directory on the rootfs is completely empty. @@ -145,6 +148,17 @@ config EXTRA_FIRMWARE_DIR this option you can point it elsewhere, such as /lib/firmware/ or some other directory containing the firmware files. +config FW_LOADER_USER_HELPER + bool "Fallback user-helper invocation for firmware loading" + depends on FW_LOADER + default y + help + This option enables / disables the invocation of user-helper + (e.g. udev) for loading firmware files as a fallback after the + direct file loading in kernel fails. The user-mode helper is + no longer required unless you have a special firmware file that + resides in a non-standard path. + config DEBUG_DRIVER bool "Driver Core verbose debug messages" depends on DEBUG_KERNEL @@ -176,6 +190,9 @@ config GENERIC_CPU_DEVICES bool default n +config GENERIC_CPU_AUTOPROBE + bool + config SOC_BUS bool @@ -191,11 +208,9 @@ config DMA_SHARED_BUFFER APIs extension; the file's descriptor can then be passed on to other driver. -config CMA - bool "Contiguous Memory Allocator" - depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK - select MIGRATION - select MEMORY_ISOLATION +config DMA_CMA + bool "DMA Contiguous Memory Allocator" + depends on HAVE_DMA_CONTIGUOUS && CMA help This enables the Contiguous Memory Allocator which allows drivers to allocate big physically-contiguous blocks of memory for use with @@ -204,17 +219,7 @@ config CMA For more information see <include/linux/dma-contiguous.h>. If unsure, say "n". -if CMA - -config CMA_DEBUG - bool "CMA debug messages (DEVELOPMENT)" - depends on DEBUG_KERNEL - help - Turns on debug messages in CMA.
This produces KERN_DEBUG - messages for every CMA call as well as various messages while - processing calls such as dma_alloc_from_contiguous(). - This option does not affect warning and error messages. - +if DMA_CMA comment "Default contiguous memory area size:" config CMA_SIZE_MBYTES @@ -253,7 +258,7 @@ endchoice config CMA_ALIGNMENT int "Maximum PAGE_SIZE order of alignment for contiguous buffers" - range 4 9 + range 4 12 default 8 help DMA mapping framework by default aligns all buffers to the smallest diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 5aa2d703d19..04b314e0fa5 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -1,16 +1,16 @@ # Makefile for the Linux device tree -obj-y := core.o bus.o dd.o syscore.o \ +obj-y := component.o core.o bus.o dd.o syscore.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o \ - topology.o + topology.o container.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o -obj-$(CONFIG_CMA) += dma-contiguous.o +obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o -obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o +obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o @@ -21,6 +21,7 @@ endif obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o obj-$(CONFIG_REGMAP) += regmap/ obj-$(CONFIG_SOC_BUS) += soc.o +obj-$(CONFIG_PINCTRL) += pinctrl.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c index d78b204e65c..b84ca8f13f9 100644 --- a/drivers/base/attribute_container.c +++ b/drivers/base/attribute_container.c @@ -12,7 +12,6 @@ */ #include <linux/attribute_container.h> -#include <linux/init.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -167,7 +166,7 @@ attribute_container_add_device(struct device *dev, ic->classdev.parent = get_device(dev); ic->classdev.class = cont->class; cont->class->dev_release = attribute_container_release; - dev_set_name(&ic->classdev, dev_name(dev)); + dev_set_name(&ic->classdev, "%s", dev_name(dev)); if (fn) fn(cont, dev, &ic->classdev); else diff --git a/drivers/base/base.h b/drivers/base/base.h index 6ee17bb391a..251c5d30f96 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -63,8 +63,6 @@ struct driver_private { * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. - * @driver_data - private pointer for driver specific info. Will turn into a - * list soon. * @device - pointer back to the struct class that this structure is * associated with. * @@ -76,7 +74,6 @@ struct device_private { struct klist_node knode_driver; struct klist_node knode_bus; struct list_head deferred_probe; - void *driver_data; struct device *device; }; #define to_device_private_parent(obj) \ @@ -100,6 +97,9 @@ static inline int hypervisor_init(void) { return 0; } #endif extern int platform_bus_init(void); extern void cpu_dev_init(void); +extern void container_dev_init(void); + +struct kobject *virtual_device_parent(struct device *dev); extern int bus_add_device(struct device *dev); extern void bus_probe_device(struct device *dev); @@ -117,6 +117,16 @@ static inline int driver_match_device(struct device_driver *drv, return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; } +extern int driver_add_groups(struct device_driver *drv, + const struct attribute_group **groups); +extern void driver_remove_groups(struct device_driver *drv, + const struct attribute_group **groups); + +extern int device_add_groups(struct device *dev, + const struct attribute_group **groups); +extern void device_remove_groups(struct device *dev, + const struct attribute_group **groups); + extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 24eb0786834..83e910a5756 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/sysfs.h> #include "base.h" #include "power/power.h" @@ -145,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr) } EXPORT_SYMBOL_GPL(bus_remove_file); +static void bus_release(struct kobject *kobj) +{ + struct subsys_private *priv = + container_of(kobj, typeof(*priv), subsys.kobj); + struct bus_type *bus = priv->bus; + + kfree(priv); + bus->p = NULL; +} + static struct kobj_type bus_ktype = { .sysfs_ops = &bus_sysfs_ops, + .release = bus_release, }; static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) @@ -165,8 +177,8 @@ static const struct kset_uevent_ops bus_uevent_ops = { static struct kset *bus_kset; /* Manually detach a device from its associated driver. */ -static ssize_t driver_unbind(struct device_driver *drv, - const char *buf, size_t count) +static ssize_t unbind_store(struct device_driver *drv, const char *buf, + size_t count) { struct bus_type *bus = bus_get(drv->bus); struct device *dev; @@ -185,15 +197,15 @@ static ssize_t driver_unbind(struct device_driver *drv, bus_put(bus); return err; } -static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind); +static DRIVER_ATTR_WO(unbind); /* * Manually attach a device to a driver. * Note: the driver must want to bind to the device, * it is not possible to override the driver's id table. 
*/ -static ssize_t driver_bind(struct device_driver *drv, - const char *buf, size_t count) +static ssize_t bind_store(struct device_driver *drv, const char *buf, + size_t count) { struct bus_type *bus = bus_get(drv->bus); struct device *dev; @@ -221,7 +233,7 @@ static ssize_t driver_bind(struct device_driver *drv, bus_put(bus); return err; } -static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind); +static DRIVER_ATTR_WO(bind); static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) { @@ -290,7 +302,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, struct device *dev; int error = 0; - if (!bus) + if (!bus || !bus->p) return -EINVAL; klist_iter_init_node(&bus->p->klist_devices, &i, @@ -324,7 +336,7 @@ struct device *bus_find_device(struct bus_type *bus, struct klist_iter i; struct device *dev; - if (!bus) + if (!bus || !bus->p) return NULL; klist_iter_init_node(&bus->p->klist_devices, &i, @@ -460,7 +472,7 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev) if (!bus->dev_attrs) return 0; - for (i = 0; attr_name(bus->dev_attrs[i]); i++) { + for (i = 0; bus->dev_attrs[i].attr.name; i++) { error = device_create_file(dev, &bus->dev_attrs[i]); if (error) { while (--i >= 0) @@ -476,7 +488,7 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev) int i; if (bus->dev_attrs) { - for (i = 0; attr_name(bus->dev_attrs[i]); i++) + for (i = 0; bus->dev_attrs[i].attr.name; i++) device_remove_file(dev, &bus->dev_attrs[i]); } } @@ -499,6 +511,9 @@ int bus_add_device(struct device *dev) error = device_add_attrs(bus, dev); if (error) goto out_put; + error = device_add_groups(dev, bus->dev_groups); + if (error) + goto out_groups; error = sysfs_create_link(&bus->p->devices_kset->kobj, &dev->kobj, dev_name(dev)); if (error) @@ -513,6 +528,8 @@ int bus_add_device(struct device *dev) out_subsys: sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); +out_groups: + device_remove_groups(dev, bus->dev_groups); out_id: device_remove_attrs(bus, dev); out_put: @@ -575,6 +592,7 @@ void bus_remove_device(struct device *dev) sysfs_remove_link(&dev->bus->p->devices_kset->kobj, dev_name(dev)); device_remove_attrs(dev->bus, dev); + device_remove_groups(dev, dev->bus->dev_groups); if (klist_node_attached(&dev->p->knode_bus)) klist_del(&dev->p->knode_bus); @@ -584,37 +602,6 @@ void bus_remove_device(struct device *dev) bus_put(dev->bus); } -static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv) -{ - int error = 0; - int i; - - if (bus->drv_attrs) { - for (i = 0; attr_name(bus->drv_attrs[i]); i++) { - error = driver_create_file(drv, &bus->drv_attrs[i]); - if (error) - goto err; - } - } -done: - return error; -err: - while (--i >= 0) - driver_remove_file(drv, &bus->drv_attrs[i]); - goto done; -} - -static void driver_remove_attrs(struct bus_type *bus, - struct device_driver *drv) -{ - int i; - - if (bus->drv_attrs) { - for (i = 0; attr_name(bus->drv_attrs[i]); i++) - driver_remove_file(drv, &bus->drv_attrs[i]); - } -} - static int __must_check add_bind_files(struct device_driver *drv) { int ret; @@ -659,8 +646,8 @@ static void remove_probe_files(struct bus_type *bus) bus_remove_file(bus, &bus_attr_drivers_probe); } -static ssize_t driver_uevent_store(struct device_driver *drv, - const char *buf, size_t count) +static ssize_t uevent_store(struct device_driver *drv, const char *buf, + size_t count) { enum kobject_action action; @@ -668,7 +655,7 @@ static ssize_t driver_uevent_store(struct device_driver *drv, 
kobject_uevent(&drv->p->kobj, action); return count; } -static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store); +static DRIVER_ATTR_WO(uevent); /** * bus_add_driver - Add a driver to the bus. @@ -700,12 +687,12 @@ int bus_add_driver(struct device_driver *drv) if (error) goto out_unregister; + klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); if (drv->bus->p->drivers_autoprobe) { error = driver_attach(drv); if (error) goto out_unregister; } - klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); module_add_driver(drv->owner, drv); error = driver_create_file(drv, &driver_attr_uevent); @@ -713,10 +700,10 @@ int bus_add_driver(struct device_driver *drv) printk(KERN_ERR "%s: uevent attr (%s) failed\n", __func__, drv->name); } - error = driver_add_attrs(bus, drv); + error = driver_add_groups(drv, bus->drv_groups); if (error) { /* How the hell do we get out of this pickle? Give up */ - printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n", + printk(KERN_ERR "%s: driver_create_groups(%s) failed\n", __func__, drv->name); } @@ -755,7 +742,7 @@ void bus_remove_driver(struct device_driver *drv) if (!drv->suppress_bind_attrs) remove_bind_files(drv); - driver_remove_attrs(drv->bus, drv); + driver_remove_groups(drv, drv->bus->drv_groups); driver_remove_file(drv, &driver_attr_uevent); klist_remove(&drv->p->knode_bus); pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name); @@ -834,40 +821,16 @@ struct bus_type *find_bus(char *name) } #endif /* 0 */ - -/** - * bus_add_attrs - Add default attributes for this bus. - * @bus: Bus that has just been registered. - */ - -static int bus_add_attrs(struct bus_type *bus) +static int bus_add_groups(struct bus_type *bus, + const struct attribute_group **groups) { - int error = 0; - int i; - - if (bus->bus_attrs) { - for (i = 0; attr_name(bus->bus_attrs[i]); i++) { - error = bus_create_file(bus, &bus->bus_attrs[i]); - if (error) - goto err; - } - } -done: - return error; -err: - while (--i >= 0) - bus_remove_file(bus, &bus->bus_attrs[i]); - goto done; + return sysfs_create_groups(&bus->p->subsys.kobj, groups); } -static void bus_remove_attrs(struct bus_type *bus) +static void bus_remove_groups(struct bus_type *bus, + const struct attribute_group **groups) { - int i; - - if (bus->bus_attrs) { - for (i = 0; attr_name(bus->bus_attrs[i]); i++) - bus_remove_file(bus, &bus->bus_attrs[i]); - } + sysfs_remove_groups(&bus->p->subsys.kobj, groups); } static void klist_devices_get(struct klist_node *n) @@ -898,18 +861,18 @@ static ssize_t bus_uevent_store(struct bus_type *bus, static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); /** - * __bus_register - register a driver-core subsystem + * bus_register - register a driver-core subsystem * @bus: bus to register - * @key: lockdep class key * * Once we have that, we register the bus with the kobject * infrastructure, then register the children subsystems it has: * the devices and drivers that belong to the subsystem. 
*/ -int __bus_register(struct bus_type *bus, struct lock_class_key *key) +int bus_register(struct bus_type *bus) { int retval; struct subsys_private *priv; + struct lock_class_key *key = &bus->lock_key; priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); if (!priv) @@ -959,14 +922,14 @@ int __bus_register(struct bus_type *bus, struct lock_class_key *key) if (retval) goto bus_probe_files_fail; - retval = bus_add_attrs(bus); + retval = bus_add_groups(bus, bus->bus_groups); if (retval) - goto bus_attrs_fail; + goto bus_groups_fail; pr_debug("bus: '%s': registered\n", bus->name); return 0; -bus_attrs_fail: +bus_groups_fail: remove_probe_files(bus); bus_probe_files_fail: kset_unregister(bus->p->drivers_kset); @@ -981,7 +944,7 @@ out: bus->p = NULL; return retval; } -EXPORT_SYMBOL_GPL(__bus_register); +EXPORT_SYMBOL_GPL(bus_register); /** * bus_unregister - remove a bus from the system @@ -995,14 +958,12 @@ void bus_unregister(struct bus_type *bus) pr_debug("bus: '%s': unregistering\n", bus->name); if (bus->dev_root) device_unregister(bus->dev_root); - bus_remove_attrs(bus); + bus_remove_groups(bus, bus->bus_groups); remove_probe_files(bus); kset_unregister(bus->p->drivers_kset); kset_unregister(bus->p->devices_kset); bus_remove_file(bus, &bus_attr_uevent); kset_unregister(&bus->p->subsys); - kfree(bus->p); - bus->p = NULL; } EXPORT_SYMBOL_GPL(bus_unregister); @@ -1205,26 +1166,10 @@ static void system_root_device_release(struct device *dev) { kfree(dev); } -/** - * subsys_system_register - register a subsystem at /sys/devices/system/ - * @subsys: system subsystem - * @groups: default attributes for the root device - * - * All 'system' subsystems have a /sys/devices/system/<name> root device - * with the name of the subsystem. The root device can carry subsystem- - * wide attributes. All registered devices are below this single root - * device and are named after the subsystem with a simple enumeration - * number appended. The registered devices are not explicitely named; - * only 'id' in the device needs to be set. - * - * Do not use this interface for anything new, it exists for compatibility - * with bad ideas only. New subsystems should use plain subsystems; and - * add the subsystem-wide attributes should be added to the subsystem - * directory itself and not some create fake root-device placed in - * /sys/devices/system/<name>. - */ -int subsys_system_register(struct bus_type *subsys, - const struct attribute_group **groups) + +static int subsys_register(struct bus_type *subsys, + const struct attribute_group **groups, + struct kobject *parent_of_root) { struct device *dev; int err; @@ -1243,7 +1188,7 @@ int subsys_system_register(struct bus_type *subsys, if (err < 0) goto err_name; - dev->kobj.parent = &system_kset->kobj; + dev->kobj.parent = parent_of_root; dev->groups = groups; dev->release = system_root_device_release; @@ -1263,8 +1208,56 @@ err_dev: bus_unregister(subsys); return err; } + +/** + * subsys_system_register - register a subsystem at /sys/devices/system/ + * @subsys: system subsystem + * @groups: default attributes for the root device + * + * All 'system' subsystems have a /sys/devices/system/<name> root device + * with the name of the subsystem. The root device can carry subsystem- + * wide attributes. All registered devices are below this single root + * device and are named after the subsystem with a simple enumeration + * number appended. The registered devices are not explicitly named; + * only 'id' in the device needs to be set. 
+ * + * Do not use this interface for anything new, it exists for compatibility + * with bad ideas only. New subsystems should use plain subsystems; the + * subsystem-wide attributes should be added to the subsystem + * directory itself and not to some fake root-device placed in + * /sys/devices/system/<name>. + */ +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups) +{ + return subsys_register(subsys, groups, &system_kset->kobj); +} EXPORT_SYMBOL_GPL(subsys_system_register); +/** + * subsys_virtual_register - register a subsystem at /sys/devices/virtual/ + * @subsys: virtual subsystem + * @groups: default attributes for the root device + * + * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device + * with the name of the subsystem. The root device can carry subsystem-wide + * attributes. All registered devices are below this single root device. + * There's no restriction on device naming. This is for kernel software + * constructs which need sysfs interface. + */ +int subsys_virtual_register(struct bus_type *subsys, + const struct attribute_group **groups) +{ + struct kobject *virtual_dir; + + virtual_dir = virtual_device_parent(NULL); + if (!virtual_dir) + return -ENOMEM; + + return subsys_register(subsys, groups, virtual_dir); +} +EXPORT_SYMBOL_GPL(subsys_virtual_register); + int __init buses_init(void) { bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); diff --git a/drivers/base/class.c b/drivers/base/class.c index 03243d4002f..f96f70419a7 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, return ret; } -static const void *class_attr_namespace(struct kobject *kobj, - const struct attribute *attr) -{ - struct class_attribute *class_attr = to_class_attr(attr); - struct subsys_private *cp = to_subsys_private(kobj); - const void *ns = NULL; - - if (class_attr->namespace) - ns = class_attr->namespace(cp->class, class_attr); - return ns; -} - static void class_release(struct kobject *kobj) { struct subsys_private *cp = to_subsys_private(kobj); @@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject static const struct sysfs_ops class_sysfs_ops = { .show = class_attr_show, .store = class_attr_store, - .namespace = class_attr_namespace, }; static struct kobj_type class_ktype = { @@ -99,21 +86,23 @@ static struct kobj_type class_ktype = { static struct kset *class_kset; -int class_create_file(struct class *cls, const struct class_attribute *attr) +int class_create_file_ns(struct class *cls, const struct class_attribute *attr, + const void *ns) { int error; if (cls) - error = sysfs_create_file(&cls->p->subsys.kobj, - &attr->attr); + error = sysfs_create_file_ns(&cls->p->subsys.kobj, + &attr->attr, ns); else error = -EINVAL; return error; } -void class_remove_file(struct class *cls, const struct class_attribute *attr) +void class_remove_file_ns(struct class *cls, const struct class_attribute *attr, + const void *ns) { if (cls) - sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr); + sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns); } static struct class *class_get(struct class *cls) @@ -135,7 +124,7 @@ static int add_class_attrs(struct class *cls) int error = 0; if (cls->class_attrs) { - for (i = 0; attr_name(cls->class_attrs[i]); i++) { + for (i = 0; cls->class_attrs[i].attr.name; i++) { error = class_create_file(cls, &cls->class_attrs[i]); if (error) goto
error; @@ -154,7 +143,7 @@ static void remove_class_attrs(struct class *cls) int i; if (cls->class_attrs) { - for (i = 0; attr_name(cls->class_attrs[i]); i++) + for (i = 0; cls->class_attrs[i].attr.name; i++) class_remove_file(cls, &cls->class_attrs[i]); } } @@ -420,8 +409,8 @@ EXPORT_SYMBOL_GPL(class_for_each_device); * code. There's no locking restriction. */ struct device *class_find_device(struct class *class, struct device *start, - void *data, - int (*match)(struct device *, void *)) + const void *data, + int (*match)(struct device *, const void *)) { struct class_dev_iter iter; struct device *dev; @@ -600,8 +589,8 @@ int __init classes_init(void) return 0; } -EXPORT_SYMBOL_GPL(class_create_file); -EXPORT_SYMBOL_GPL(class_remove_file); +EXPORT_SYMBOL_GPL(class_create_file_ns); +EXPORT_SYMBOL_GPL(class_remove_file_ns); EXPORT_SYMBOL_GPL(class_unregister); EXPORT_SYMBOL_GPL(class_destroy); diff --git a/drivers/base/component.c b/drivers/base/component.c new file mode 100644 index 00000000000..c4778995cd7 --- /dev/null +++ b/drivers/base/component.c @@ -0,0 +1,390 @@ +/* + * Componentized device handling. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is work in progress. We gather up the component devices into a list, + * and bind them when instructed. At the moment, we're specific to the DRM + * subsystem, and only handle one master device, but this doesn't have to be + * the case. + */ +#include <linux/component.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +struct master { + struct list_head node; + struct list_head components; + bool bound; + + const struct component_master_ops *ops; + struct device *dev; +}; + +struct component { + struct list_head node; + struct list_head master_node; + struct master *master; + bool bound; + + const struct component_ops *ops; + struct device *dev; +}; + +static DEFINE_MUTEX(component_mutex); +static LIST_HEAD(component_list); +static LIST_HEAD(masters); + +static struct master *__master_find(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *m; + + list_for_each_entry(m, &masters, node) + if (m->dev == dev && (!ops || m->ops == ops)) + return m; + + return NULL; +} + +/* Attach an unattached component to a master. */ +static void component_attach_master(struct master *master, struct component *c) +{ + c->master = master; + + list_add_tail(&c->master_node, &master->components); +} + +/* Detach a component from a master.
*/ +static void component_detach_master(struct master *master, struct component *c) +{ + list_del(&c->master_node); + + c->master = NULL; +} + +int component_master_add_child(struct master *master, + int (*compare)(struct device *, void *), void *compare_data) +{ + struct component *c; + int ret = -ENXIO; + + list_for_each_entry(c, &component_list, node) { + if (c->master) + continue; + + if (compare(c->dev, compare_data)) { + component_attach_master(master, c); + ret = 0; + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(component_master_add_child); + +/* Detach all attached components from this master */ +static void master_remove_components(struct master *master) +{ + while (!list_empty(&master->components)) { + struct component *c = list_first_entry(&master->components, + struct component, master_node); + + WARN_ON(c->master != master); + + component_detach_master(master, c); + } +} + +/* + * Try to bring up a master. If component is NULL, we're interested in + * this master, otherwise it's a component which must be present to try + * and bring up the master. + * + * Returns 1 for successful bringup, 0 if not ready, or -ve errno. + */ +static int try_to_bring_up_master(struct master *master, + struct component *component) +{ + int ret = 0; + + if (!master->bound) { + /* + * Search the list of components, looking for components that + * belong to this master, and attach them to the master. + */ + if (master->ops->add_components(master->dev, master)) { + /* Failed to find all components */ + master_remove_components(master); + ret = 0; + goto out; + } + + if (component && component->master != master) { + master_remove_components(master); + ret = 0; + goto out; + } + + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + /* Found all components */ + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); + dev_info(master->dev, "master bind failed: %d\n", ret); + master_remove_components(master); + goto out; + } + + master->bound = true; + ret = 1; + } +out: + + return ret; +} + +static int try_to_bring_up_masters(struct component *component) +{ + struct master *m; + int ret = 0; + + list_for_each_entry(m, &masters, node) { + ret = try_to_bring_up_master(m, component); + if (ret != 0) + break; + } + + return ret; +} + +static void take_down_master(struct master *master) +{ + if (master->bound) { + master->ops->unbind(master->dev); + devres_release_group(master->dev, NULL); + master->bound = false; + } + + master_remove_components(master); +} + +int component_master_add(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *master; + int ret; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->dev = dev; + master->ops = ops; + INIT_LIST_HEAD(&master->components); + + /* Add to the list of available masters. */ + mutex_lock(&component_mutex); + list_add(&master->node, &masters); + + ret = try_to_bring_up_master(master, NULL); + + if (ret < 0) { + /* Delete off the list if we weren't successful */ + list_del(&master->node); + kfree(master); + } + mutex_unlock(&component_mutex); + + return ret < 0 ? 
ret : 0; +} +EXPORT_SYMBOL_GPL(component_master_add); + +void component_master_del(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *master; + + mutex_lock(&component_mutex); + master = __master_find(dev, ops); + if (master) { + take_down_master(master); + + list_del(&master->node); + kfree(master); + } + mutex_unlock(&component_mutex); +} +EXPORT_SYMBOL_GPL(component_master_del); + +static void component_unbind(struct component *component, + struct master *master, void *data) +{ + WARN_ON(!component->bound); + + component->ops->unbind(component->dev, master->dev, data); + component->bound = false; + + /* Release all resources claimed in the binding of this component */ + devres_release_group(component->dev, component); +} + +void component_unbind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return; + + list_for_each_entry_reverse(c, &master->components, master_node) + component_unbind(c, master, data); +} +EXPORT_SYMBOL_GPL(component_unbind_all); + +static int component_bind(struct component *component, struct master *master, + void *data) +{ + int ret; + + /* + * Each component initialises inside its own devres group. + * This allows us to roll-back a failed component without + * affecting anything else. + */ + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + /* + * Also open a group for the device itself: this allows us + * to release the resources claimed against the sub-device + * at the appropriate moment. + */ + if (!devres_open_group(component->dev, component, GFP_KERNEL)) { + devres_release_group(master->dev, NULL); + return -ENOMEM; + } + + dev_dbg(master->dev, "binding %s (ops %ps)\n", + dev_name(component->dev), component->ops); + + ret = component->ops->bind(component->dev, master->dev, data); + if (!ret) { + component->bound = true; + + /* + * Close the component device's group so that resources + * allocated in the binding are encapsulated for removal + * at unbind. Remove the group on the DRM device as we + * can clean those resources up independently. 
+ */ + devres_close_group(component->dev, NULL); + devres_remove_group(master->dev, NULL); + + dev_info(master->dev, "bound %s (ops %ps)\n", + dev_name(component->dev), component->ops); + } else { + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", + dev_name(component->dev), component->ops, ret); + } + + return ret; +} + +int component_bind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + int ret = 0; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return -EINVAL; + + list_for_each_entry(c, &master->components, master_node) { + ret = component_bind(c, master, data); + if (ret) + break; + } + + if (ret != 0) { + list_for_each_entry_continue_reverse(c, &master->components, + master_node) + component_unbind(c, master, data); + } + + return ret; +} +EXPORT_SYMBOL_GPL(component_bind_all); + +int component_add(struct device *dev, const struct component_ops *ops) +{ + struct component *component; + int ret; + + component = kzalloc(sizeof(*component), GFP_KERNEL); + if (!component) + return -ENOMEM; + + component->ops = ops; + component->dev = dev; + + dev_dbg(dev, "adding component (ops %ps)\n", ops); + + mutex_lock(&component_mutex); + list_add_tail(&component->node, &component_list); + + ret = try_to_bring_up_masters(component); + if (ret < 0) { + list_del(&component->node); + + kfree(component); + } + mutex_unlock(&component_mutex); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(component_add); + +void component_del(struct device *dev, const struct component_ops *ops) +{ + struct component *c, *component = NULL; + + mutex_lock(&component_mutex); + list_for_each_entry(c, &component_list, node) + if (c->dev == dev && c->ops == ops) { + list_del(&c->node); + component = c; + break; + } + + if (component && component->master) + take_down_master(component->master); + + mutex_unlock(&component_mutex); + + WARN_ON(!component); + kfree(component); +} +EXPORT_SYMBOL_GPL(component_del); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/container.c b/drivers/base/container.c new file mode 100644 index 00000000000..ecbfbe2e908 --- /dev/null +++ b/drivers/base/container.c @@ -0,0 +1,44 @@ +/* + * System bus type for containers. + * + * Copyright (C) 2013, Intel Corporation + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/container.h> + +#include "base.h" + +#define CONTAINER_BUS_NAME "container" + +static int trivial_online(struct device *dev) +{ + return 0; +} + +static int container_offline(struct device *dev) +{ + struct container_dev *cdev = to_container_dev(dev); + + return cdev->offline ? 
cdev->offline(cdev) : 0; +} + +struct bus_type container_subsys = { + .name = CONTAINER_BUS_NAME, + .dev_name = CONTAINER_BUS_NAME, + .online = trivial_online, + .offline = container_offline, +}; + +void __init container_dev_init(void) +{ + int ret; + + ret = subsys_system_register(&container_subsys, NULL); + if (ret) + pr_err("%s() failed: %d\n", __func__, ret); +} diff --git a/drivers/base/core.c b/drivers/base/core.c index a235085e343..20da3ad1696 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -23,9 +23,9 @@ #include <linux/genhd.h> #include <linux/kallsyms.h> #include <linux/mutex.h> -#include <linux/async.h> #include <linux/pm_runtime.h> #include <linux/netdevice.h> +#include <linux/sysfs.h> #include "base.h" #include "power/power.h" @@ -36,9 +36,9 @@ long sysfs_deprecated = 1; #else long sysfs_deprecated = 0; #endif -static __init int sysfs_deprecated_setup(char *arg) +static int __init sysfs_deprecated_setup(char *arg) { - return strict_strtol(arg, 10, &sysfs_deprecated); + return kstrtol(arg, 10, &sysfs_deprecated); } early_param("sysfs.deprecated", sysfs_deprecated_setup); #endif @@ -49,6 +49,28 @@ static struct kobject *dev_kobj; struct kobject *sysfs_dev_char_kobj; struct kobject *sysfs_dev_block_kobj; +static DEFINE_MUTEX(device_hotplug_lock); + +void lock_device_hotplug(void) +{ + mutex_lock(&device_hotplug_lock); +} + +void unlock_device_hotplug(void) +{ + mutex_unlock(&device_hotplug_lock); +} + +int lock_device_hotplug_sysfs(void) +{ + if (mutex_trylock(&device_hotplug_lock)) + return 0; + + /* Avoid busy looping (5 ms of sleep should do). */ + msleep(5); + return restart_syscall(); +} + #ifdef CONFIG_BLOCK static inline int device_is_not_partition(struct device *dev) { @@ -193,12 +215,12 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, EXPORT_SYMBOL_GPL(device_show_bool); /** - * device_release - free device structure. - * @kobj: device's kobject. + * device_release - free device structure. + * @kobj: device's kobject. * - * This is called once the reference count for the object - * reaches 0. We forward the call to the device's release - * method, which should handle actually freeing the structure. + * This is called once the reference count for the object + * reaches 0. We forward the call to the device's release + * method, which should handle actually freeing the structure. 
*/ static void device_release(struct kobject *kobj) { @@ -283,15 +305,21 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, const char *tmp; const char *name; umode_t mode = 0; + kuid_t uid = GLOBAL_ROOT_UID; + kgid_t gid = GLOBAL_ROOT_GID; add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); - name = device_get_devnode(dev, &mode, &tmp); + name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); if (name) { add_uevent_var(env, "DEVNAME=%s", name); - kfree(tmp); if (mode) add_uevent_var(env, "DEVMODE=%#o", mode & 0777); + if (!uid_eq(uid, GLOBAL_ROOT_UID)) + add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); + if (!gid_eq(gid, GLOBAL_ROOT_GID)) + add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); + kfree(tmp); } } @@ -339,7 +367,7 @@ static const struct kset_uevent_ops device_uevent_ops = { .uevent = dev_uevent, }; -static ssize_t show_uevent(struct device *dev, struct device_attribute *attr, +static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kobject *top_kobj; @@ -382,7 +410,7 @@ out: return count; } -static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, +static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { enum kobject_action action; @@ -393,96 +421,48 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, dev_err(dev, "uevent: unknown action-string\n"); return count; } +static DEVICE_ATTR_RW(uevent); -static struct device_attribute uevent_attr = - __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent); - -static int device_add_attributes(struct device *dev, - struct device_attribute *attrs) -{ - int error = 0; - int i; - - if (attrs) { - for (i = 0; attr_name(attrs[i]); i++) { - error = device_create_file(dev, &attrs[i]); - if (error) - break; - } - if (error) - while (--i >= 0) - device_remove_file(dev, &attrs[i]); - } - return error; -} - -static void device_remove_attributes(struct device *dev, - struct device_attribute *attrs) +static ssize_t online_show(struct device *dev, struct device_attribute *attr, + char *buf) { - int i; + bool val; - if (attrs) - for (i = 0; attr_name(attrs[i]); i++) - device_remove_file(dev, &attrs[i]); + device_lock(dev); + val = !dev->offline; + device_unlock(dev); + return sprintf(buf, "%u\n", val); } -static int device_add_bin_attributes(struct device *dev, - struct bin_attribute *attrs) +static ssize_t online_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { - int error = 0; - int i; + bool val; + int ret; - if (attrs) { - for (i = 0; attr_name(attrs[i]); i++) { - error = device_create_bin_file(dev, &attrs[i]); - if (error) - break; - } - if (error) - while (--i >= 0) - device_remove_bin_file(dev, &attrs[i]); - } - return error; -} + ret = strtobool(buf, &val); + if (ret < 0) + return ret; -static void device_remove_bin_attributes(struct device *dev, - struct bin_attribute *attrs) -{ - int i; + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; - if (attrs) - for (i = 0; attr_name(attrs[i]); i++) - device_remove_bin_file(dev, &attrs[i]); + ret = val ? device_online(dev) : device_offline(dev); + unlock_device_hotplug(); + return ret < 0 ? 
ret : count; } +static DEVICE_ATTR_RW(online); -static int device_add_groups(struct device *dev, - const struct attribute_group **groups) +int device_add_groups(struct device *dev, const struct attribute_group **groups) { - int error = 0; - int i; - - if (groups) { - for (i = 0; groups[i]; i++) { - error = sysfs_create_group(&dev->kobj, groups[i]); - if (error) { - while (--i >= 0) - sysfs_remove_group(&dev->kobj, - groups[i]); - break; - } - } - } - return error; + return sysfs_create_groups(&dev->kobj, groups); } -static void device_remove_groups(struct device *dev, - const struct attribute_group **groups) +void device_remove_groups(struct device *dev, + const struct attribute_group **groups) { - int i; - - if (groups) - for (i = 0; groups[i]; i++) - sysfs_remove_group(&dev->kobj, groups[i]); + sysfs_remove_groups(&dev->kobj, groups); } static int device_add_attrs(struct device *dev) @@ -492,35 +472,37 @@ static int device_add_attrs(struct device *dev) int error; if (class) { - error = device_add_attributes(dev, class->dev_attrs); + error = device_add_groups(dev, class->dev_groups); if (error) return error; - error = device_add_bin_attributes(dev, class->dev_bin_attrs); - if (error) - goto err_remove_class_attrs; } if (type) { error = device_add_groups(dev, type->groups); if (error) - goto err_remove_class_bin_attrs; + goto err_remove_class_groups; } error = device_add_groups(dev, dev->groups); if (error) goto err_remove_type_groups; + if (device_supports_offline(dev) && !dev->offline_disabled) { + error = device_create_file(dev, &dev_attr_online); + if (error) + goto err_remove_dev_groups; + } + return 0; + err_remove_dev_groups: + device_remove_groups(dev, dev->groups); err_remove_type_groups: if (type) device_remove_groups(dev, type->groups); - err_remove_class_bin_attrs: - if (class) - device_remove_bin_attributes(dev, class->dev_bin_attrs); - err_remove_class_attrs: + err_remove_class_groups: if (class) - device_remove_attributes(dev, class->dev_attrs); + device_remove_groups(dev, class->dev_groups); return error; } @@ -530,26 +512,22 @@ static void device_remove_attrs(struct device *dev) struct class *class = dev->class; const struct device_type *type = dev->type; + device_remove_file(dev, &dev_attr_online); device_remove_groups(dev, dev->groups); if (type) device_remove_groups(dev, type->groups); - if (class) { - device_remove_attributes(dev, class->dev_attrs); - device_remove_bin_attributes(dev, class->dev_bin_attrs); - } + if (class) + device_remove_groups(dev, class->dev_groups); } - -static ssize_t show_dev(struct device *dev, struct device_attribute *attr, +static ssize_t dev_show(struct device *dev, struct device_attribute *attr, char *buf) { return print_dev_t(buf, dev->devt); } - -static struct device_attribute devt_attr = - __ATTR(dev, S_IRUGO, show_dev, NULL); +static DEVICE_ATTR_RO(dev); /* /sys/devices/ */ struct kset *devices_kset; @@ -563,10 +541,20 @@ int device_create_file(struct device *dev, const struct device_attribute *attr) { int error = 0; - if (dev) + + if (dev) { + WARN(((attr->attr.mode & S_IWUGO) && !attr->store), + "Attribute %s: write permission without 'store'\n", + attr->attr.name); + WARN(((attr->attr.mode & S_IRUGO) && !attr->show), + "Attribute %s: read permission without 'show'\n", + attr->attr.name); error = sysfs_create_file(&dev->kobj, &attr->attr); + } + return error; } +EXPORT_SYMBOL_GPL(device_create_file); /** * device_remove_file - remove sysfs attribute file. 
@@ -579,6 +567,24 @@ void device_remove_file(struct device *dev, if (dev) sysfs_remove_file(&dev->kobj, &attr->attr); } +EXPORT_SYMBOL_GPL(device_remove_file); + +/** + * device_remove_file_self - remove sysfs attribute file from its own method. + * @dev: device. + * @attr: device attribute descriptor. + * + * See kernfs_remove_self() for details. + */ +bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr) +{ + if (dev) + return sysfs_remove_file_self(&dev->kobj, &attr->attr); + else + return false; +} +EXPORT_SYMBOL_GPL(device_remove_file_self); /** * device_create_bin_file - create sysfs binary attribute file for device. @@ -608,39 +614,6 @@ void device_remove_bin_file(struct device *dev, } EXPORT_SYMBOL_GPL(device_remove_bin_file); -/** - * device_schedule_callback_owner - helper to schedule a callback for a device - * @dev: device. - * @func: callback function to invoke later. - * @owner: module owning the callback routine - * - * Attribute methods must not unregister themselves or their parent device - * (which would amount to the same thing). Attempts to do so will deadlock, - * since unregistration is mutually exclusive with driver callbacks. - * - * Instead methods can call this routine, which will attempt to allocate - * and schedule a workqueue request to call back @func with @dev as its - * argument in the workqueue's process context. @dev will be pinned until - * @func returns. - * - * This routine is usually called via the inline device_schedule_callback(), - * which automatically sets @owner to THIS_MODULE. - * - * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated, -ENODEV if a reference to @owner isn't available. - * - * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an - * underlying sysfs routine (since it is intended for use by attribute - * methods), and if sysfs isn't available you'll get nothing but -ENOSYS. - */ -int device_schedule_callback_owner(struct device *dev, - void (*func)(struct device *), struct module *owner) -{ - return sysfs_schedule_callback(&dev->kobj, - (void (*)(void *)) func, dev, owner); -} -EXPORT_SYMBOL_GPL(device_schedule_callback_owner); - static void klist_children_get(struct klist_node *n) { struct device_private *p = to_device_private_parent(n); @@ -689,8 +662,9 @@ void device_initialize(struct device *dev) device_pm_init(dev); set_dev_node(dev, -1); } +EXPORT_SYMBOL_GPL(device_initialize); -static struct kobject *virtual_device_parent(struct device *dev) +struct kobject *virtual_device_parent(struct device *dev) { static struct kobject *virtual_dir = NULL; @@ -1041,12 +1015,12 @@ int device_add(struct device *dev) if (platform_notify) platform_notify(dev); - error = device_create_file(dev, &uevent_attr); + error = device_create_file(dev, &dev_attr_uevent); if (error) goto attrError; if (MAJOR(dev->devt)) { - error = device_create_file(dev, &devt_attr); + error = device_create_file(dev, &dev_attr_dev); if (error) goto ueventattrError; @@ -1113,9 +1087,9 @@ done: device_remove_sys_dev_entry(dev); devtattrError: if (MAJOR(dev->devt)) - device_remove_file(dev, &devt_attr); + device_remove_file(dev, &dev_attr_dev); ueventattrError: - device_remove_file(dev, &uevent_attr); + device_remove_file(dev, &dev_attr_uevent); attrError: kobject_uevent(&dev->kobj, KOBJ_REMOVE); kobject_del(&dev->kobj); @@ -1128,6 +1102,7 @@ name_error: dev->p = NULL; goto done; } +EXPORT_SYMBOL_GPL(device_add); /** * device_register - register a device with the system. 
@@ -1152,6 +1127,7 @@ int device_register(struct device *dev) device_initialize(dev); return device_add(dev); } +EXPORT_SYMBOL_GPL(device_register); /** * get_device - increment reference count for device. @@ -1165,6 +1141,7 @@ struct device *get_device(struct device *dev) { return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; } +EXPORT_SYMBOL_GPL(get_device); /** * put_device - decrement reference count. @@ -1176,6 +1153,7 @@ void put_device(struct device *dev) if (dev) kobject_put(&dev->kobj); } +EXPORT_SYMBOL_GPL(put_device); /** * device_del - delete device from system. @@ -1207,7 +1185,7 @@ void device_del(struct device *dev) if (MAJOR(dev->devt)) { devtmpfs_delete_node(dev); device_remove_sys_dev_entry(dev); - device_remove_file(dev, &devt_attr); + device_remove_file(dev, &dev_attr_dev); } if (dev->class) { device_remove_class_symlinks(dev); @@ -1222,7 +1200,7 @@ void device_del(struct device *dev) klist_del(&dev->knode_class); mutex_unlock(&dev->class->p->mutex); } - device_remove_file(dev, &uevent_attr); + device_remove_file(dev, &dev_attr_uevent); device_remove_attrs(dev); bus_remove_device(dev); device_pm_remove(dev); @@ -1238,6 +1216,7 @@ void device_del(struct device *dev) kobject_del(&dev->kobj); put_device(parent); } +EXPORT_SYMBOL_GPL(device_del); /** * device_unregister - unregister device from system. @@ -1256,6 +1235,7 @@ void device_unregister(struct device *dev) device_del(dev); put_device(dev); } +EXPORT_SYMBOL_GPL(device_unregister); static struct device *next_device(struct klist_iter *i) { @@ -1274,6 +1254,8 @@ static struct device *next_device(struct klist_iter *i) * device_get_devnode - path of device node file * @dev: device * @mode: returned file access mode + * @uid: returned file owner + * @gid: returned file group * @tmp: possibly allocated string * * Return the relative path of a possible device node. @@ -1282,7 +1264,8 @@ static struct device *next_device(struct klist_iter *i) * freed by the caller. */ const char *device_get_devnode(struct device *dev, - umode_t *mode, const char **tmp) + umode_t *mode, kuid_t *uid, kgid_t *gid, + const char **tmp) { char *s; @@ -1290,7 +1273,7 @@ const char *device_get_devnode(struct device *dev, /* the device type may provide a specific name */ if (dev->type && dev->type->devnode) - *tmp = dev->type->devnode(dev, mode); + *tmp = dev->type->devnode(dev, mode, uid, gid); if (*tmp) return *tmp; @@ -1316,8 +1299,8 @@ const char *device_get_devnode(struct device *dev, /** * device_for_each_child - device child iterator. * @parent: parent struct device. - * @data: data for the callback. * @fn: function to be called for each device. + * @data: data for the callback. * * Iterate over @parent's child devices, and call @fn for each, * passing it @data. @@ -1341,12 +1324,13 @@ int device_for_each_child(struct device *parent, void *data, klist_iter_exit(&i); return error; } +EXPORT_SYMBOL_GPL(device_for_each_child); /** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device - * @data: Data to pass to match function * @match: Callback function to check device + * @data: Data to pass to match function * * This is similar to the device_for_each_child() function above, but it * returns a reference to a device that is 'found' for later use, as @@ -1356,6 +1340,8 @@ int device_for_each_child(struct device *parent, void *data, * if it does. 
If the callback returns non-zero and a reference to the * current device can be obtained, this function will return to the caller * and not iterate over any more devices. + * + * NOTE: you will need to drop the reference with put_device() after use. */ struct device *device_find_child(struct device *parent, void *data, int (*match)(struct device *dev, void *data)) @@ -1373,6 +1359,7 @@ struct device *device_find_child(struct device *parent, void *data, klist_iter_exit(&i); return child; } +EXPORT_SYMBOL_GPL(device_find_child); int __init devices_init(void) { @@ -1400,20 +1387,86 @@ int __init devices_init(void) return -ENOMEM; } -EXPORT_SYMBOL_GPL(device_for_each_child); -EXPORT_SYMBOL_GPL(device_find_child); +static int device_check_offline(struct device *dev, void *not_used) +{ + int ret; -EXPORT_SYMBOL_GPL(device_initialize); -EXPORT_SYMBOL_GPL(device_add); -EXPORT_SYMBOL_GPL(device_register); + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; -EXPORT_SYMBOL_GPL(device_del); -EXPORT_SYMBOL_GPL(device_unregister); -EXPORT_SYMBOL_GPL(get_device); -EXPORT_SYMBOL_GPL(put_device); + return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; +} -EXPORT_SYMBOL_GPL(device_create_file); -EXPORT_SYMBOL_GPL(device_remove_file); +/** + * device_offline - Prepare the device for hot-removal. + * @dev: Device to be put offline. + * + * Execute the device bus type's .offline() callback, if present, to prepare + * the device for a subsequent hot-removal. If that succeeds, the device must + * not be used until either it is removed or its bus type's .online() callback + * is executed. + * + * Call under device_hotplug_lock. + */ +int device_offline(struct device *dev) +{ + int ret; + + if (dev->offline_disabled) + return -EPERM; + + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = 1; + } else { + ret = dev->bus->offline(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + dev->offline = true; + } + } + } + device_unlock(dev); + + return ret; +} + +/** + * device_online - Put the device back online after successful device_offline(). + * @dev: Device to be put back online. + * + * If device_offline() has been successfully executed for @dev, but the device + * has not been removed subsequently, execute its bus type's .online() callback + * to indicate that the device can be used again. + * + * Call under device_hotplug_lock. + */ +int device_online(struct device *dev) +{ + int ret = 0; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = dev->bus->online(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_ONLINE); + dev->offline = false; + } + } else { + ret = 1; + } + } + device_unlock(dev); + + return ret; +} struct root_device { struct device dev; @@ -1517,34 +1570,11 @@ static void device_create_release(struct device *dev) kfree(dev); } -/** - * device_create_vargs - creates a device and registers it with sysfs - * @class: pointer to the struct class that this device should be registered to - * @parent: pointer to the parent struct device of this new device, if any - * @devt: the dev_t for the char device to be added - * @drvdata: the data to be added to the device for callbacks - * @fmt: string for the device's name - * @args: va_list for the device's name - * - * This function can be used by char device classes. 
A struct device - * will be created in sysfs, registered to the specified class. - * - * A "dev" file will be created, showing the dev_t for the device, if - * the dev_t is not 0,0. - * If a pointer to a parent struct device is passed in, the newly created - * struct device will be a child of that device in sysfs. - * The pointer to the struct device will be returned from the call. - * Any further sysfs files that might be required can be created using this - * pointer. - * - * Returns &struct device pointer on success, or ERR_PTR() on error. - * - * Note: the struct class passed to this function must have previously - * been created with a call to class_create(). - */ -struct device *device_create_vargs(struct class *class, struct device *parent, - dev_t devt, void *drvdata, const char *fmt, - va_list args) +static struct device * +device_create_groups_vargs(struct class *class, struct device *parent, + dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; @@ -1558,9 +1588,11 @@ struct device *device_create_vargs(struct class *class, struct device *parent, goto error; } + device_initialize(dev); dev->devt = devt; dev->class = class; dev->parent = parent; + dev->groups = groups; dev->release = device_create_release; dev_set_drvdata(dev, drvdata); @@ -1568,7 +1600,7 @@ struct device *device_create_vargs(struct class *class, struct device *parent, if (retval) goto error; - retval = device_register(dev); + retval = device_add(dev); if (retval) goto error; @@ -1578,6 +1610,39 @@ error: put_device(dev); return ERR_PTR(retval); } + +/** + * device_create_vargs - creates a device and registers it with sysfs + * @class: pointer to the struct class that this device should be registered to + * @parent: pointer to the parent struct device of this new device, if any + * @devt: the dev_t for the char device to be added + * @drvdata: the data to be added to the device for callbacks + * @fmt: string for the device's name + * @args: va_list for the device's name + * + * This function can be used by char device classes. A struct device + * will be created in sysfs, registered to the specified class. + * + * A "dev" file will be created, showing the dev_t for the device, if + * the dev_t is not 0,0. + * If a pointer to a parent struct device is passed in, the newly created + * struct device will be a child of that device in sysfs. + * The pointer to the struct device will be returned from the call. + * Any further sysfs files that might be required can be created using this + * pointer. + * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * + * Note: the struct class passed to this function must have previously + * been created with a call to class_create(). 
+ */ +struct device *device_create_vargs(struct class *class, struct device *parent, + dev_t devt, void *drvdata, const char *fmt, + va_list args) +{ + return device_create_groups_vargs(class, parent, devt, drvdata, NULL, + fmt, args); +} EXPORT_SYMBOL_GPL(device_create_vargs); /** @@ -1617,9 +1682,53 @@ struct device *device_create(struct class *class, struct device *parent, } EXPORT_SYMBOL_GPL(device_create); -static int __match_devt(struct device *dev, void *data) +/** + * device_create_with_groups - creates a device and registers it with sysfs + * @class: pointer to the struct class that this device should be registered to + * @parent: pointer to the parent struct device of this new device, if any + * @devt: the dev_t for the char device to be added + * @drvdata: the data to be added to the device for callbacks + * @groups: NULL-terminated list of attribute groups to be created + * @fmt: string for the device's name + * + * This function can be used by char device classes. A struct device + * will be created in sysfs, registered to the specified class. + * Additional attributes specified in the groups parameter will also + * be created automatically. + * + * A "dev" file will be created, showing the dev_t for the device, if + * the dev_t is not 0,0. + * If a pointer to a parent struct device is passed in, the newly created + * struct device will be a child of that device in sysfs. + * The pointer to the struct device will be returned from the call. + * Any further sysfs files that might be required can be created using this + * pointer. + * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * + * Note: the struct class passed to this function must have previously + * been created with a call to class_create(). + */ +struct device *device_create_with_groups(struct class *class, + struct device *parent, dev_t devt, + void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) 
+{ + va_list vargs; + struct device *dev; + + va_start(vargs, fmt); + dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, + fmt, vargs); + va_end(vargs); + return dev; +} +EXPORT_SYMBOL_GPL(device_create_with_groups); + +static int __match_devt(struct device *dev, const void *data) { - dev_t *devt = data; + const dev_t *devt = data; return dev->devt == *devt; } @@ -1685,8 +1794,7 @@ EXPORT_SYMBOL_GPL(device_destroy); */ int device_rename(struct device *dev, const char *new_name) { - char *old_class_name = NULL; - char *new_class_name = NULL; + struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; @@ -1694,8 +1802,7 @@ int device_rename(struct device *dev, const char *new_name) if (!dev) return -EINVAL; - pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), - __func__, new_name); + dev_dbg(dev, "renaming to %s\n", new_name); old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); if (!old_device_name) { @@ -1704,21 +1811,20 @@ int device_rename(struct device *dev, const char *new_name) } if (dev->class) { - error = sysfs_rename_link(&dev->class->p->subsys.kobj, - &dev->kobj, old_device_name, new_name); + error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, + kobj, old_device_name, + new_name, kobject_namespace(kobj)); if (error) goto out; } - error = kobject_rename(&dev->kobj, new_name); + error = kobject_rename(kobj, new_name); if (error) goto out; out: put_device(dev); - kfree(new_class_name); - kfree(old_class_name); kfree(old_device_name); return error; @@ -1825,7 +1931,7 @@ EXPORT_SYMBOL_GPL(device_move); */ void device_shutdown(void) { - struct device *dev; + struct device *dev, *parent; spin_lock(&devices_kset->list_lock); /* @@ -1842,7 +1948,7 @@ void device_shutdown(void) * prevent it from being freed because parent's * lock is to be held */ - get_device(dev->parent); + parent = get_device(dev->parent); get_device(dev); /* * Make sure the device is off the kset list, in the @@ -1852,8 +1958,8 @@ void device_shutdown(void) spin_unlock(&devices_kset->list_lock); /* hold lock to avoid race with probe/release */ - if (dev->parent) - device_lock(dev->parent); + if (parent) + device_lock(parent); device_lock(dev); /* Don't allow any more runtime suspends */ @@ -1871,16 +1977,15 @@ void device_shutdown(void) } device_unlock(dev); - if (dev->parent) - device_unlock(dev->parent); + if (parent) + device_unlock(parent); put_device(dev); - put_device(dev->parent); + put_device(parent); spin_lock(&devices_kset->list_lock); } spin_unlock(&devices_kset->list_lock); - async_synchronize_full(); } /* @@ -1935,7 +2040,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen) return pos; } -EXPORT_SYMBOL(create_syslog_header); int dev_vprintk_emit(int level, const struct device *dev, const char *fmt, va_list args) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index fb10728f637..006b1bc5297 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -13,69 +13,67 @@ #include <linux/gfp.h> #include <linux/slab.h> #include <linux/percpu.h> +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/cpufeature.h> #include "base.h" -struct bus_type cpu_subsys = { - .name = "cpu", - .dev_name = "cpu", -}; -EXPORT_SYMBOL_GPL(cpu_subsys); - static DEFINE_PER_CPU(struct device *, cpu_sys_devices); -#ifdef CONFIG_HOTPLUG_CPU -static ssize_t show_online(struct device *dev, - struct device_attribute *attr, - char *buf) +static int cpu_subsys_match(struct device *dev, struct device_driver *drv) { - struct cpu *cpu = 
container_of(dev, struct cpu, dev); + /* ACPI style match is the only one that may succeed. */ + if (acpi_driver_match_device(dev, drv)) + return 1; - return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id)); + return 0; } -static ssize_t __ref store_online(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +#ifdef CONFIG_HOTPLUG_CPU +static void change_cpu_under_node(struct cpu *cpu, + unsigned int from_nid, unsigned int to_nid) +{ + int cpuid = cpu->dev.id; + unregister_cpu_under_node(cpuid, from_nid); + register_cpu_under_node(cpuid, to_nid); + cpu->node_id = to_nid; +} + +static int __ref cpu_subsys_online(struct device *dev) { struct cpu *cpu = container_of(dev, struct cpu, dev); - ssize_t ret; - - cpu_hotplug_driver_lock(); - switch (buf[0]) { - case '0': - ret = cpu_down(cpu->dev.id); - if (!ret) - kobject_uevent(&dev->kobj, KOBJ_OFFLINE); - break; - case '1': - ret = cpu_up(cpu->dev.id); - if (!ret) - kobject_uevent(&dev->kobj, KOBJ_ONLINE); - break; - default: - ret = -EINVAL; - } - cpu_hotplug_driver_unlock(); + int cpuid = dev->id; + int from_nid, to_nid; + int ret; + + from_nid = cpu_to_node(cpuid); + if (from_nid == NUMA_NO_NODE) + return -ENODEV; + + ret = cpu_up(cpuid); + /* + * When hot adding memory to memoryless node and enabling a cpu + * on the node, node number of the cpu may internally change. + */ + to_nid = cpu_to_node(cpuid); + if (from_nid != to_nid) + change_cpu_under_node(cpu, from_nid, to_nid); - if (ret >= 0) - ret = count; return ret; } -static DEVICE_ATTR(online, 0644, show_online, store_online); -static void __cpuinit register_cpu_control(struct cpu *cpu) +static int cpu_subsys_offline(struct device *dev) { - device_create_file(&cpu->dev, &dev_attr_online); + return cpu_down(dev->id); } + void unregister_cpu(struct cpu *cpu) { int logical_cpu = cpu->dev.id; unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); - device_remove_file(&cpu->dev, &dev_attr_online); - device_unregister(&cpu->dev); per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; @@ -87,7 +85,17 @@ static ssize_t cpu_probe_store(struct device *dev, const char *buf, size_t count) { - return arch_cpu_probe(buf, count); + ssize_t cnt; + int ret; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + cnt = arch_cpu_probe(buf, count); + + unlock_device_hotplug(); + return cnt; } static ssize_t cpu_release_store(struct device *dev, @@ -95,19 +103,35 @@ static ssize_t cpu_release_store(struct device *dev, const char *buf, size_t count) { - return arch_cpu_release(buf, count); + ssize_t cnt; + int ret; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + cnt = arch_cpu_release(buf, count); + + unlock_device_hotplug(); + return cnt; } static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ - -#else /* ... 
!CONFIG_HOTPLUG_CPU */ -static inline void register_cpu_control(struct cpu *cpu) -{ -} #endif /* CONFIG_HOTPLUG_CPU */ +struct bus_type cpu_subsys = { + .name = "cpu", + .dev_name = "cpu", + .match = cpu_subsys_match, +#ifdef CONFIG_HOTPLUG_CPU + .online = cpu_subsys_online, + .offline = cpu_subsys_offline, +#endif +}; +EXPORT_SYMBOL_GPL(cpu_subsys); + #ifdef CONFIG_KEXEC #include <linux/kexec.h> @@ -132,7 +156,42 @@ static ssize_t show_crash_notes(struct device *dev, struct device_attribute *att return rc; } static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL); + +static ssize_t show_crash_notes_size(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t rc; + + rc = sprintf(buf, "%zu\n", sizeof(note_buf_t)); + return rc; +} +static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL); + +static struct attribute *crash_note_cpu_attrs[] = { + &dev_attr_crash_notes.attr, + &dev_attr_crash_notes_size.attr, + NULL +}; + +static struct attribute_group crash_note_cpu_attr_group = { + .attrs = crash_note_cpu_attrs, +}; +#endif + +static const struct attribute_group *common_cpu_attr_groups[] = { +#ifdef CONFIG_KEXEC + &crash_note_cpu_attr_group, +#endif + NULL +}; + +static const struct attribute_group *hotplugable_cpu_attr_groups[] = { +#ifdef CONFIG_KEXEC + &crash_note_cpu_attr_group, #endif + NULL +}; /* * Print cpu online, possible, present, and system maps @@ -228,6 +287,41 @@ static void cpu_device_release(struct device *dev) */ } +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static ssize_t print_cpu_modalias(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t n; + u32 i; + + n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", + CPU_FEATURE_TYPEVAL); + + for (i = 0; i < MAX_CPU_FEATURES; i++) + if (cpu_have_feature(i)) { + if (PAGE_SIZE < n + sizeof(",XXXX\n")) { + WARN(1, "CPU features overflow page\n"); + break; + } + n += sprintf(&buf[n], ",%04X", i); + } + buf[n++] = '\n'; + return n; +} + +static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (buf) { + print_cpu_modalias(NULL, NULL, buf); + add_uevent_var(env, "MODALIAS=%s", buf); + kfree(buf); + } + return 0; +} +#endif + /* * register_cpu - Setup a sysfs device for a CPU. * @cpu - cpu->hotpluggable field set to 1 will generate a control file in @@ -236,7 +330,7 @@ static void cpu_device_release(struct device *dev) * * Initialize and register the CPU device. 
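For orientation, architectures feed this interface from their topology setup code, marking CPUs that support offlining as hotpluggable; with the rework above, the 'online' control and the crash_notes files then come from the bus .online/.offline callbacks and the attribute groups rather than per-device device_create_file() calls. A sketch under those assumptions (array and initcall names illustrative):

	static DEFINE_PER_CPU(struct cpu, cpu_devices);

	static int __init foo_topology_init(void)
	{
		int i;

		for_each_present_cpu(i) {
			struct cpu *c = &per_cpu(cpu_devices, i);

			c->hotpluggable = 1;	/* allow echo 0 > online */
			register_cpu(c, i);
		}
		return 0;
	}
	subsys_initcall(foo_topology_init);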
*/ -int __cpuinit register_cpu(struct cpu *cpu, int num) +int register_cpu(struct cpu *cpu, int num) { int error; @@ -245,21 +339,21 @@ int __cpuinit register_cpu(struct cpu *cpu, int num) cpu->dev.id = num; cpu->dev.bus = &cpu_subsys; cpu->dev.release = cpu_device_release; -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE - cpu->dev.bus->uevent = arch_cpu_uevent; + cpu->dev.offline_disabled = !cpu->hotpluggable; + cpu->dev.offline = !cpu_online(num); + cpu->dev.of_node = of_get_cpu_node(num, NULL); +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE + cpu->dev.bus->uevent = cpu_uevent; #endif + cpu->dev.groups = common_cpu_attr_groups; + if (cpu->hotpluggable) + cpu->dev.groups = hotplugable_cpu_attr_groups; error = device_register(&cpu->dev); - if (!error && cpu->hotpluggable) - register_cpu_control(cpu); if (!error) per_cpu(cpu_sys_devices, num) = &cpu->dev; if (!error) register_cpu_under_node(num, cpu_to_node(num)); -#ifdef CONFIG_KEXEC - if (!error) - error = device_create_file(&cpu->dev, &dev_attr_crash_notes); -#endif return error; } @@ -272,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu) } EXPORT_SYMBOL_GPL(get_cpu_device); -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE -static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL); +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL); #endif static struct attribute *cpu_root_attrs[] = { @@ -286,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = { &cpu_attrs[2].attr.attr, &dev_attr_kernel_max.attr, &dev_attr_offline.attr, -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE &dev_attr_modalias.attr, #endif NULL diff --git a/drivers/base/dd.c b/drivers/base/dd.c index e3bbed8a617..e4ffbcf2f51 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -24,6 +24,7 @@ #include <linux/wait.h> #include <linux/async.h> #include <linux/pm_runtime.h> +#include <linux/pinctrl/devinfo.h> #include "base.h" #include "power/power.h" @@ -51,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static struct workqueue_struct *deferred_wq; +static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /** * deferred_probe_work_func() - Retry probing devices in the active list. @@ -134,6 +136,17 @@ static bool driver_deferred_probe_enable = false; * This function moves all devices from the pending list to the active * list and schedules the deferred probe workqueue to process them. It * should be called anytime a driver is successfully bound to a device. + * + * Note, there is a race condition in multi-threaded probe. In the case where + * more than one device is probing at the same time, it is possible for one + * probe to complete successfully while another is about to defer. If the second + * depends on the first, then it will get put on the pending list after the + * trigger event has already occurred and will be stuck there. + * + * The atomic 'deferred_trigger_count' is used to determine if a successful + * trigger has occurred in the midst of probing a driver. If the trigger count + * changes in the midst of a probe, then deferred processing should be triggered + * again.
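The deferral being described starts with a driver returning -EPROBE_DEFER from probe when one of its suppliers has not bound yet, roughly (illustrative consumer, real error convention):

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			/* may be -EPROBE_DEFER; the core then parks the
			 * device on the pending list and retries it after
			 * the next successful bind */
			return PTR_ERR(clk);

		return clk_prepare_enable(clk);
	}

The trigger counter closes the race for exactly this pattern: if the clock provider binds between devm_clk_get() failing and the deferral being recorded, the count mismatch forces one more pass over the pending list.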
*/ static void driver_deferred_probe_trigger(void) { @@ -146,6 +159,7 @@ static void driver_deferred_probe_trigger(void) * into the active list so they can be retried by the workqueue */ mutex_lock(&deferred_probe_mutex); + atomic_inc(&deferred_trigger_count); list_splice_tail_init(&deferred_probe_pending_list, &deferred_probe_active_list); mutex_unlock(&deferred_probe_mutex); @@ -172,6 +186,8 @@ static int deferred_probe_initcall(void) driver_deferred_probe_enable = true; driver_deferred_probe_trigger(); + /* Sort as many dependencies as possible before exiting initcalls */ + flush_workqueue(deferred_wq); return 0; } late_initcall(deferred_probe_initcall); @@ -184,8 +200,8 @@ static void driver_bound(struct device *dev) return; } - pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev), - __func__, dev->driver->name); + pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name, + __func__, dev_name(dev)); klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); @@ -262,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); static int really_probe(struct device *dev, struct device_driver *drv) { int ret = 0; + int local_trigger_count = atomic_read(&deferred_trigger_count); atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", @@ -269,6 +286,12 @@ static int really_probe(struct device *dev, struct device_driver *drv) WARN_ON(!list_empty(&dev->devres_head)); dev->driver = drv; + + /* If using pinctrl, bind pins now before probing */ + ret = pinctrl_bind_pins(dev); + if (ret) + goto probe_failed; + if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", __func__, dev_name(dev)); @@ -301,6 +324,9 @@ probe_failed: /* Driver requested deferred probing */ dev_info(dev, "Driver %s requests probe deferral\n", drv->name); driver_deferred_probe_add(dev); + /* Did a trigger occur while probing? Need to re-trigger if yes */ + if (local_trigger_count != atomic_read(&deferred_trigger_count)) + driver_deferred_probe_trigger(); } else if (ret != -ENODEV && ret != -ENXIO) { /* driver matched but the probe failed */ printk(KERN_WARNING @@ -371,7 +397,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) pm_runtime_barrier(dev); ret = really_probe(dev, drv); - pm_runtime_idle(dev); + pm_request_idle(dev); return ret; } @@ -419,7 +445,7 @@ int device_attach(struct device *dev) } } else { ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); - pm_runtime_idle(dev); + pm_request_idle(dev); } out_unlock: device_unlock(dev); @@ -561,29 +587,3 @@ void driver_detach(struct device_driver *drv) put_device(dev); } } - -/* - * These exports can't be _GPL due to .h files using this within them, and it - * might break something that was previously working... 
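With the driver_data pointer gone from struct device_private, the accessors no longer need dd.c or the lazy device_private allocation at all; presumably they end up as trivial inlines over a field on struct device itself, along the lines of (a sketch of the destination, not part of this hunk):

	static inline void *dev_get_drvdata(const struct device *dev)
	{
		return dev->driver_data;
	}

	static inline void dev_set_drvdata(struct device *dev, void *data)
	{
		dev->driver_data = data;
	}

which also removes the allocation-failure path that the old dev_set_drvdata() had to report.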
- */ -void *dev_get_drvdata(const struct device *dev) -{ - if (dev && dev->p) - return dev->p->driver_data; - return NULL; -} -EXPORT_SYMBOL(dev_get_drvdata); - -int dev_set_drvdata(struct device *dev, void *data) -{ - int error; - - if (!dev->p) { - error = device_private_init(dev); - if (error) - return error; - } - dev->p->driver_data = data; - return 0; -} -EXPORT_SYMBOL(dev_set_drvdata); diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 66839066476..52302946770 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release, if (unlikely(!dr)) return NULL; - memset(dr, 0, tot_size); + memset(dr, 0, offsetof(struct devres, data)); + INIT_LIST_HEAD(&dr->node.entry); dr->node.release = release; return dr; @@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, { struct devres *dr; - dr = alloc_dr(release, size, gfp); + dr = alloc_dr(release, size, gfp | __GFP_ZERO); if (unlikely(!dr)) return NULL; set_node_dbginfo(&dr->node, name, size); @@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { struct devres *dr; - dr = alloc_dr(release, size, gfp); + dr = alloc_dr(release, size, gfp | __GFP_ZERO); if (unlikely(!dr)) return NULL; return dr->data; @@ -671,58 +672,259 @@ int devres_release_group(struct device *dev, void *id) EXPORT_SYMBOL_GPL(devres_release_group); /* - * Managed kzalloc/kfree + * Custom devres actions allow inserting a simple function call + * into the teardown sequence. + */ + +struct action_devres { + void *data; + void (*action)(void *); +}; + +static int devm_action_match(struct device *dev, void *res, void *p) +{ + struct action_devres *devres = res; + struct action_devres *target = p; + + return devres->action == target->action && + devres->data == target->data; +} + +static void devm_action_release(struct device *dev, void *res) +{ + struct action_devres *devres = res; + + devres->action(devres->data); +} + +/** + * devm_add_action() - add a custom action to the list of managed resources + * @dev: Device that owns the action + * @action: Function that should be called + * @data: Pointer to data passed to @action implementation + * + * This adds a custom action to the list of managed resources so that + * it gets executed as part of standard resource unwinding. */ -static void devm_kzalloc_release(struct device *dev, void *res) +int devm_add_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres *devres; + + devres = devres_alloc(devm_action_release, + sizeof(struct action_devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + devres->data = data; + devres->action = action; + + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_add_action); + +/** + * devm_remove_action() - removes previously added custom action + * @dev: Device that owns the action + * @action: Function implementing the action + * @data: Pointer to data passed to @action implementation + * + * Removes instance of @action previously added by devm_add_action(). + * Both action and data should match one of the existing entries.
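A plausible consumer is a driver that must undo a side effect no existing devm_* wrapper covers (regulator name and helpers illustrative):

	static void foo_disable_vcc(void *data)
	{
		regulator_disable(data);
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct regulator *vcc;
		int ret;

		vcc = devm_regulator_get(&pdev->dev, "vcc");
		if (IS_ERR(vcc))
			return PTR_ERR(vcc);

		ret = regulator_enable(vcc);
		if (ret)
			return ret;

		/* the disable now runs as part of the managed unwind */
		return devm_add_action(&pdev->dev, foo_disable_vcc, vcc);
	}

Note the remaining sharp edge: if devm_add_action() itself returns -ENOMEM, the enable still has to be undone by hand, since only successfully recorded actions are replayed.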
+ */ +void devm_remove_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres devres = { + .data = data, + .action = action, + }; + + WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, + &devres)); + +} +EXPORT_SYMBOL_GPL(devm_remove_action); + +/* + * Managed kmalloc/kfree + */ +static void devm_kmalloc_release(struct device *dev, void *res) { /* noop */ } -static int devm_kzalloc_match(struct device *dev, void *res, void *data) +static int devm_kmalloc_match(struct device *dev, void *res, void *data) { return res == data; } /** - * devm_kzalloc - Resource-managed kzalloc + * devm_kmalloc - Resource-managed kmalloc * @dev: Device to allocate memory for * @size: Allocation size * @gfp: Allocation gfp flags * - * Managed kzalloc. Memory allocated with this function is + * Managed kmalloc. Memory allocated with this function is * automatically freed on driver detach. Like all other devres * resources, guaranteed alignment is unsigned long long. * * RETURNS: * Pointer to allocated memory on success, NULL on failure. */ -void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) { struct devres *dr; /* use raw alloc_dr for kmalloc caller tracing */ - dr = alloc_dr(devm_kzalloc_release, size, gfp); + dr = alloc_dr(devm_kmalloc_release, size, gfp); if (unlikely(!dr)) return NULL; + /* + * This is named devm_kzalloc_release for historical reasons + * The initial implementation did not support kmalloc, only kzalloc + */ set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); devres_add(dev, dr->data); return dr->data; } -EXPORT_SYMBOL_GPL(devm_kzalloc); +EXPORT_SYMBOL_GPL(devm_kmalloc); + +/** + * devm_kstrdup - Allocate resource managed space and + * copy an existing string into that. + * @dev: Device to allocate memory for + * @s: the string to duplicate + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kmalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} +EXPORT_SYMBOL_GPL(devm_kstrdup); /** * devm_kfree - Resource-managed kfree * @dev: Device this memory belongs to * @p: Memory to free * - * Free memory allocated with devm_kzalloc(). + * Free memory allocated with devm_kmalloc(). 
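Typical probe-path usage after the rename, assuming devm_kzalloc() becomes a thin wrapper that passes gfp | __GFP_ZERO to devm_kmalloc() (sketch, illustrative names):

	struct foo_priv {
		char *label;
		u32 *ring;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->ring = devm_kmalloc(&pdev->dev, 64 * sizeof(u32),
					  GFP_KERNEL);
		priv->label = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
					   GFP_KERNEL);
		if (!priv->ring || !priv->label)
			return -ENOMEM;

		/* all three allocations are released on detach */
		return 0;
	}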
*/ void devm_kfree(struct device *dev, void *p) { int rc; - rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p); + rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p); WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_kfree); + +/** + * devm_kmemdup - Resource-managed kmemdup + * @dev: Device this memory belongs to + * @src: Memory region to duplicate + * @len: Memory region length + * @gfp: GFP mask to use + * + * Duplicate region of a memory using resource managed kmalloc + */ +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) +{ + void *p; + + p = devm_kmalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kmemdup); + +struct pages_devres { + unsigned long addr; + unsigned int order; +}; + +static int devm_pages_match(struct device *dev, void *res, void *p) +{ + struct pages_devres *devres = res; + struct pages_devres *target = p; + + return devres->addr == target->addr; +} + +static void devm_pages_release(struct device *dev, void *res) +{ + struct pages_devres *devres = res; + + free_pages(devres->addr, devres->order); +} + +/** + * devm_get_free_pages - Resource-managed __get_free_pages + * @dev: Device to allocate memory for + * @gfp_mask: Allocation gfp flags + * @order: Allocation size is (1 << order) pages + * + * Managed get_free_pages. Memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Address of allocated memory on success, 0 on failure. + */ + +unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order) +{ + struct pages_devres *devres; + unsigned long addr; + + addr = __get_free_pages(gfp_mask, order); + + if (unlikely(!addr)) + return 0; + + devres = devres_alloc(devm_pages_release, + sizeof(struct pages_devres), GFP_KERNEL); + if (unlikely(!devres)) { + free_pages(addr, order); + return 0; + } + + devres->addr = addr; + devres->order = order; + + devres_add(dev, devres); + return addr; +} +EXPORT_SYMBOL_GPL(devm_get_free_pages); + +/** + * devm_free_pages - Resource-managed free_pages + * @dev: Device this memory belongs to + * @addr: Memory to free + * + * Free memory allocated with devm_get_free_pages(). Unlike free_pages, + * there is no need to supply the @order. 
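Usage mirrors __get_free_pages(), with the devres entry remembering the order; a sketch (hardware hook illustrative):

	static int foo_probe(struct platform_device *pdev)
	{
		unsigned long ring;

		/* two physically contiguous pages, freed on detach */
		ring = devm_get_free_pages(&pdev->dev, GFP_KERNEL, 1);
		if (!ring)
			return -ENOMEM;

		foo_hw_set_ring_base(pdev, virt_to_phys((void *)ring));
		return 0;
	}

An early release stays possible through devm_free_pages(&pdev->dev, ring), which finds the matching entry by address alone.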
+ */ +void devm_free_pages(struct device *dev, unsigned long addr) +{ + struct pages_devres devres = { .addr = addr }; + + WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, + &devres)); +} +EXPORT_SYMBOL_GPL(devm_free_pages); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 17cf7cad601..25798db1455 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/kthread.h> +#include "base.h" static struct task_struct *thread; @@ -41,6 +42,8 @@ static struct req { int err; const char *name; umode_t mode; /* 0 => delete */ + kuid_t uid; + kgid_t gid; struct device *dev; } *requests; @@ -85,7 +88,9 @@ int devtmpfs_create_node(struct device *dev) return 0; req.mode = 0; - req.name = device_get_devnode(dev, &req.mode, &tmp); + req.uid = GLOBAL_ROOT_UID; + req.gid = GLOBAL_ROOT_GID; + req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp); if (!req.name) return -ENOMEM; @@ -121,7 +126,7 @@ int devtmpfs_delete_node(struct device *dev) if (!thread) return 0; - req.name = device_get_devnode(dev, NULL, &tmp); + req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp); if (!req.name) return -ENOMEM; @@ -187,7 +192,8 @@ static int create_path(const char *nodepath) return err; } -static int handle_create(const char *nodename, umode_t mode, struct device *dev) +static int handle_create(const char *nodename, umode_t mode, kuid_t uid, + kgid_t gid, struct device *dev) { struct dentry *dentry; struct path path; @@ -201,16 +207,16 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev) if (IS_ERR(dentry)) return PTR_ERR(dentry); - err = vfs_mknod(path.dentry->d_inode, - dentry, mode, dev->devt); + err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt); if (!err) { struct iattr newattrs; - /* fixup possibly umasked mode */ newattrs.ia_mode = mode; - newattrs.ia_valid = ATTR_MODE; + newattrs.ia_uid = uid; + newattrs.ia_gid = gid; + newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; mutex_lock(&dentry->d_inode->i_mutex); - notify_change(dentry, &newattrs); + notify_change(dentry, &newattrs, NULL); mutex_unlock(&dentry->d_inode->i_mutex); /* mark as kernel-created inode */ @@ -293,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev) { struct path parent; struct dentry *dentry; - int deleted = 1; + int deleted = 0; int err; dentry = kern_path_locked(nodename, &parent); @@ -302,7 +308,8 @@ static int handle_remove(const char *nodename, struct device *dev) if (dentry->d_inode) { struct kstat stat; - err = vfs_getattr(parent.mnt, dentry, &stat); + struct path p = {.mnt = parent.mnt, .dentry = dentry}; + err = vfs_getattr(&p, &stat); if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { struct iattr newattrs; /* @@ -315,9 +322,9 @@ static int handle_remove(const char *nodename, struct device *dev) newattrs.ia_valid = ATTR_UID|ATTR_GID|ATTR_MODE; mutex_lock(&dentry->d_inode->i_mutex); - notify_change(dentry, &newattrs); + notify_change(dentry, &newattrs, NULL); mutex_unlock(&dentry->d_inode->i_mutex); - err = vfs_unlink(parent.dentry->d_inode, dentry); + err = vfs_unlink(parent.dentry->d_inode, dentry, NULL); if (!err || err == -ENOENT) deleted = 1; } @@ -357,10 +364,11 @@ int devtmpfs_mount(const char *mntdir) static DECLARE_COMPLETION(setup_done); -static int handle(const char *name, umode_t mode, struct device *dev) +static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid, + struct device *dev) { if 
(mode) - return handle_create(name, mode, dev); + return handle_create(name, mode, uid, gid, dev); else return handle_remove(name, dev); } @@ -386,7 +394,8 @@ static int devtmpfsd(void *p) spin_unlock(&req_lock); while (req) { struct req *next = req->next; - req->err = handle(req->name, req->mode, req->dev); + req->err = handle(req->name, req->mode, + req->uid, req->gid, req->dev); complete(&req->done); req = next; } diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index a3f79c495a4..840c7fa8098 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -27,9 +27,18 @@ #include <linux/dma-buf.h> #include <linux/anon_inodes.h> #include <linux/export.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> static inline int is_dma_buf_file(struct file *); +struct dma_buf_list { + struct list_head head; + struct mutex lock; +}; + +static struct dma_buf_list db_list; + static int dma_buf_release(struct inode *inode, struct file *file) { struct dma_buf *dmabuf; @@ -39,7 +48,14 @@ static int dma_buf_release(struct inode *inode, struct file *file) dmabuf = file->private_data; + BUG_ON(dmabuf->vmapping_counter); + dmabuf->ops->release(dmabuf); + + mutex_lock(&db_list.lock); + list_del(&dmabuf->list_node); + mutex_unlock(&db_list.lock); + kfree(dmabuf); return 0; } @@ -61,9 +77,36 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) return dmabuf->ops->mmap(dmabuf, vma); } +static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) +{ + struct dma_buf *dmabuf; + loff_t base; + + if (!is_dma_buf_file(file)) + return -EBADF; + + dmabuf = file->private_data; + + /* only support discovering the end of the buffer, + but also allow SEEK_SET to maintain the idiomatic + SEEK_END(0), SEEK_CUR(0) pattern */ + if (whence == SEEK_END) + base = dmabuf->size; + else if (whence == SEEK_SET) + base = 0; + else + return -EINVAL; + + if (offset != 0) + return -EINVAL; + + return base + offset; +} + static const struct file_operations dma_buf_fops = { .release = dma_buf_release, .mmap = dma_buf_mmap_internal, + .llseek = dma_buf_llseek, }; /* @@ -75,22 +118,24 @@ static inline int is_dma_buf_file(struct file *file) } /** - * dma_buf_export - Creates a new dma_buf, and associates an anon file + * dma_buf_export_named - Creates a new dma_buf, and associates an anon file * with this buffer, so it can be exported. * Also connect the allocator specific data and ops to the buffer. + * Additionally, provide a name string for exporter; useful in debugging. * * @priv: [in] Attach private data of allocator to this buffer * @ops: [in] Attach allocator-defined dma buf ops to the new buffer. * @size: [in] Size of the buffer * @flags: [in] mode flags for the file. + * @exp_name: [in] name of the exporting module - useful for debugging. * * Returns, on success, a newly created dma_buf object, which wraps the * supplied private data and operations for dma_buf_ops. On either missing * ops, or error in allocating struct dma_buf, will return negative error. 
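An exporter fills in a dma_buf_ops table (the core warns and bails out if map_dma_buf, unmap_dma_buf, release, kmap, kmap_atomic or mmap are missing) and now passes a name as well; a skeleton with the handler bodies elided (foo_* names illustrative):

	static const struct dma_buf_ops foo_dmabuf_ops = {
		.map_dma_buf	= foo_map_dma_buf,
		.unmap_dma_buf	= foo_unmap_dma_buf,
		.release	= foo_release,
		.kmap		= foo_kmap,
		.kmap_atomic	= foo_kmap_atomic,
		.mmap		= foo_mmap,
	};

	dmabuf = dma_buf_export_named(foo_obj, &foo_dmabuf_ops,
				      foo_obj->size, O_RDWR, KBUILD_MODNAME);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

Presumably the plain dma_buf_export() survives as a convenience wrapper that supplies KBUILD_MODNAME for @exp_name, so existing exporters keep building unchanged.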
* */ -struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops, - size_t size, int flags) +struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, + size_t size, int flags, const char *exp_name) { struct dma_buf *dmabuf; struct file *file; @@ -112,17 +157,27 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops, dmabuf->priv = priv; dmabuf->ops = ops; dmabuf->size = size; + dmabuf->exp_name = exp_name; file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); + if (IS_ERR(file)) { + kfree(dmabuf); + return ERR_CAST(file); + } + file->f_mode |= FMODE_LSEEK; dmabuf->file = file; mutex_init(&dmabuf->lock); INIT_LIST_HEAD(&dmabuf->attachments); + mutex_lock(&db_list.lock); + list_add(&dmabuf->list_node, &db_list.head); + mutex_unlock(&db_list.lock); + return dmabuf; } -EXPORT_SYMBOL_GPL(dma_buf_export); +EXPORT_SYMBOL_GPL(dma_buf_export_named); /** @@ -134,15 +189,14 @@ EXPORT_SYMBOL_GPL(dma_buf_export); */ int dma_buf_fd(struct dma_buf *dmabuf, int flags) { - int error, fd; + int fd; if (!dmabuf || !dmabuf->file) return -EINVAL; - error = get_unused_fd_flags(flags); - if (error < 0) - return error; - fd = error; + fd = get_unused_fd_flags(flags); + if (fd < 0) + return fd; fd_install(fd, dmabuf->file); @@ -197,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put); * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. * - * Returns struct dma_buf_attachment * for this attachment; may return negative - * error codes. - * + * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on + * error. */ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev) @@ -265,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); * @attach: [in] attachment whose scatterlist is to be returned * @direction: [in] direction of DMA transfer * - * Returns sg_table containing the scatterlist to be returned; may return NULL - * or ERR_PTR. - * + * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR + * on error. */ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) @@ -280,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, return ERR_PTR(-EINVAL); sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + if (!sg_table) + sg_table = ERR_PTR(-ENOMEM); return sg_table; } @@ -437,7 +491,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap); * dma-buf buffer. * * This function adjusts the passed in vma so that it points at the file of the - * dma_buf operation. It alsog adjusts the starting pgoff and does bounds + * dma_buf operation. It also adjusts the starting pgoff and does bounds * checking on the size of the vma. Then it calls the exporters mmap function to * set up the mapping. 
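The expected caller is an importing driver that hands a received buffer to userspace from its own mmap handler; under that assumption (lookup helper illustrative):

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_buffer *buf = foo_lookup_buffer(file, vma->vm_pgoff);

		/* delegate to the exporter, mapping from page 0 of the buffer */
		return dma_buf_mmap(buf->dmabuf, vma, 0);
	}

The vm_file swap in the body below is what keeps the dma_buf file pinned for the lifetime of the resulting mapping, even after the importer's own file goes away.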
* @@ -446,6 +500,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap); int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, unsigned long pgoff) { + struct file *oldfile; + int ret; + if (WARN_ON(!dmabuf || !vma)) return -EINVAL; @@ -459,14 +516,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* readjust the vma */ - if (vma->vm_file) - fput(vma->vm_file); - - vma->vm_file = get_file(dmabuf->file); - + get_file(dmabuf->file); + oldfile = vma->vm_file; + vma->vm_file = dmabuf->file; vma->vm_pgoff = pgoff; - return dmabuf->ops->mmap(dmabuf, vma); + ret = dmabuf->ops->mmap(dmabuf, vma); + if (ret) { + /* restore old parameters on failure */ + vma->vm_file = oldfile; + fput(dmabuf->file); + } else { + if (oldfile) + fput(oldfile); + } + return ret; + } EXPORT_SYMBOL_GPL(dma_buf_mmap); @@ -479,15 +544,41 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap); * These calls are optional in drivers. The intended use for them * is for mapping objects linear in kernel space for high use objects. * Please attempt to use kmap/kunmap before thinking about these interfaces. + * + * Returns NULL on error. */ void *dma_buf_vmap(struct dma_buf *dmabuf) { + void *ptr; + if (WARN_ON(!dmabuf)) return NULL; - if (dmabuf->ops->vmap) - return dmabuf->ops->vmap(dmabuf); - return NULL; + if (!dmabuf->ops->vmap) + return NULL; + + mutex_lock(&dmabuf->lock); + if (dmabuf->vmapping_counter) { + dmabuf->vmapping_counter++; + BUG_ON(!dmabuf->vmap_ptr); + ptr = dmabuf->vmap_ptr; + goto out_unlock; + } + + BUG_ON(dmabuf->vmap_ptr); + + ptr = dmabuf->ops->vmap(dmabuf); + if (WARN_ON_ONCE(IS_ERR(ptr))) + ptr = NULL; + if (!ptr) + goto out_unlock; + + dmabuf->vmap_ptr = ptr; + dmabuf->vmapping_counter = 1; + +out_unlock: + mutex_unlock(&dmabuf->lock); + return ptr; } EXPORT_SYMBOL_GPL(dma_buf_vmap); @@ -501,7 +592,152 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) if (WARN_ON(!dmabuf)) return; - if (dmabuf->ops->vunmap) - dmabuf->ops->vunmap(dmabuf, vaddr); + BUG_ON(!dmabuf->vmap_ptr); + BUG_ON(dmabuf->vmapping_counter == 0); + BUG_ON(dmabuf->vmap_ptr != vaddr); + + mutex_lock(&dmabuf->lock); + if (--dmabuf->vmapping_counter == 0) { + if (dmabuf->ops->vunmap) + dmabuf->ops->vunmap(dmabuf, vaddr); + dmabuf->vmap_ptr = NULL; + } + mutex_unlock(&dmabuf->lock); } EXPORT_SYMBOL_GPL(dma_buf_vunmap); + +#ifdef CONFIG_DEBUG_FS +static int dma_buf_describe(struct seq_file *s) +{ + int ret; + struct dma_buf *buf_obj; + struct dma_buf_attachment *attach_obj; + int count = 0, attach_count; + size_t size = 0; + + ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + seq_puts(s, "\nDma-buf Objects:\n"); + seq_puts(s, "size\tflags\tmode\tcount\texp_name\n"); + + list_for_each_entry(buf_obj, &db_list.head, list_node) { + ret = mutex_lock_interruptible(&buf_obj->lock); + + if (ret) { + seq_puts(s, + "\tERROR locking buffer object: skipping\n"); + continue; + } + + seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", + buf_obj->size, + buf_obj->file->f_flags, buf_obj->file->f_mode, + (long)(buf_obj->file->f_count.counter), + buf_obj->exp_name); + + seq_puts(s, "\tAttached Devices:\n"); + attach_count = 0; + + list_for_each_entry(attach_obj, &buf_obj->attachments, node) { + seq_puts(s, "\t"); + + seq_printf(s, "%s\n", dev_name(attach_obj->dev)); + attach_count++; + } + + seq_printf(s, "Total %d devices attached\n\n", + attach_count); + + count++; + size += buf_obj->size; + mutex_unlock(&buf_obj->lock); + } + + seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); + + 
mutex_unlock(&db_list.lock); + return 0; +} + +static int dma_buf_show(struct seq_file *s, void *unused) +{ + void (*func)(struct seq_file *) = s->private; + func(s); + return 0; +} + +static int dma_buf_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, dma_buf_show, inode->i_private); +} + +static const struct file_operations dma_buf_debug_fops = { + .open = dma_buf_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *dma_buf_debugfs_dir; + +static int dma_buf_init_debugfs(void) +{ + int err = 0; + dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL); + if (IS_ERR(dma_buf_debugfs_dir)) { + err = PTR_ERR(dma_buf_debugfs_dir); + dma_buf_debugfs_dir = NULL; + return err; + } + + err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe); + + if (err) + pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); + + return err; +} + +static void dma_buf_uninit_debugfs(void) +{ + if (dma_buf_debugfs_dir) + debugfs_remove_recursive(dma_buf_debugfs_dir); +} + +int dma_buf_debugfs_create_file(const char *name, + int (*write)(struct seq_file *)) +{ + struct dentry *d; + + d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir, + write, &dma_buf_debug_fops); + + return PTR_ERR_OR_ZERO(d); +} +#else +static inline int dma_buf_init_debugfs(void) +{ + return 0; +} +static inline void dma_buf_uninit_debugfs(void) +{ +} +#endif + +static int __init dma_buf_init(void) +{ + mutex_init(&db_list.lock); + INIT_LIST_HEAD(&db_list.head); + dma_buf_init_debugfs(); + return 0; +} +subsys_initcall(dma_buf_init); + +static void __exit dma_buf_deinit(void) +{ + dma_buf_uninit_debugfs(); +} +__exitcall(dma_buf_deinit); diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b64102..7d6e84a5142 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,13 +10,13 @@ struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; - phys_addr_t pfn_base; + unsigned long pfn_base; int size; int flags; unsigned long *bitmap; }; -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - mem_base = ioremap(bus_addr, size); + mem_base = ioremap(phys_addr, size); if (!mem_base) goto out; @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; - dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); + dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); dev->dma_mem->size = pages; dev->dma_mem->flags = flags; @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, *ret = -ENXIO; if (off < count && user_count <= count - off) { - unsigned pfn = mem->pfn_base + start + off; + unsigned long pfn = mem->pfn_base + start + off; *ret = remap_pfn_range(vma, vma->vm_start, pfn, user_count << PAGE_SHIFT, vma->vm_page_prot); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 0ca54421ce9..6467c919c50 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -37,6 +37,7 @@ struct cma { unsigned long base_pfn; unsigned long count; unsigned long *bitmap; + struct mutex lock; 
}; struct cma *dma_contiguous_default_area; @@ -49,7 +50,7 @@ struct cma *dma_contiguous_default_area; /* * Default global CMA area size can be defined in kernel's .config. - * This is usefull mainly for distro maintainers to create a kernel + * This is useful mainly for distro maintainers to create a kernel * that works correctly for most supported systems. * The size can be set in bytes or as a percentage of the total memory * in the system. @@ -59,11 +60,22 @@ struct cma *dma_contiguous_default_area; */ static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; static phys_addr_t size_cmdline = -1; +static phys_addr_t base_cmdline; +static phys_addr_t limit_cmdline; static int __init early_cma(char *p) { pr_debug("%s(%s)\n", __func__, p); size_cmdline = memparse(p, &p); + if (*p != '@') + return 0; + base_cmdline = memparse(p + 1, &p); + if (*p != '-') { + limit_cmdline = base_cmdline + size_cmdline; + return 0; + } + limit_cmdline = memparse(p + 1, &p); + return 0; } early_param("cma", early_cma); @@ -96,7 +108,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) #endif /** - * dma_contiguous_reserve() - reserve area for contiguous memory handling + * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling * @limit: End address of the reserved memory (optional, 0 for any). * * This function reserves memory from early allocator. It should be @@ -107,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) void __init dma_contiguous_reserve(phys_addr_t limit) { phys_addr_t selected_size = 0; + phys_addr_t selected_base = 0; + phys_addr_t selected_limit = limit; + bool fixed = false; pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { selected_size = size_cmdline; + selected_base = base_cmdline; + selected_limit = min_not_zero(limit_cmdline, limit); + if (base_cmdline + size_cmdline == limit_cmdline) + fixed = true; } else { #ifdef CONFIG_CMA_SIZE_SEL_MBYTES selected_size = size_bytes; @@ -124,22 +143,31 @@ void __init dma_contiguous_reserve(phys_addr_t limit) #endif } - if (selected_size) { + if (selected_size && !dma_contiguous_default_area) { pr_debug("%s: reserving %ld MiB for global area\n", __func__, (unsigned long)selected_size / SZ_1M); - dma_declare_contiguous(NULL, selected_size, 0, limit); + dma_contiguous_reserve_area(selected_size, selected_base, + selected_limit, + &dma_contiguous_default_area, + fixed); } -}; +} static DEFINE_MUTEX(cma_mutex); -static __init int cma_activate_area(unsigned long base_pfn, unsigned long count) +static int __init cma_activate_area(struct cma *cma) { - unsigned long pfn = base_pfn; - unsigned i = count >> pageblock_order; + int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long); + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; + unsigned i = cma->count >> pageblock_order; struct zone *zone; + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) + return -ENOMEM; + WARN_ON_ONCE(!pfn_valid(pfn)); zone = page_zone(pfn_to_page(pfn)); @@ -148,97 +176,74 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count) base_pfn = pfn; for (j = pageblock_nr_pages; j; --j, pfn++) { WARN_ON_ONCE(!pfn_valid(pfn)); + /* + * alloc_contig_range requires the pfn range + * specified to be in the same zone. Make this + * simple by forcing the entire CMA resv range + * to be in the same zone. 
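The extended early_cma() parser accepts cma=<size>[@<base>[-<limit>]], and dma_contiguous_reserve() treats base+size == limit as a request for a fixed placement. Illustrative boot parameters:

	cma=64M				64 MiB anywhere below the arch limit
	cma=64M@0x20000000		exactly 64 MiB at 0x20000000 (fixed)
	cma=64M@0x20000000-0x40000000	64 MiB somewhere inside that window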
+ */ if (page_zone(pfn_to_page(pfn)) != zone) - return -EINVAL; + goto err; } init_cma_reserved_pageblock(pfn_to_page(base_pfn)); } while (--i); - return 0; -} - -static __init struct cma *cma_create_area(unsigned long base_pfn, - unsigned long count) -{ - int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); - struct cma *cma; - int ret = -ENOMEM; - - pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count); - - cma = kmalloc(sizeof *cma, GFP_KERNEL); - if (!cma) - return ERR_PTR(-ENOMEM); - - cma->base_pfn = base_pfn; - cma->count = count; - cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - - if (!cma->bitmap) - goto no_mem; - - ret = cma_activate_area(base_pfn, count); - if (ret) - goto error; - pr_debug("%s: returned %p\n", __func__, (void *)cma); - return cma; + mutex_init(&cma->lock); + return 0; -error: +err: kfree(cma->bitmap); -no_mem: - kfree(cma); - return ERR_PTR(ret); + return -EINVAL; } -static struct cma_reserved { - phys_addr_t start; - unsigned long size; - struct device *dev; -} cma_reserved[MAX_CMA_AREAS] __initdata; -static unsigned cma_reserved_count __initdata; +static struct cma cma_areas[MAX_CMA_AREAS]; +static unsigned cma_area_count; static int __init cma_init_reserved_areas(void) { - struct cma_reserved *r = cma_reserved; - unsigned i = cma_reserved_count; - - pr_debug("%s()\n", __func__); + int i; - for (; i; --i, ++r) { - struct cma *cma; - cma = cma_create_area(PFN_DOWN(r->start), - r->size >> PAGE_SHIFT); - if (!IS_ERR(cma)) - dev_set_cma_area(r->dev, cma); + for (i = 0; i < cma_area_count; i++) { + int ret = cma_activate_area(&cma_areas[i]); + if (ret) + return ret; } + return 0; } core_initcall(cma_init_reserved_areas); /** - * dma_declare_contiguous() - reserve area for contiguous memory handling - * for particular device - * @dev: Pointer to device structure. - * @size: Size of the reserved memory. - * @base: Start address of the reserved memory (optional, 0 for any). + * dma_contiguous_reserve_area() - reserve custom contiguous area + * @size: Size of the reserved area (in bytes) + * @base: Base address of the reserved area (optional, use 0 for any) * @limit: End address of the reserved memory (optional, 0 for any). + * @res_cma: Pointer to store the created cma region. + * @fixed: hint about where to place the reserved area + * + * This function reserves memory from the early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. This function allows the creation of custom reserved areas for specific + * devices. * - * This function reserves memory for specified device. It should be - * called by board specific code when early allocator (memblock or bootmem) - * is still activate. + * If @fixed is true, reserve contiguous area at exactly @base. If false, + * reserve in range from @base to @limit.
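Arch or board code that used to call dma_declare_contiguous() would now make a call along these lines (names and size illustrative; a limit of 0 means no constraint):

	static struct cma *foo_cma;

	void __init foo_reserve_memory(void)
	{
		/* 16 MiB for the multimedia block, placed anywhere */
		if (dma_contiguous_reserve_area(SZ_16M, 0, 0,
						&foo_cma, false))
			pr_warn("foo: CMA reservation failed\n");
	}

and afterwards attach the area to the device that should allocate from it, e.g. with dev_set_cma_area(&foo_pdev->dev, foo_cma).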
*/ -int __init dma_declare_contiguous(struct device *dev, phys_addr_t size, - phys_addr_t base, phys_addr_t limit) +int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed) { - struct cma_reserved *r = &cma_reserved[cma_reserved_count]; + struct cma *cma = &cma_areas[cma_area_count]; phys_addr_t alignment; + int ret = 0; pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__, (unsigned long)size, (unsigned long)base, (unsigned long)limit); /* Sanity checks */ - if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) { + if (cma_area_count == ARRAY_SIZE(cma_areas)) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -253,20 +258,17 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size, limit &= ~(alignment - 1); /* Reserve memory */ - if (base) { + if (base && fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { - base = -EBUSY; + ret = -EBUSY; goto err; } } else { - /* - * Use __memblock_alloc_base() since - * memblock_alloc_base() panic()s. - */ - phys_addr_t addr = __memblock_alloc_base(size, alignment, limit); + phys_addr_t addr = memblock_alloc_range(size, alignment, base, + limit); if (!addr) { - base = -ENOMEM; + ret = -ENOMEM; goto err; } else { base = addr; @@ -277,10 +279,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size, * Each reserved area must be initialised later, when more kernel * subsystems (like slab allocator) are available. */ - r->start = base; - r->size = size; - r->dev = dev; - cma_reserved_count++; + cma->base_pfn = PFN_DOWN(base); + cma->count = size >> PAGE_SHIFT; + *res_cma = cma; + cma_area_count++; + pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M, (unsigned long)base); @@ -289,7 +292,14 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size, return 0; err: pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); - return base; + return ret; +} + +static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) +{ + mutex_lock(&cma->lock); + bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); + mutex_unlock(&cma->lock); } /** @@ -300,7 +310,7 @@ err: * * This function allocates memory buffer for specified device. It uses * device specific contiguous memory area if available or the default - * global one. Requires architecture specific get_dev_cma_area() helper + * global one. Requires architecture specific dev_get_cma_area() helper * function. */ struct page *dma_alloc_from_contiguous(struct device *dev, int count, @@ -325,30 +335,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, mask = (1 << align) - 1; - mutex_lock(&cma_mutex); for (;;) { + mutex_lock(&cma->lock); pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, start, count, mask); - if (pageno >= cma->count) + if (pageno >= cma->count) { + mutex_unlock(&cma->lock); break; + } + bitmap_set(cma->bitmap, pageno, count); + /* + * It's safe to drop the lock here. We've marked this region for + * our exclusive use. If the migration fails we will take the + * lock again and unmark it. 
+ */ + mutex_unlock(&cma->lock); pfn = cma->base_pfn + pageno; + mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); + mutex_unlock(&cma_mutex); if (ret == 0) { - bitmap_set(cma->bitmap, pageno, count); page = pfn_to_page(pfn); break; } else if (ret != -EBUSY) { + clear_cma_bitmap(cma, pfn, count); break; } + clear_cma_bitmap(cma, pfn, count); pr_debug("%s(): memory range at %p is busy, retrying\n", __func__, pfn_to_page(pfn)); /* try again with a bit different memory target */ start = pageno + mask + 1; } - mutex_unlock(&cma_mutex); pr_debug("%s(): returned %p\n", __func__, page); return page; } @@ -381,10 +402,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); - mutex_lock(&cma_mutex); - bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); free_contig_range(pfn, count); - mutex_unlock(&cma_mutex); + clear_cma_bitmap(cma, pfn, count); return true; } diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c..6cd08e145bf 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) /** * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() * @dev: Device to declare coherent memory for - * @bus_addr: Bus address of coherent memory to be declared + * @phys_addr: Physical address of coherent memory to be declared * @device_addr: Device address of coherent memory to be declared * @size: Size of coherent memory to be declared * @flags: Flags @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) * RETURNS: * 0 on success, -errno on failure. */ -int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void *res; @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, if (!res) return -ENOMEM; - rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, + rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); if (rc == 0) devres_add(dev, res); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index 974e301a1ef..9e29943e56c 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -15,6 +15,7 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/sysfs.h> #include "base.h" static struct device *next_device(struct klist_iter *i) @@ -123,34 +124,16 @@ void driver_remove_file(struct device_driver *drv, } EXPORT_SYMBOL_GPL(driver_remove_file); -static int driver_add_groups(struct device_driver *drv, - const struct attribute_group **groups) +int driver_add_groups(struct device_driver *drv, + const struct attribute_group **groups) { - int error = 0; - int i; - - if (groups) { - for (i = 0; groups[i]; i++) { - error = sysfs_create_group(&drv->p->kobj, groups[i]); - if (error) { - while (--i >= 0) - sysfs_remove_group(&drv->p->kobj, - groups[i]); - break; - } - } - } - return error; + return sysfs_create_groups(&drv->p->kobj, groups); } -static void driver_remove_groups(struct device_driver *drv, - const struct attribute_group **groups) +void driver_remove_groups(struct device_driver *drv, + const struct attribute_group **groups) { - int i; - - if (groups) - for (i = 0; groups[i]; i++) - sysfs_remove_group(&drv->p->kobj, groups[i]); + 
sysfs_remove_groups(&drv->p->kobj, groups); } /** diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b392b353be3..d276e33880b 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -27,6 +27,7 @@ #include <linux/pm.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> +#include <linux/reboot.h> #include <generated/utsrelease.h> @@ -88,11 +89,6 @@ enum { FW_STATUS_ABORT, }; -enum fw_buf_fmt { - VMALLOC_BUF, /* used in direct loading */ - PAGE_BUF, /* used in loading via userspace */ -}; - static int loading_timeout = 60; /* In seconds */ static inline long firmware_loading_timeout(void) @@ -100,6 +96,15 @@ static inline long firmware_loading_timeout(void) return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT; } +/* firmware behavior options */ +#define FW_OPT_UEVENT (1U << 0) +#define FW_OPT_NOWAIT (1U << 1) +#ifdef CONFIG_FW_LOADER_USER_HELPER +#define FW_OPT_FALLBACK (1U << 2) +#else +#define FW_OPT_FALLBACK 0 +#endif + struct firmware_cache { /* firmware_buf instance will be added into the below list */ spinlock_t lock; @@ -128,12 +133,16 @@ struct firmware_buf { struct completion completion; struct firmware_cache *fwc; unsigned long status; - enum fw_buf_fmt fmt; void *data; size_t size; +#ifdef CONFIG_FW_LOADER_USER_HELPER + bool is_paged_buf; + bool need_uevent; struct page **pages; int nr_pages; int page_array_size; + struct list_head pending_list; +#endif char fw_id[]; }; @@ -142,14 +151,6 @@ struct fw_cache_entry { char name[]; }; -struct firmware_priv { - struct delayed_work timeout_work; - bool nowait; - struct device dev; - struct firmware_buf *buf; - struct firmware *fw; -}; - struct fw_name_devm { unsigned long magic; char name[]; @@ -182,7 +183,9 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name, strcpy(buf->fw_id, fw_name); buf->fwc = fwc; init_completion(&buf->completion); - buf->fmt = VMALLOC_BUF; +#ifdef CONFIG_FW_LOADER_USER_HELPER + INIT_LIST_HEAD(&buf->pending_list); +#endif pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf); @@ -224,23 +227,11 @@ static int fw_lookup_and_allocate_buf(const char *fw_name, return tmp ? 
0 : -ENOMEM; } -static struct firmware_buf *fw_lookup_buf(const char *fw_name) -{ - struct firmware_buf *tmp; - struct firmware_cache *fwc = &fw_cache; - - spin_lock(&fwc->lock); - tmp = __fw_lookup_buf(fw_name); - spin_unlock(&fwc->lock); - - return tmp; -} - static void __fw_free_buf(struct kref *ref) + __releases(&fwc->lock) { struct firmware_buf *buf = to_fwbuf(ref); struct firmware_cache *fwc = buf->fwc; - int i; pr_debug("%s: fw-%s buf=%p data=%p size=%u\n", __func__, buf->fw_id, buf, buf->data, @@ -249,13 +240,15 @@ static void __fw_free_buf(struct kref *ref) list_del(&buf->list); spin_unlock(&fwc->lock); - - if (buf->fmt == PAGE_BUF) { +#ifdef CONFIG_FW_LOADER_USER_HELPER + if (buf->is_paged_buf) { + int i; vunmap(buf->data); for (i = 0; i < buf->nr_pages; i++) __free_page(buf->pages[i]); kfree(buf->pages); } else +#endif vfree(buf->data); kfree(buf); } @@ -287,42 +280,47 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); /* Don't inline this: 'struct kstat' is biggish */ -static noinline_for_stack long fw_file_size(struct file *file) +static noinline_for_stack int fw_file_size(struct file *file) { struct kstat st; - if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st)) + if (vfs_getattr(&file->f_path, &st)) return -1; if (!S_ISREG(st.mode)) return -1; - if (st.size != (long)st.size) + if (st.size != (int)st.size) return -1; return st.size; } -static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) +static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) { - long size; + int size; char *buf; + int rc; size = fw_file_size(file); if (size <= 0) - return false; + return -EINVAL; buf = vmalloc(size); if (!buf) - return false; - if (kernel_read(file, 0, buf, size) != size) { + return -ENOMEM; + rc = kernel_read(file, 0, buf, size); + if (rc != size) { + if (rc > 0) + rc = -EIO; vfree(buf); - return false; + return rc; } fw_buf->data = buf; fw_buf->size = size; - return true; + return 0; } -static bool fw_get_filesystem_firmware(struct firmware_buf *buf) +static int fw_get_filesystem_firmware(struct device *device, + struct firmware_buf *buf) { int i; - bool success = false; + int rc = -ENOENT; char *path = __getname(); for (i = 0; i < ARRAY_SIZE(fw_path); i++) { @@ -337,31 +335,176 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf) file = filp_open(path, O_RDONLY, 0); if (IS_ERR(file)) continue; - success = fw_read_file_contents(file, buf); + rc = fw_read_file_contents(file, buf); fput(file); - if (success) + if (rc) + dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n", + path, rc); + else break; } __putname(path); - return success; + + if (!rc) { + dev_dbg(device, "firmware: direct-loading firmware %s\n", + buf->fw_id); + mutex_lock(&fw_lock); + set_bit(FW_STATUS_DONE, &buf->status); + complete_all(&buf->completion); + mutex_unlock(&fw_lock); + } + + return rc; +} + +/* firmware holds the ownership of pages */ +static void firmware_free_data(const struct firmware *fw) +{ + /* Loaded directly? 
*/ + if (!fw->priv) { + vfree(fw->data); + return; + } + fw_free_buf(fw->priv); +} + +/* store the pages buffer info firmware from buf */ +static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw) +{ + fw->priv = buf; +#ifdef CONFIG_FW_LOADER_USER_HELPER + fw->pages = buf->pages; +#endif + fw->size = buf->size; + fw->data = buf->data; + + pr_debug("%s: fw-%s buf=%p data=%p size=%u\n", + __func__, buf->fw_id, buf, buf->data, + (unsigned int)buf->size); } +#ifdef CONFIG_PM_SLEEP +static void fw_name_devm_release(struct device *dev, void *res) +{ + struct fw_name_devm *fwn = res; + + if (fwn->magic == (unsigned long)&fw_cache) + pr_debug("%s: fw_name-%s devm-%p released\n", + __func__, fwn->name, res); +} + +static int fw_devm_match(struct device *dev, void *res, + void *match_data) +{ + struct fw_name_devm *fwn = res; + + return (fwn->magic == (unsigned long)&fw_cache) && + !strcmp(fwn->name, match_data); +} + +static struct fw_name_devm *fw_find_devm_name(struct device *dev, + const char *name) +{ + struct fw_name_devm *fwn; + + fwn = devres_find(dev, fw_name_devm_release, + fw_devm_match, (void *)name); + return fwn; +} + +/* add firmware name into devres list */ +static int fw_add_devm_name(struct device *dev, const char *name) +{ + struct fw_name_devm *fwn; + + fwn = fw_find_devm_name(dev, name); + if (fwn) + return 1; + + fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) + + strlen(name) + 1, GFP_KERNEL); + if (!fwn) + return -ENOMEM; + + fwn->magic = (unsigned long)&fw_cache; + strcpy(fwn->name, name); + devres_add(dev, fwn); + + return 0; +} +#else +static int fw_add_devm_name(struct device *dev, const char *name) +{ + return 0; +} +#endif + + +/* + * user-mode helper code + */ +#ifdef CONFIG_FW_LOADER_USER_HELPER +struct firmware_priv { + struct delayed_work timeout_work; + bool nowait; + struct device dev; + struct firmware_buf *buf; + struct firmware *fw; +}; + static struct firmware_priv *to_firmware_priv(struct device *dev) { return container_of(dev, struct firmware_priv, dev); } -static void fw_load_abort(struct firmware_priv *fw_priv) +static void __fw_load_abort(struct firmware_buf *buf) { - struct firmware_buf *buf = fw_priv->buf; + /* + * There is a small window in which user can write to 'loading' + * between loading done and disappearance of 'loading' + */ + if (test_bit(FW_STATUS_DONE, &buf->status)) + return; + list_del_init(&buf->pending_list); set_bit(FW_STATUS_ABORT, &buf->status); complete_all(&buf->completion); } -static ssize_t firmware_timeout_show(struct class *class, - struct class_attribute *attr, - char *buf) +static void fw_load_abort(struct firmware_priv *fw_priv) +{ + struct firmware_buf *buf = fw_priv->buf; + + __fw_load_abort(buf); + + /* avoid user action after loading abort */ + fw_priv->buf = NULL; +} + +#define is_fw_load_aborted(buf) \ + test_bit(FW_STATUS_ABORT, &(buf)->status) + +static LIST_HEAD(pending_fw_head); + +/* reboot notifier for avoid deadlock with usermode_lock */ +static int fw_shutdown_notify(struct notifier_block *unused1, + unsigned long unused2, void *unused3) +{ + mutex_lock(&fw_lock); + while (!list_empty(&pending_fw_head)) + __fw_load_abort(list_first_entry(&pending_fw_head, + struct firmware_buf, + pending_list)); + mutex_unlock(&fw_lock); + return NOTIFY_DONE; +} + +static struct notifier_block fw_shutdown_nb = { + .notifier_call = fw_shutdown_notify, +}; + +static ssize_t timeout_show(struct class *class, struct class_attribute *attr, + char *buf) { return sprintf(buf, "%d\n", 
loading_timeout); } @@ -379,9 +522,8 @@ static ssize_t firmware_timeout_show(struct class *class, * * Note: zero means 'wait forever'. **/ -static ssize_t firmware_timeout_store(struct class *class, - struct class_attribute *attr, - const char *buf, size_t count) +static ssize_t timeout_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) { loading_timeout = simple_strtol(buf, NULL, 10); if (loading_timeout < 0) @@ -391,8 +533,7 @@ static ssize_t firmware_timeout_store(struct class *class, } static struct class_attribute firmware_class_attrs[] = { - __ATTR(timeout, S_IWUSR | S_IRUGO, - firmware_timeout_show, firmware_timeout_store), + __ATTR_RW(timeout), __ATTR_NULL }; @@ -401,8 +542,6 @@ static void fw_dev_release(struct device *dev) struct firmware_priv *fw_priv = to_firmware_priv(dev); kfree(fw_priv); - - module_put(THIS_MODULE); } static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) @@ -430,20 +569,14 @@ static ssize_t firmware_loading_show(struct device *dev, struct device_attribute *attr, char *buf) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); + int loading = 0; - return sprintf(buf, "%d\n", loading); -} + mutex_lock(&fw_lock); + if (fw_priv->buf) + loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); + mutex_unlock(&fw_lock); -/* firmware holds the ownership of pages */ -static void firmware_free_data(const struct firmware *fw) -{ - /* Loaded directly? */ - if (!fw->priv) { - vfree(fw->data); - return; - } - fw_free_buf(fw->priv); + return sprintf(buf, "%d\n", loading); } /* Some architectures don't have PAGE_KERNEL_RO */ @@ -454,7 +587,7 @@ static void firmware_free_data(const struct firmware *fw) /* one pages buffer should be mapped/unmapped only once */ static int fw_map_pages_buf(struct firmware_buf *buf) { - if (buf->fmt != PAGE_BUF) + if (!buf->is_paged_buf) return 0; if (buf->data) @@ -483,12 +616,12 @@ static ssize_t firmware_loading_store(struct device *dev, const char *buf, size_t count) { struct firmware_priv *fw_priv = to_firmware_priv(dev); - struct firmware_buf *fw_buf = fw_priv->buf; + struct firmware_buf *fw_buf; int loading = simple_strtol(buf, NULL, 10); int i; mutex_lock(&fw_lock); - + fw_buf = fw_priv->buf; if (!fw_buf) goto out; @@ -516,7 +649,10 @@ static ssize_t firmware_loading_store(struct device *dev, * see the mapped 'buf->data' once the loading * is completed. 
* */ - fw_map_pages_buf(fw_buf); + if (fw_map_pages_buf(fw_buf)) + dev_err(dev, "%s: map pages failed\n", + __func__); + list_del_init(&fw_buf->pending_list); complete_all(&fw_buf->completion); break; } @@ -690,17 +826,13 @@ static void firmware_class_timeout_work(struct work_struct *work) struct firmware_priv, timeout_work.work); mutex_lock(&fw_lock); - if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { - mutex_unlock(&fw_lock); - return; - } fw_load_abort(fw_priv); mutex_unlock(&fw_lock); } static struct firmware_priv * fw_create_instance(struct firmware *firmware, const char *fw_name, - struct device *device, bool uevent, bool nowait) + struct device *device, unsigned int opt_flags) { struct firmware_priv *fw_priv; struct device *f_dev; @@ -712,7 +844,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name, goto exit; } - fw_priv->nowait = nowait; + fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT); fw_priv->fw = firmware; INIT_DELAYED_WORK(&fw_priv->timeout_work, firmware_class_timeout_work); @@ -727,249 +859,281 @@ exit: return fw_priv; } -/* store the pages buffer info firmware from buf */ -static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw) +/* load a firmware via user helper */ +static int _request_firmware_load(struct firmware_priv *fw_priv, + unsigned int opt_flags, long timeout) { - fw->priv = buf; - fw->pages = buf->pages; - fw->size = buf->size; - fw->data = buf->data; + int retval = 0; + struct device *f_dev = &fw_priv->dev; + struct firmware_buf *buf = fw_priv->buf; - pr_debug("%s: fw-%s buf=%p data=%p size=%u\n", - __func__, buf->fw_id, buf, buf->data, - (unsigned int)buf->size); -} + /* fall back on userspace loading */ + buf->is_paged_buf = true; -#ifdef CONFIG_PM_SLEEP -static void fw_name_devm_release(struct device *dev, void *res) -{ - struct fw_name_devm *fwn = res; + dev_set_uevent_suppress(f_dev, true); - if (fwn->magic == (unsigned long)&fw_cache) - pr_debug("%s: fw_name-%s devm-%p released\n", - __func__, fwn->name, res); -} + retval = device_add(f_dev); + if (retval) { + dev_err(f_dev, "%s: device_register failed\n", __func__); + goto err_put_dev; + } -static int fw_devm_match(struct device *dev, void *res, - void *match_data) -{ - struct fw_name_devm *fwn = res; + retval = device_create_bin_file(f_dev, &firmware_attr_data); + if (retval) { + dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__); + goto err_del_dev; + } - return (fwn->magic == (unsigned long)&fw_cache) && - !strcmp(fwn->name, match_data); -} + mutex_lock(&fw_lock); + list_add(&buf->pending_list, &pending_fw_head); + mutex_unlock(&fw_lock); -static struct fw_name_devm *fw_find_devm_name(struct device *dev, - const char *name) -{ - struct fw_name_devm *fwn; + retval = device_create_file(f_dev, &dev_attr_loading); + if (retval) { + mutex_lock(&fw_lock); + list_del_init(&buf->pending_list); + mutex_unlock(&fw_lock); + dev_err(f_dev, "%s: device_create_file failed\n", __func__); + goto err_del_bin_attr; + } - fwn = devres_find(dev, fw_name_devm_release, - fw_devm_match, (void *)name); - return fwn; + if (opt_flags & FW_OPT_UEVENT) { + buf->need_uevent = true; + dev_set_uevent_suppress(f_dev, false); + dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id); + if (timeout != MAX_SCHEDULE_TIMEOUT) + queue_delayed_work(system_power_efficient_wq, + &fw_priv->timeout_work, timeout); + + kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); + } + + wait_for_completion(&buf->completion); + + cancel_delayed_work_sync(&fw_priv->timeout_work); + if (!buf->data) + 
retval = -ENOMEM; + + device_remove_file(f_dev, &dev_attr_loading); +err_del_bin_attr: + device_remove_bin_file(f_dev, &firmware_attr_data); +err_del_dev: + device_del(f_dev); +err_put_dev: + put_device(f_dev); + return retval; } -/* add firmware name into devres list */ -static int fw_add_devm_name(struct device *dev, const char *name) +static int fw_load_from_user_helper(struct firmware *firmware, + const char *name, struct device *device, + unsigned int opt_flags, long timeout) { - struct fw_name_devm *fwn; + struct firmware_priv *fw_priv; - fwn = fw_find_devm_name(dev, name); - if (fwn) - return 1; + fw_priv = fw_create_instance(firmware, name, device, opt_flags); + if (IS_ERR(fw_priv)) + return PTR_ERR(fw_priv); - fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) + - strlen(name) + 1, GFP_KERNEL); - if (!fwn) - return -ENOMEM; + fw_priv->buf = firmware->priv; + return _request_firmware_load(fw_priv, opt_flags, timeout); +} - fwn->magic = (unsigned long)&fw_cache; - strcpy(fwn->name, name); - devres_add(dev, fwn); +#ifdef CONFIG_PM_SLEEP +/* kill pending requests without uevent to avoid blocking suspend */ +static void kill_requests_without_uevent(void) +{ + struct firmware_buf *buf; + struct firmware_buf *next; - return 0; + mutex_lock(&fw_lock); + list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) { + if (!buf->need_uevent) + __fw_load_abort(buf); + } + mutex_unlock(&fw_lock); } -#else -static int fw_add_devm_name(struct device *dev, const char *name) +#endif + +#else /* CONFIG_FW_LOADER_USER_HELPER */ +static inline int +fw_load_from_user_helper(struct firmware *firmware, const char *name, + struct device *device, unsigned int opt_flags, + long timeout) { - return 0; + return -ENOENT; } + +/* No abort during direct loading */ +#define is_fw_load_aborted(buf) false + +#ifdef CONFIG_PM_SLEEP +static inline void kill_requests_without_uevent(void) { } #endif -static void _request_firmware_cleanup(const struct firmware **firmware_p) +#endif /* CONFIG_FW_LOADER_USER_HELPER */ + + +/* wait until the shared firmware_buf becomes ready (or error) */ +static int sync_cached_firmware_buf(struct firmware_buf *buf) { - release_firmware(*firmware_p); - *firmware_p = NULL; + int ret = 0; + + mutex_lock(&fw_lock); + while (!test_bit(FW_STATUS_DONE, &buf->status)) { + if (is_fw_load_aborted(buf)) { + ret = -ENOENT; + break; + } + mutex_unlock(&fw_lock); + wait_for_completion(&buf->completion); + mutex_lock(&fw_lock); + } + mutex_unlock(&fw_lock); + return ret; } -static struct firmware_priv * -_request_firmware_prepare(const struct firmware **firmware_p, const char *name, - struct device *device, bool uevent, bool nowait) +/* prepare firmware and firmware_buf structs; + * return 0 if a firmware is already assigned, 1 if need to load one, + * or a negative error code + */ +static int +_request_firmware_prepare(struct firmware **firmware_p, const char *name, + struct device *device) { struct firmware *firmware; - struct firmware_priv *fw_priv = NULL; struct firmware_buf *buf; int ret; - if (!firmware_p) - return ERR_PTR(-EINVAL); - *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", __func__); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } if (fw_get_builtin_firmware(firmware, name)) { dev_dbg(device, "firmware: using built-in firmware %s\n", name); - return NULL; + return 0; /* assigned */ } ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf); - if (!ret) - fw_priv = 
fw_create_instance(firmware, name, device, - uevent, nowait); - if (IS_ERR(fw_priv) || ret < 0) { - kfree(firmware); - *firmware_p = NULL; - return ERR_PTR(-ENOMEM); - } else if (fw_priv) { - fw_priv->buf = buf; - - /* - * bind with 'buf' now to avoid warning in failure path - * of requesting firmware. - */ - firmware->priv = buf; - return fw_priv; - } + /* + * bind with 'buf' now to avoid warning in failure path + * of requesting firmware. + */ + firmware->priv = buf; - /* share the cached buf, which is inprogessing or completed */ - check_status: - mutex_lock(&fw_lock); - if (test_bit(FW_STATUS_ABORT, &buf->status)) { - fw_priv = ERR_PTR(-ENOENT); - firmware->priv = buf; - _request_firmware_cleanup(firmware_p); - goto exit; - } else if (test_bit(FW_STATUS_DONE, &buf->status)) { - fw_priv = NULL; - fw_set_page_data(buf, firmware); - goto exit; + if (ret > 0) { + ret = sync_cached_firmware_buf(buf); + if (!ret) { + fw_set_page_data(buf, firmware); + return 0; /* assigned */ + } } - mutex_unlock(&fw_lock); - wait_for_completion(&buf->completion); - goto check_status; -exit: - mutex_unlock(&fw_lock); - return fw_priv; + if (ret < 0) + return ret; + return 1; /* need to load */ } -static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, - long timeout) +static int assign_firmware_buf(struct firmware *fw, struct device *device, + unsigned int opt_flags) { - int retval = 0; - struct device *f_dev = &fw_priv->dev; - struct firmware_buf *buf = fw_priv->buf; - struct firmware_cache *fwc = &fw_cache; - int direct_load = 0; - - /* try direct loading from fs first */ - if (fw_get_filesystem_firmware(buf)) { - dev_dbg(f_dev->parent, "firmware: direct-loading" - " firmware %s\n", buf->fw_id); + struct firmware_buf *buf = fw->priv; - mutex_lock(&fw_lock); - set_bit(FW_STATUS_DONE, &buf->status); + mutex_lock(&fw_lock); + if (!buf->size || is_fw_load_aborted(buf)) { mutex_unlock(&fw_lock); - complete_all(&buf->completion); - direct_load = 1; - goto handle_fw; - } - - /* fall back on userspace loading */ - buf->fmt = PAGE_BUF; - - dev_set_uevent_suppress(f_dev, true); - - /* Need to pin this module until class device is destroyed */ - __module_get(THIS_MODULE); - - retval = device_add(f_dev); - if (retval) { - dev_err(f_dev, "%s: device_register failed\n", __func__); - goto err_put_dev; - } - - retval = device_create_bin_file(f_dev, &firmware_attr_data); - if (retval) { - dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__); - goto err_del_dev; + return -ENOENT; } - retval = device_create_file(f_dev, &dev_attr_loading); - if (retval) { - dev_err(f_dev, "%s: device_create_file failed\n", __func__); - goto err_del_bin_attr; - } - - if (uevent) { - dev_set_uevent_suppress(f_dev, false); - dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id); - if (timeout != MAX_SCHEDULE_TIMEOUT) - schedule_delayed_work(&fw_priv->timeout_work, timeout); - - kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); - } - - wait_for_completion(&buf->completion); - - cancel_delayed_work_sync(&fw_priv->timeout_work); - -handle_fw: - mutex_lock(&fw_lock); - if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status)) - retval = -ENOENT; - /* * add firmware name into devres list so that we can auto cache * and uncache firmware for device. * - * f_dev->parent may has been deleted already, but the problem + * device may has been deleted already, but the problem * should be fixed in devres or driver core. 
*/ - if (!retval && f_dev->parent) - fw_add_devm_name(f_dev->parent, buf->fw_id); + /* don't cache firmware handled without uevent */ + if (device && (opt_flags & FW_OPT_UEVENT)) + fw_add_devm_name(device, buf->fw_id); /* * After caching firmware image is started, let it piggyback * on request firmware. */ - if (!retval && fwc->state == FW_LOADER_START_CACHE) { + if (buf->fwc->state == FW_LOADER_START_CACHE) { if (fw_cache_piggyback_on_request(buf->fw_id)) kref_get(&buf->ref); } /* pass the pages buffer to driver at the last minute */ - fw_set_page_data(buf, fw_priv->fw); - - fw_priv->buf = NULL; + fw_set_page_data(buf, fw); mutex_unlock(&fw_lock); + return 0; +} - if (direct_load) - goto err_put_dev; +/* called from request_firmware() and request_firmware_work_func() */ +static int +_request_firmware(const struct firmware **firmware_p, const char *name, + struct device *device, unsigned int opt_flags) +{ + struct firmware *fw; + long timeout; + int ret; - device_remove_file(f_dev, &dev_attr_loading); -err_del_bin_attr: - device_remove_bin_file(f_dev, &firmware_attr_data); -err_del_dev: - device_del(f_dev); -err_put_dev: - put_device(f_dev); - return retval; + if (!firmware_p) + return -EINVAL; + + ret = _request_firmware_prepare(&fw, name, device); + if (ret <= 0) /* error or already assigned */ + goto out; + + ret = 0; + timeout = firmware_loading_timeout(); + if (opt_flags & FW_OPT_NOWAIT) { + timeout = usermodehelper_read_lock_wait(timeout); + if (!timeout) { + dev_dbg(device, "firmware: %s loading timed out\n", + name); + ret = -EBUSY; + goto out; + } + } else { + ret = usermodehelper_read_trylock(); + if (WARN_ON(ret)) { + dev_err(device, "firmware: %s will not be loaded\n", + name); + goto out; + } + } + + ret = fw_get_filesystem_firmware(device, fw->priv); + if (ret) { + if (opt_flags & FW_OPT_FALLBACK) { + dev_warn(device, + "Direct firmware load failed with error %d\n", + ret); + dev_warn(device, "Falling back to user helper\n"); + ret = fw_load_from_user_helper(fw, name, device, + opt_flags, timeout); + } + } + + if (!ret) + ret = assign_firmware_buf(fw, device, opt_flags); + + usermodehelper_read_unlock(); + + out: + if (ret < 0) { + release_firmware(fw); + fw = NULL; + } + + *firmware_p = fw; + return ret; } /** @@ -996,27 +1160,40 @@ int request_firmware(const struct firmware **firmware_p, const char *name, struct device *device) { - struct firmware_priv *fw_priv; int ret; - fw_priv = _request_firmware_prepare(firmware_p, name, device, true, - false); - if (IS_ERR_OR_NULL(fw_priv)) - return PTR_RET(fw_priv); - - ret = usermodehelper_read_trylock(); - if (WARN_ON(ret)) { - dev_err(device, "firmware: %s will not be loaded\n", name); - } else { - ret = _request_firmware_load(fw_priv, true, - firmware_loading_timeout()); - usermodehelper_read_unlock(); - } - if (ret) - _request_firmware_cleanup(firmware_p); + /* Need to pin this module until return */ + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, + FW_OPT_UEVENT | FW_OPT_FALLBACK); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL(request_firmware); +#ifdef CONFIG_FW_LOADER_USER_HELPER +/** + * request_firmware: - load firmware directly without usermode helper + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function works pretty much like request_firmware(), but this doesn't + * fall back to usermode helper even if the firmware couldn't be loaded + * directly from fs. 
Hence it's useful for loading optional firmwares, which + * aren't always present, without extra long timeouts of udev. + **/ +int request_firmware_direct(const struct firmware **firmware_p, + const char *name, struct device *device) +{ + int ret; + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); + module_put(THIS_MODULE); return ret; } +EXPORT_SYMBOL_GPL(request_firmware_direct); +#endif /** * release_firmware: - release the resource associated with a firmware image @@ -1030,6 +1207,7 @@ void release_firmware(const struct firmware *fw) kfree(fw); } } +EXPORT_SYMBOL(release_firmware); /* Async support */ struct firmware_work { @@ -1039,40 +1217,20 @@ struct firmware_work { struct device *device; void *context; void (*cont)(const struct firmware *fw, void *context); - bool uevent; + unsigned int opt_flags; }; static void request_firmware_work_func(struct work_struct *work) { struct firmware_work *fw_work; const struct firmware *fw; - struct firmware_priv *fw_priv; - long timeout; - int ret; fw_work = container_of(work, struct firmware_work, work); - fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device, - fw_work->uevent, true); - if (IS_ERR_OR_NULL(fw_priv)) { - ret = PTR_RET(fw_priv); - goto out; - } - - timeout = usermodehelper_read_lock_wait(firmware_loading_timeout()); - if (timeout) { - ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout); - usermodehelper_read_unlock(); - } else { - dev_dbg(fw_work->device, "firmware: %s loading timed out\n", - fw_work->name); - ret = -EAGAIN; - } - if (ret) - _request_firmware_cleanup(&fw); - out: + _request_firmware(&fw, fw_work->name, fw_work->device, + fw_work->opt_flags); fw_work->cont(fw, fw_work->context); - put_device(fw_work->device); + put_device(fw_work->device); /* taken in request_firmware_nowait() */ module_put(fw_work->module); kfree(fw_work); @@ -1118,7 +1276,8 @@ request_firmware_nowait( fw_work->device = device; fw_work->context = context; fw_work->cont = cont; - fw_work->uevent = uevent; + fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | + (uevent ? 
FW_OPT_UEVENT : 0); if (!try_module_get(module)) { kfree(fw_work); @@ -1130,6 +1289,10 @@ request_firmware_nowait( schedule_work(&fw_work->work); return 0; } +EXPORT_SYMBOL(request_firmware_nowait); + +#ifdef CONFIG_PM_SLEEP +static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); /** * cache_firmware - cache one firmware image in kernel memory space @@ -1145,7 +1308,7 @@ request_firmware_nowait( * Return !0 otherwise * */ -int cache_firmware(const char *fw_name) +static int cache_firmware(const char *fw_name) { int ret; const struct firmware *fw; @@ -1161,6 +1324,18 @@ int cache_firmware(const char *fw_name) return ret; } +static struct firmware_buf *fw_lookup_buf(const char *fw_name) +{ + struct firmware_buf *tmp; + struct firmware_cache *fwc = &fw_cache; + + spin_lock(&fwc->lock); + tmp = __fw_lookup_buf(fw_name); + spin_unlock(&fwc->lock); + + return tmp; +} + /** * uncache_firmware - remove one cached firmware image * @fw_name: the firmware image name @@ -1172,7 +1347,7 @@ int cache_firmware(const char *fw_name) * Return !0 otherwise * */ -int uncache_firmware(const char *fw_name) +static int uncache_firmware(const char *fw_name) { struct firmware_buf *buf; struct firmware fw; @@ -1191,9 +1366,6 @@ int uncache_firmware(const char *fw_name) return -EINVAL; } -#ifdef CONFIG_PM_SLEEP -static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); - static struct fw_cache_entry *alloc_fw_cache_entry(const char *name) { struct fw_cache_entry *fce; @@ -1403,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work) */ static void device_uncache_fw_images_delay(unsigned long delay) { - schedule_delayed_work(&fw_cache.work, - msecs_to_jiffies(delay)); + queue_delayed_work(system_power_efficient_wq, &fw_cache.work, + msecs_to_jiffies(delay)); } static int fw_pm_notify(struct notifier_block *notify_block, @@ -1413,6 +1585,8 @@ static int fw_pm_notify(struct notifier_block *notify_block, switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + kill_requests_without_uevent(); device_cache_fw_images(); break; @@ -1474,7 +1648,12 @@ static void __init fw_cache_init(void) static int __init firmware_class_init(void) { fw_cache_init(); +#ifdef CONFIG_FW_LOADER_USER_HELPER + register_reboot_notifier(&fw_shutdown_nb); return class_register(&firmware_class); +#else + return 0; +#endif } static void __exit firmware_class_exit(void) @@ -1483,14 +1662,11 @@ static void __exit firmware_class_exit(void) unregister_syscore_ops(&fw_syscore_ops); unregister_pm_notifier(&fw_cache.pm_notify); #endif +#ifdef CONFIG_FW_LOADER_USER_HELPER + unregister_reboot_notifier(&fw_shutdown_nb); class_unregister(&firmware_class); +#endif } fs_initcall(firmware_class_init); module_exit(firmware_class_exit); - -EXPORT_SYMBOL(release_firmware); -EXPORT_SYMBOL(request_firmware); -EXPORT_SYMBOL(request_firmware_nowait); -EXPORT_SYMBOL_GPL(cache_firmware); -EXPORT_SYMBOL_GPL(uncache_firmware); diff --git a/drivers/base/init.c b/drivers/base/init.c index c16f0b808a1..da033d3bab3 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -33,4 +33,5 @@ void __init driver_init(void) platform_bus_init(); cpu_dev_init(); memory_dev_init(); + container_dev_init(); } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 987604d56c8..89f752dd846 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -16,7 +16,6 @@ #include <linux/capability.h> #include <linux/device.h> #include <linux/memory.h> -#include <linux/kobject.h> #include <linux/memory_hotplug.h> #include <linux/mm.h> 
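/*
 * A minimal consumer-side sketch of the firmware interface reworked
 * above: request_firmware() tries the direct filesystem paths first
 * and only then (FW_OPT_FALLBACK) falls back to the user-mode helper,
 * while the new request_firmware_direct() skips that fallback, which
 * suits optional images that are often absent.  The firmware name,
 * function name and upload step below are illustrative only.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>

static int example_load_firmware(struct device *dev, bool optional)
{
	const struct firmware *fw;
	int ret;

	if (optional)
		/* no udev fallback, so no long timeout when absent */
		ret = request_firmware_direct(&fw, "example/image.bin", dev);
	else
		ret = request_firmware(&fw, "example/image.bin", dev);
	if (ret)
		return ret;

	/* ... upload fw->data (fw->size bytes) to the device ... */

	release_firmware(fw);
	return 0;
}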
#include <linux/mutex.h> @@ -30,6 +29,8 @@ static DEFINE_MUTEX(mem_sysfs_mutex); #define MEMORY_CLASS_NAME "memory" +#define to_memory_block(dev) container_of(dev, struct memory_block, dev) + static int sections_per_block; static inline int base_memory_block_id(int section_nr) @@ -37,9 +38,14 @@ static inline int base_memory_block_id(int section_nr) return section_nr / sections_per_block; } +static int memory_subsys_online(struct device *dev); +static int memory_subsys_offline(struct device *dev); + static struct bus_type memory_subsys = { .name = MEMORY_CLASS_NAME, .dev_name = MEMORY_CLASS_NAME, + .online = memory_subsys_online, + .offline = memory_subsys_offline, }; static BLOCKING_NOTIFIER_HEAD(memory_chain); @@ -72,37 +78,11 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier); static void memory_block_release(struct device *dev) { - struct memory_block *mem = container_of(dev, struct memory_block, dev); + struct memory_block *mem = to_memory_block(dev); kfree(mem); } -/* - * register_memory - Setup a sysfs device for a memory block - */ -static -int register_memory(struct memory_block *memory) -{ - int error; - - memory->dev.bus = &memory_subsys; - memory->dev.id = memory->start_section_nr / sections_per_block; - memory->dev.release = memory_block_release; - - error = device_register(&memory->dev); - return error; -} - -static void -unregister_memory(struct memory_block *memory) -{ - BUG_ON(memory->dev.bus != &memory_subsys); - - /* drop the ref. we got in remove_memory_block() */ - kobject_put(&memory->dev.kobj); - device_unregister(&memory->dev); -} - unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; @@ -131,25 +111,13 @@ static unsigned long get_memory_block_size(void) static ssize_t show_mem_start_phys_index(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = - container_of(dev, struct memory_block, dev); + struct memory_block *mem = to_memory_block(dev); unsigned long phys_index; phys_index = mem->start_section_nr / sections_per_block; return sprintf(buf, "%08lx\n", phys_index); } -static ssize_t show_mem_end_phys_index(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct memory_block *mem = - container_of(dev, struct memory_block, dev); - unsigned long phys_index; - - phys_index = mem->end_section_nr / sections_per_block; - return sprintf(buf, "%08lx\n", phys_index); -} - /* * Show whether the section of memory is likely to be hot-removable */ @@ -158,10 +126,11 @@ static ssize_t show_mem_removable(struct device *dev, { unsigned long i, pfn; int ret = 1; - struct memory_block *mem = - container_of(dev, struct memory_block, dev); + struct memory_block *mem = to_memory_block(dev); for (i = 0; i < sections_per_block; i++) { + if (!present_section_nr(mem->start_section_nr + i)) + continue; pfn = section_nr_to_pfn(mem->start_section_nr + i); ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); } @@ -175,8 +144,7 @@ static ssize_t show_mem_removable(struct device *dev, static ssize_t show_mem_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = - container_of(dev, struct memory_block, dev); + struct memory_block *mem = to_memory_block(dev); ssize_t len = 0; /* @@ -217,8 +185,7 @@ int memory_isolate_notify(unsigned long val, void *v) * The probe routines leave the pages reserved, just as the bootmem code does. * Make sure they're still that way. 
*/ -static bool pages_correctly_reserved(unsigned long start_pfn, - unsigned long nr_pages) +static bool pages_correctly_reserved(unsigned long start_pfn) { int i, j; struct page *page; @@ -266,7 +233,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t switch (action) { case MEM_ONLINE: - if (!pages_correctly_reserved(start_pfn, nr_pages)) + if (!pages_correctly_reserved(start_pfn)) return -EBUSY; ret = online_pages(start_pfn, nr_pages, online_type); @@ -283,76 +250,107 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t return ret; } -static int __memory_block_change_state(struct memory_block *mem, - unsigned long to_state, unsigned long from_state_req, - int online_type) +static int memory_block_change_state(struct memory_block *mem, + unsigned long to_state, unsigned long from_state_req) { int ret = 0; - if (mem->state != from_state_req) { - ret = -EINVAL; - goto out; - } + if (mem->state != from_state_req) + return -EINVAL; if (to_state == MEM_OFFLINE) mem->state = MEM_GOING_OFFLINE; - ret = memory_block_action(mem->start_section_nr, to_state, online_type); + ret = memory_block_action(mem->start_section_nr, to_state, + mem->online_type); - if (ret) { - mem->state = from_state_req; - goto out; - } + mem->state = ret ? from_state_req : to_state; - mem->state = to_state; - switch (mem->state) { - case MEM_OFFLINE: - kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE); - break; - case MEM_ONLINE: - kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE); - break; - default: - break; - } -out: return ret; } -static int memory_block_change_state(struct memory_block *mem, - unsigned long to_state, unsigned long from_state_req, - int online_type) +/* The device lock serializes operations on memory_subsys_[online|offline] */ +static int memory_subsys_online(struct device *dev) { + struct memory_block *mem = to_memory_block(dev); int ret; - mutex_lock(&mem->state_mutex); - ret = __memory_block_change_state(mem, to_state, from_state_req, - online_type); - mutex_unlock(&mem->state_mutex); + if (mem->state == MEM_ONLINE) + return 0; + + /* + * If we are called from store_mem_state(), online_type will be + * set >= 0 Otherwise we were called from the device online + * attribute and need to set the online_type. 
+ */ + if (mem->online_type < 0) + mem->online_type = ONLINE_KEEP; + + ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); + + /* clear online_type */ + mem->online_type = -1; return ret; } + +static int memory_subsys_offline(struct device *dev) +{ + struct memory_block *mem = to_memory_block(dev); + + if (mem->state == MEM_OFFLINE) + return 0; + + return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); +} + static ssize_t store_mem_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct memory_block *mem; - int ret = -EINVAL; + struct memory_block *mem = to_memory_block(dev); + int ret, online_type; - mem = container_of(dev, struct memory_block, dev); + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) - ret = memory_block_change_state(mem, MEM_ONLINE, - MEM_OFFLINE, ONLINE_KERNEL); + online_type = ONLINE_KERNEL; else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) - ret = memory_block_change_state(mem, MEM_ONLINE, - MEM_OFFLINE, ONLINE_MOVABLE); + online_type = ONLINE_MOVABLE; else if (!strncmp(buf, "online", min_t(int, count, 6))) - ret = memory_block_change_state(mem, MEM_ONLINE, - MEM_OFFLINE, ONLINE_KEEP); - else if(!strncmp(buf, "offline", min_t(int, count, 7))) - ret = memory_block_change_state(mem, MEM_OFFLINE, - MEM_ONLINE, -1); + online_type = ONLINE_KEEP; + else if (!strncmp(buf, "offline", min_t(int, count, 7))) + online_type = -1; + else { + ret = -EINVAL; + goto err; + } + + switch (online_type) { + case ONLINE_KERNEL: + case ONLINE_MOVABLE: + case ONLINE_KEEP: + /* + * mem->online_type is not protected so there can be a + * race here. However, when racing online, the first + * will succeed and the second will just return as the + * block will already be online. The online type + * could be either one, but that is expected. + */ + mem->online_type = online_type; + ret = device_online(&mem->dev); + break; + case -1: + ret = device_offline(&mem->dev); + break; + default: + ret = -EINVAL; /* should never happen */ + } + +err: + unlock_device_hotplug(); if (ret) return ret; @@ -371,22 +369,15 @@ store_mem_state(struct device *dev, static ssize_t show_phys_device(struct device *dev, struct device_attribute *attr, char *buf) { - struct memory_block *mem = - container_of(dev, struct memory_block, dev); + struct memory_block *mem = to_memory_block(dev); return sprintf(buf, "%d\n", mem->phys_device); } static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); -static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); -#define mem_create_simple_file(mem, attr_name) \ - device_create_file(&mem->dev, &dev_attr_##attr_name) -#define mem_remove_simple_file(mem, attr_name) \ - device_remove_file(&mem->dev, &dev_attr_##attr_name) - /* * Block size attribute stuff */ @@ -399,12 +390,6 @@ print_block_size(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL); -static int block_size_init(void) -{ - return device_create_file(memory_subsys.dev_root, - &dev_attr_block_size_bytes); -} - /* * Some architectures will have custom drivers to do this, and * will not need to do it from userspace. 
The fake hot-add code @@ -440,17 +425,8 @@ memory_probe_store(struct device *dev, struct device_attribute *attr, out: return ret; } -static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); -static int memory_probe_init(void) -{ - return device_create_file(memory_subsys.dev_root, &dev_attr_probe); -} -#else -static inline int memory_probe_init(void) -{ - return 0; -} +static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); #endif #ifdef CONFIG_MEMORY_FAILURE @@ -468,7 +444,7 @@ store_soft_offline_page(struct device *dev, u64 pfn; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (strict_strtoull(buf, 0, &pfn) < 0) + if (kstrtoull(buf, 0, &pfn) < 0) return -EINVAL; pfn >>= PAGE_SHIFT; if (!pfn_valid(pfn)) @@ -487,32 +463,15 @@ store_hard_offline_page(struct device *dev, u64 pfn; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (strict_strtoull(buf, 0, &pfn) < 0) + if (kstrtoull(buf, 0, &pfn) < 0) return -EINVAL; pfn >>= PAGE_SHIFT; ret = memory_failure(pfn, 0, 0); return ret ? ret : count; } -static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); -static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); - -static __init int memory_fail_init(void) -{ - int err; - - err = device_create_file(memory_subsys.dev_root, - &dev_attr_soft_offline_page); - if (!err) - err = device_create_file(memory_subsys.dev_root, - &dev_attr_hard_offline_page); - return err; -} -#else -static inline int memory_fail_init(void) -{ - return 0; -} +static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page); +static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page); #endif /* @@ -541,7 +500,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section, put_device(&hint->dev); if (!dev) return NULL; - return container_of(dev, struct memory_block, dev); + return to_memory_block(dev); } /* @@ -557,6 +516,38 @@ struct memory_block *find_memory_block(struct mem_section *section) return find_memory_block_hinted(section, NULL); } +static struct attribute *memory_memblk_attrs[] = { + &dev_attr_phys_index.attr, + &dev_attr_state.attr, + &dev_attr_phys_device.attr, + &dev_attr_removable.attr, + NULL +}; + +static struct attribute_group memory_memblk_attr_group = { + .attrs = memory_memblk_attrs, +}; + +static const struct attribute_group *memory_memblk_attr_groups[] = { + &memory_memblk_attr_group, + NULL, +}; + +/* + * register_memory - Setup a sysfs device for a memory block + */ +static +int register_memory(struct memory_block *memory) +{ + memory->dev.bus = &memory_subsys; + memory->dev.id = memory->start_section_nr / sections_per_block; + memory->dev.release = memory_block_release; + memory->dev.groups = memory_memblk_attr_groups; + memory->dev.offline = memory->state == MEM_OFFLINE; + + return device_register(&memory->dev); +} + static int init_memory_block(struct memory_block **memory, struct mem_section *section, unsigned long state) { @@ -575,70 +566,81 @@ static int init_memory_block(struct memory_block **memory, mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; mem->state = state; mem->section_count++; - mutex_init(&mem->state_mutex); start_pfn = section_nr_to_pfn(mem->start_section_nr); mem->phys_device = arch_get_memory_phys_device(start_pfn); ret = register_memory(mem); - if (!ret) - ret = mem_create_simple_file(mem, phys_index); - if (!ret) - ret = mem_create_simple_file(mem, end_phys_index); - if (!ret) - ret = mem_create_simple_file(mem, state); - if (!ret) - ret = 
mem_create_simple_file(mem, phys_device); - if (!ret) - ret = mem_create_simple_file(mem, removable); *memory = mem; return ret; } -static int add_memory_section(int nid, struct mem_section *section, - struct memory_block **mem_p, - unsigned long state, enum mem_add_context context) +static int add_memory_block(int base_section_nr) +{ + struct memory_block *mem; + int i, ret, section_count = 0, section_nr; + + for (i = base_section_nr; + (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS; + i++) { + if (!present_section_nr(i)) + continue; + if (section_count == 0) + section_nr = i; + section_count++; + } + + if (section_count == 0) + return 0; + ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE); + if (ret) + return ret; + mem->section_count = section_count; + return 0; +} + + +/* + * need an interface for the VM to add new memory regions, + * but without onlining it. + */ +int register_new_memory(int nid, struct mem_section *section) { - struct memory_block *mem = NULL; - int scn_nr = __section_nr(section); int ret = 0; + struct memory_block *mem; mutex_lock(&mem_sysfs_mutex); - if (context == BOOT) { - /* same memory block ? */ - if (mem_p && *mem_p) - if (scn_nr >= (*mem_p)->start_section_nr && - scn_nr <= (*mem_p)->end_section_nr) { - mem = *mem_p; - kobject_get(&mem->dev.kobj); - } - } else - mem = find_memory_block(section); - + mem = find_memory_block(section); if (mem) { mem->section_count++; - kobject_put(&mem->dev.kobj); + put_device(&mem->dev); } else { - ret = init_memory_block(&mem, section, state); - /* store memory_block pointer for next loop */ - if (!ret && context == BOOT) - if (mem_p) - *mem_p = mem; - } - - if (!ret) { - if (context == HOTPLUG && - mem->section_count == sections_per_block) - ret = register_mem_sect_under_node(mem, nid); + ret = init_memory_block(&mem, section, MEM_OFFLINE); + if (ret) + goto out; } + if (mem->section_count == sections_per_block) + ret = register_mem_sect_under_node(mem, nid); +out: mutex_unlock(&mem_sysfs_mutex); return ret; } -int remove_memory_block(unsigned long node_id, struct mem_section *section, - int phys_device) +#ifdef CONFIG_MEMORY_HOTREMOVE +static void +unregister_memory(struct memory_block *memory) +{ + BUG_ON(memory->dev.bus != &memory_subsys); + + /* drop the ref. we got in remove_memory_block() */ + put_device(&memory->dev); + device_unregister(&memory->dev); +} + +static int remove_memory_block(unsigned long node_id, + struct mem_section *section, int phys_device) { struct memory_block *mem; @@ -647,29 +649,15 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, unregister_mem_sect_under_nodes(mem, __section_nr(section)); mem->section_count--; - if (mem->section_count == 0) { - mem_remove_simple_file(mem, phys_index); - mem_remove_simple_file(mem, end_phys_index); - mem_remove_simple_file(mem, state); - mem_remove_simple_file(mem, phys_device); - mem_remove_simple_file(mem, removable); + if (mem->section_count == 0) unregister_memory(mem); - } else - kobject_put(&mem->dev.kobj); + else + put_device(&mem->dev); mutex_unlock(&mem_sysfs_mutex); return 0; } -/* - * need an interface for the VM to add new memory regions, - * but without onlining it. 
- */ -int register_new_memory(int nid, struct mem_section *section) -{ - return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG); -} - int unregister_memory_section(struct mem_section *section) { if (!present_section(section)) @@ -677,21 +665,36 @@ int unregister_memory_section(struct mem_section *section) return remove_memory_block(0, section, 0); } +#endif /* CONFIG_MEMORY_HOTREMOVE */ -/* - * offline one memory block. If the memory block has been offlined, do nothing. - */ -int offline_memory_block(struct memory_block *mem) +/* return true if the memory block is offlined, otherwise, return false */ +bool is_memblock_offlined(struct memory_block *mem) { - int ret = 0; + return mem->state == MEM_OFFLINE; +} - mutex_lock(&mem->state_mutex); - if (mem->state != MEM_OFFLINE) - ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1); - mutex_unlock(&mem->state_mutex); +static struct attribute *memory_root_attrs[] = { +#ifdef CONFIG_ARCH_MEMORY_PROBE + &dev_attr_probe.attr, +#endif - return ret; -} +#ifdef CONFIG_MEMORY_FAILURE + &dev_attr_soft_offline_page.attr, + &dev_attr_hard_offline_page.attr, +#endif + + &dev_attr_block_size_bytes.attr, + NULL +}; + +static struct attribute_group memory_root_attr_group = { + .attrs = memory_root_attrs, +}; + +static const struct attribute_group *memory_root_attr_groups[] = { + &memory_root_attr_group, + NULL, +}; /* * Initialize the sysfs support for memory devices... @@ -702,9 +705,8 @@ int __init memory_dev_init(void) int ret; int err; unsigned long block_sz; - struct memory_block *mem = NULL; - ret = subsys_system_register(&memory_subsys, NULL); + ret = subsys_system_register(&memory_subsys, memory_root_attr_groups); if (ret) goto out; @@ -715,27 +717,14 @@ int __init memory_dev_init(void) * Create entries for memory sections that were found * during boot and have been initialized */ - for (i = 0; i < NR_MEM_SECTIONS; i++) { - if (!present_section_nr(i)) - continue; - /* don't need to reuse memory_block if only one per block */ - err = add_memory_section(0, __nr_to_section(i), - (sections_per_block == 1) ? 
NULL : &mem, - MEM_ONLINE, - BOOT); + mutex_lock(&mem_sysfs_mutex); + for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) { + err = add_memory_block(i); if (!ret) ret = err; } + mutex_unlock(&mem_sysfs_mutex); - err = memory_probe_init(); - if (!ret) - ret = err; - err = memory_fail_init(); - if (!ret) - ret = err; - err = block_size_init(); - if (!ret) - ret = err; out: if (ret) printk(KERN_ERR "%s() failed: %d\n", __func__, ret); diff --git a/drivers/base/node.c b/drivers/base/node.c index fac124a7e1c..8f7ed9933a7 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -7,6 +7,7 @@ #include <linux/mm.h> #include <linux/memory.h> #include <linux/vmstat.h> +#include <linux/notifier.h> #include <linux/node.h> #include <linux/hugetlb.h> #include <linux/compaction.h> @@ -124,13 +125,7 @@ static ssize_t node_read_meminfo(struct device *dev, nid, K(node_page_state(nid, NR_WRITEBACK)), nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - nid, K(node_page_state(nid, NR_ANON_PAGES) - + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR), -#else nid, K(node_page_state(nid, NR_ANON_PAGES)), -#endif nid, K(node_page_state(nid, NR_SHMEM)), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, @@ -604,7 +599,11 @@ int register_one_node(int nid) void unregister_one_node(int nid) { + if (!node_devices[nid]) + return; + unregister_node(node_devices[nid]); + kfree(node_devices[nid]); node_devices[nid] = NULL; } @@ -683,8 +682,11 @@ static int __init register_node_type(void) ret = subsys_system_register(&node_subsys, cpu_root_attr_groups); if (!ret) { - hotplug_memory_notifier(node_memory_callback, - NODE_CALLBACK_PRI); + static struct notifier_block node_memory_callback_nb = { + .notifier_call = node_memory_callback, + .priority = NODE_CALLBACK_PRI, + }; + register_hotmemory_notifier(&node_memory_callback_nb); } /* diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c new file mode 100644 index 00000000000..5fb74b43848 --- /dev/null +++ b/drivers/base/pinctrl.c @@ -0,0 +1,88 @@ +/* + * Driver core interface to the pinctrl subsystem. 
+ * + * Copyright (C) 2012 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/device.h> +#include <linux/pinctrl/devinfo.h> +#include <linux/pinctrl/consumer.h> +#include <linux/slab.h> + +/** + * pinctrl_bind_pins() - called by the device core before probe + * @dev: the device that is just about to probe + */ +int pinctrl_bind_pins(struct device *dev) +{ + int ret; + + dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL); + if (!dev->pins) + return -ENOMEM; + + dev->pins->p = devm_pinctrl_get(dev); + if (IS_ERR(dev->pins->p)) { + dev_dbg(dev, "no pinctrl handle\n"); + ret = PTR_ERR(dev->pins->p); + goto cleanup_alloc; + } + + dev->pins->default_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(dev->pins->default_state)) { + dev_dbg(dev, "no default pinctrl state\n"); + ret = 0; + goto cleanup_get; + } + + ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state); + if (ret) { + dev_dbg(dev, "failed to activate default pinctrl state\n"); + goto cleanup_get; + } + +#ifdef CONFIG_PM + /* + * If power management is enabled, we also look for the optional + * sleep and idle pin states, with semantics as defined in + * <linux/pinctrl/pinctrl-state.h> + */ + dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(dev->pins->sleep_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no sleep pinctrl state\n"); + + dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_IDLE); + if (IS_ERR(dev->pins->idle_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no idle pinctrl state\n"); +#endif + + return 0; + + /* + * If no pinctrl handle or default state was found for this device, + * let's explicitly free the pin container in the device, there is + * no point in keeping it around. + */ +cleanup_get: + devm_pinctrl_put(dev->pins->p); +cleanup_alloc: + devm_kfree(dev, dev->pins); + dev->pins = NULL; + + /* Only return deferrals */ + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; +} diff --git a/drivers/base/platform.c b/drivers/base/platform.c index c0b8df38402..eee48c49f5d 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -13,6 +13,7 @@ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> @@ -29,9 +30,6 @@ /* For automatically allocated device IDs */ static DEFINE_IDA(platform_devid_ida); -#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ - driver)) - struct device platform_bus = { .init_name = "platform", }; @@ -46,8 +44,8 @@ EXPORT_SYMBOL_GPL(platform_bus); * manipulate any relevant information in the pdev_archdata they can do: * * platform_device_alloc() - * ... manipulate ... - * platform_device_add() + * ... manipulate ... + * platform_device_add() * * And if they don't care they can just call platform_device_register() and * everything will just work out. 
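The platform_device_alloc() / platform_device_add() split described in the comment above lets a caller touch the device between allocation and registration; a rough sketch, with "example-dev" as a placeholder name:

#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_device *example_register(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", -1);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	/* ... manipulate pdev->dev or pdev->archdata here ... */

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* drops the allocation */
		return ERR_PTR(ret);
	}

	return pdev;
}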
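For comparison with pinctrl_bind_pins() added in drivers/base/pinctrl.c above, this is roughly the sequence a probe routine had to carry by hand before the driver core took it over; a sketch only, using the standard state name from <linux/pinctrl/pinctrl-state.h>:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_setup_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(s))
		return 0;	/* not supplying a default state is legal */

	/* park the pins in their default state before the device runs */
	return pinctrl_select_state(p, s);
}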
@@ -90,7 +88,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) return -ENXIO; return dev->archdata.irqs[num]; #else - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); + struct resource *r; + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get(dev->dev.of_node, num); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } + + r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ? r->start : -ENXIO; #endif @@ -129,9 +136,17 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname); */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { - struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, - name); + struct resource *r; + + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + ret = of_irq_get_byname(dev->dev.of_node, name); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } + + r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); @@ -326,9 +341,7 @@ int platform_device_add(struct platform_device *pdev) } if (p && insert_resource(p, r)) { - printk(KERN_ERR - "%s: failed to claim resource %d\n", - dev_name(&pdev->dev), i); + dev_err(&pdev->dev, "failed to claim resource %d\n", i); ret = -EBUSY; goto failed; } @@ -437,7 +450,7 @@ struct platform_device *platform_device_register_full( goto err_alloc; pdev->dev.parent = pdevinfo->parent; - ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); + ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion); if (pdevinfo->dma_mask) { /* @@ -468,7 +481,7 @@ struct platform_device *platform_device_register_full( ret = platform_device_add(pdev); if (ret) { err: - ACPI_HANDLE_SET(&pdev->dev, NULL); + ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); err_alloc: @@ -486,13 +499,17 @@ static int platform_drv_probe(struct device *_dev) struct platform_device *dev = to_platform_device(_dev); int ret; - if (ACPI_HANDLE(_dev)) - acpi_dev_pm_attach(_dev, true); + acpi_dev_pm_attach(_dev, true); ret = drv->probe(dev); - if (ret && ACPI_HANDLE(_dev)) + if (ret) acpi_dev_pm_detach(_dev, true); + if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { + dev_warn(_dev, "probe deferral not supported\n"); + ret = -ENXIO; + } + return ret; } @@ -508,8 +525,7 @@ static int platform_drv_remove(struct device *_dev) int ret; ret = drv->remove(dev); - if (ACPI_HANDLE(_dev)) - acpi_dev_pm_detach(_dev, true); + acpi_dev_pm_detach(_dev, true); return ret; } @@ -520,16 +536,18 @@ static void platform_drv_shutdown(struct device *_dev) struct platform_device *dev = to_platform_device(_dev); drv->shutdown(dev); - if (ACPI_HANDLE(_dev)) - acpi_dev_pm_detach(_dev, true); + acpi_dev_pm_detach(_dev, true); } /** - * platform_driver_register - register a driver for platform-level devices + * __platform_driver_register - register a driver for platform-level devices * @drv: platform driver structure + * @owner: owning module/driver */ -int platform_driver_register(struct platform_driver *drv) +int __platform_driver_register(struct platform_driver *drv, + struct module *owner) { + drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; if (drv->probe) drv->driver.probe = platform_drv_probe; @@ -540,7 +558,7 @@ int platform_driver_register(struct platform_driver *drv) return driver_register(&drv->driver); } -EXPORT_SYMBOL_GPL(platform_driver_register); +EXPORT_SYMBOL_GPL(__platform_driver_register); /** * platform_driver_unregister - 
unregister a driver for platform-level devices @@ -566,6 +584,8 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister); * into system-on-chip processors, where the controller devices have been * configured as part of board setup. * + * Note that this is incompatible with deferred probing. + * * Returns zero if the driver registered and bound to a device, else returns * a negative error code and with the driver not registered. */ @@ -574,6 +594,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, { int retval, code; + /* + * Prevent driver from requesting probe deferral to avoid further + * futile probe attempts. + */ + drv->prevent_deferred_probe = true; + /* make sure driver won't have bind/unbind attributes */ drv->driver.suppress_bind_attrs = true; @@ -666,15 +692,27 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, char *buf) { struct platform_device *pdev = to_platform_device(dev); - int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); + int len; + + len = of_device_get_modalias(dev, buf, PAGE_SIZE -1); + if (len != -ENODEV) + return len; + + len = acpi_device_modalias(dev, buf, PAGE_SIZE -1); + if (len != -ENODEV) + return len; + + len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; } +static DEVICE_ATTR_RO(modalias); -static struct device_attribute platform_dev_attrs[] = { - __ATTR_RO(modalias), - __ATTR_NULL, +static struct attribute *platform_dev_attrs[] = { + &dev_attr_modalias.attr, + NULL, }; +ATTRIBUTE_GROUPS(platform_dev); static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) { @@ -682,7 +720,11 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ - rc = of_device_uevent_modalias(dev,env); + rc = of_device_uevent_modalias(dev, env); + if (rc != -ENODEV) + return rc; + + rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; @@ -886,13 +928,12 @@ int platform_pm_restore(struct device *dev) static const struct dev_pm_ops platform_dev_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, - .runtime_idle = pm_generic_runtime_idle, USE_PLATFORM_PM_SLEEP_OPS }; struct bus_type platform_bus_type = { .name = "platform", - .dev_attrs = platform_dev_attrs, + .dev_groups = platform_dev_groups, .match = platform_match, .uevent = platform_uevent, .pm = &platform_dev_pm_ops, @@ -1053,7 +1094,7 @@ void __init early_platform_driver_register_all(char *class_str) * @epdrv: early platform driver structure * @id: id to match against */ -static __init struct platform_device * +static struct platform_device * __init early_platform_match(struct early_platform_driver *epdrv, int id) { struct platform_device *pd; @@ -1071,7 +1112,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id) * @epdrv: early platform driver structure * @id: return true if id or above exists */ -static __init int early_platform_left(struct early_platform_driver *epdrv, +static int __init early_platform_left(struct early_platform_driver *epdrv, int id) { struct platform_device *pd; @@ -1126,8 +1167,8 @@ static int __init early_platform_driver_probe_id(char *class_str, switch (match_id) { case EARLY_PLATFORM_ID_ERROR: - pr_warning("%s: unable to parse %s parameter\n", - class_str, epdrv->pdrv->driver.name); + pr_warn("%s: unable to parse %s parameter\n", + class_str, epdrv->pdrv->driver.name); /* fall-through 
*/ case EARLY_PLATFORM_ID_UNSET: match = NULL; @@ -1158,8 +1199,8 @@ static int __init early_platform_driver_probe_id(char *class_str, } if (epdrv->pdrv->probe(match)) - pr_warning("%s: unable to probe %s early.\n", - class_str, match->name); + pr_warn("%s: unable to probe %s early.\n", + class_str, match->name); else n++; } diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c..1cb8544598d 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 9d8fde70939..b99e6c06ee6 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -6,7 +6,6 @@ * This file is released under the GPLv2. */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/io.h> @@ -33,6 +32,21 @@ struct pm_clock_entry { }; /** + * pm_clk_enable - Enable a clock, reporting any errors + * @dev: The device for the given clock + * @clk: The clock being enabled. + */ +static inline int __pm_clk_enable(struct device *dev, struct clk *clk) +{ + int ret = clk_enable(clk); + if (ret) + dev_err(dev, "%s: failed to enable clk %p, error %d\n", + __func__, clk, ret); + + return ret; +} + +/** * pm_clk_acquire - Acquire a device clock. * @dev: Device whose clock is to be acquired. * @ce: PM clock entry corresponding to the clock. @@ -43,6 +57,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; } else { + clk_prepare(ce->clk); ce->status = PCE_STATUS_ACQUIRED; dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); } @@ -99,10 +114,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) if (ce->status < PCE_STATUS_ERROR) { if (ce->status == PCE_STATUS_ENABLED) - clk_disable_unprepare(ce->clk); + clk_disable(ce->clk); - if (ce->status >= PCE_STATUS_ACQUIRED) + if (ce->status >= PCE_STATUS_ACQUIRED) { + clk_unprepare(ce->clk); clk_put(ce->clk); + } } kfree(ce->con_id); @@ -249,6 +266,7 @@ int pm_clk_resume(struct device *dev) struct pm_subsys_data *psd = dev_to_psd(dev); struct pm_clock_entry *ce; unsigned long flags; + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -259,8 +277,9 @@ int pm_clk_resume(struct device *dev) list_for_each_entry(ce, &psd->clock_list, node) { if (ce->status < PCE_STATUS_ERROR) { - clk_enable(ce->clk); - ce->status = PCE_STATUS_ENABLED; + ret = __pm_clk_enable(dev, ce->clk); + if (!ret) + ce->status = PCE_STATUS_ENABLED; } } @@ -376,7 +395,7 @@ int pm_clk_resume(struct device *dev) spin_lock_irqsave(&psd->lock, flags); list_for_each_entry(ce, &psd->clock_list, node) - clk_enable(ce->clk); + __pm_clk_enable(dev, ce->clk); spin_unlock_irqrestore(&psd->lock, flags); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 39c32529b83..df2e5eeaeb0 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -6,7 +6,6 @@ * This file is released under the GPLv2. 
*/ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/export.h> @@ -61,24 +60,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); int dev_pm_put_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; + int ret = 1; spin_lock_irq(&dev->power.lock); psd = dev_to_psd(dev); - if (!psd) { - ret = -EINVAL; + if (!psd) goto out; - } if (--psd->refcount == 0) { dev->power.subsys_data = NULL; - kfree(psd); - ret = 1; + } else { + psd = NULL; + ret = 0; } out: spin_unlock_irq(&dev->power.lock); + kfree(psd); return ret; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index acc3a8ded29..eee55c1e5fd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,7 +6,6 @@ * This file is released under the GPLv2. */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> @@ -42,7 +41,7 @@ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \ if (!__retval && __elapsed > __td->field) { \ __td->field = __elapsed; \ - dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ genpd->max_off_time_changed = true; \ __td->constraint_changed = true; \ @@ -106,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) { atomic_inc(&genpd->sd_count); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); } static void genpd_acquire_lock(struct generic_pm_domain *genpd) @@ -433,8 +432,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) */ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { - if (!work_pending(&genpd->power_off_work)) - queue_work(pm_wq, &genpd->power_off_work); + queue_work(pm_wq, &genpd->power_off_work); } /** @@ -707,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev) return 0; } +static bool pd_ignore_unused; +static int __init pd_ignore_unused_setup(char *__unused) +{ + pd_ignore_unused = true; + return 1; +} +__setup("pd_ignore_unused", pd_ignore_unused_setup); + /** * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. 
*/ @@ -714,6 +720,11 @@ void pm_genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; + if (pd_ignore_unused) { + pr_warn("genpd: Not disabling unused power domains\n"); + return; + } + mutex_lock(&gpd_list_lock); list_for_each_entry(genpd, &gpd_list, gpd_list_node) @@ -921,7 +932,7 @@ static int pm_genpd_prepare(struct device *dev) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { - pm_runtime_put_sync(dev); + pm_runtime_put(dev); return -EBUSY; } @@ -962,7 +973,7 @@ static int pm_genpd_prepare(struct device *dev) pm_runtime_enable(dev); } - pm_runtime_put_sync(dev); + pm_runtime_put(dev); return ret; } @@ -1328,7 +1339,7 @@ static void pm_genpd_complete(struct device *dev) pm_generic_complete(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); - pm_runtime_idle(dev); + pm_request_idle(dev); } } @@ -2144,7 +2155,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; - genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; genpd->domain.ops.prepare = pm_genpd_prepare; genpd->domain.ops.suspend = pm_genpd_suspend; genpd->domain.ops.suspend_late = pm_genpd_suspend_late; diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 28dee3053f1..a089e3bcdfb 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -6,7 +6,6 @@ * This file is released under the GPLv2. */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/pm_domain.h> #include <linux/pm_qos.h> diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index d03d290f31c..96a92db83ca 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -10,30 +10,7 @@ #include <linux/pm_runtime.h> #include <linux/export.h> -#ifdef CONFIG_PM_RUNTIME -/** - * pm_generic_runtime_idle - Generic runtime idle callback for subsystems. - * @dev: Device to handle. - * - * If PM operations are defined for the @dev's driver and they include - * ->runtime_idle(), execute it and return its error code, if nonzero. - * Otherwise, execute pm_runtime_suspend() for the device and return 0. - */ -int pm_generic_runtime_idle(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm && pm->runtime_idle) { - int ret = pm->runtime_idle(dev); - if (ret) - return ret; - } - - pm_runtime_suspend(dev); - return 0; -} -EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); - +#ifdef CONFIG_PM /** * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. * @dev: Device to suspend. @@ -71,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev) return ret; } EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP /** @@ -308,7 +285,7 @@ int pm_generic_restore(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_restore); /** - * pm_generic_complete - Generic routine competing a device power transition. + * pm_generic_complete - Generic routine completing a device power transition. * @dev: Device to handle. * * Complete a device power transition during a system-wide power transition. @@ -324,6 +301,6 @@ void pm_generic_complete(struct device *dev) * Let runtime PM try to suspend devices that haven't been in use before * going into the system-wide sleep state we're resuming from. 
*/ - pm_runtime_idle(dev); + pm_request_idle(dev); } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 2b7f77d3fcb..bf412961a93 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -28,7 +28,11 @@ #include <linux/sched.h> #include <linux/async.h> #include <linux/suspend.h> +#include <trace/events/power.h> +#include <linux/cpufreq.h> #include <linux/cpuidle.h> +#include <linux/timer.h> + #include "../base.h" #include "power.h" @@ -56,6 +60,30 @@ static pm_message_t pm_transition; static int async_error; +static char *pm_verb(int event) +{ + switch (event) { + case PM_EVENT_SUSPEND: + return "suspend"; + case PM_EVENT_RESUME: + return "resume"; + case PM_EVENT_FREEZE: + return "freeze"; + case PM_EVENT_QUIESCE: + return "quiesce"; + case PM_EVENT_HIBERNATE: + return "hibernate"; + case PM_EVENT_THAW: + return "thaw"; + case PM_EVENT_RESTORE: + return "restore"; + case PM_EVENT_RECOVER: + return "recover"; + default: + return "(unknown PM event)"; + } +} + /** * device_pm_sleep_init - Initialize system suspend-related device fields. * @dev: Device object being initialized. @@ -64,6 +92,8 @@ void device_pm_sleep_init(struct device *dev) { dev->power.is_prepared = false; dev->power.is_suspended = false; + dev->power.is_noirq_suspended = false; + dev->power.is_late_suspended = false; init_completion(&dev->power.completion); complete_all(&dev->power.completion); dev->power.wakeup = NULL; @@ -99,7 +129,6 @@ void device_pm_add(struct device *dev) dev_warn(dev, "parent %s should not be sleeping\n", dev_name(dev->parent)); list_add_tail(&dev->power.entry, &dpm_list); - dev_pm_qos_constraints_init(dev); mutex_unlock(&dpm_list_mtx); } @@ -113,7 +142,6 @@ void device_pm_remove(struct device *dev) dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); mutex_lock(&dpm_list_mtx); - dev_pm_qos_constraints_destroy(dev); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); device_wakeup_disable(dev); @@ -174,15 +202,17 @@ static ktime_t initcall_debug_start(struct device *dev) } static void initcall_debug_report(struct device *dev, ktime_t calltime, - int error) + int error, pm_message_t state, char *info) { - ktime_t delta, rettime; + ktime_t rettime; + s64 nsecs; + + rettime = ktime_get(); + nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); if (pm_print_times_enabled) { - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), - error, (unsigned long long)ktime_to_ns(delta) >> 10); + error, (unsigned long long)nsecs >> 10); } } @@ -311,30 +341,6 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat return NULL; } -static char *pm_verb(int event) -{ - switch (event) { - case PM_EVENT_SUSPEND: - return "suspend"; - case PM_EVENT_RESUME: - return "resume"; - case PM_EVENT_FREEZE: - return "freeze"; - case PM_EVENT_QUIESCE: - return "quiesce"; - case PM_EVENT_HIBERNATE: - return "hibernate"; - case PM_EVENT_THAW: - return "thaw"; - case PM_EVENT_RESTORE: - return "restore"; - case PM_EVENT_RECOVER: - return "recover"; - default: - return "(unknown PM event)"; - } -} - static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) { dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), @@ -378,14 +384,81 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, calltime = initcall_debug_start(dev); pm_dev_dbg(dev, state, info); + trace_device_pm_callback_start(dev, info, state.event); error = cb(dev); + trace_device_pm_callback_end(dev, error); suspend_report_result(cb, error); - initcall_debug_report(dev, calltime, error); + initcall_debug_report(dev, calltime, error, state, info); return error; } +#ifdef CONFIG_DPM_WATCHDOG +struct dpm_watchdog { + struct device *dev; + struct task_struct *tsk; + struct timer_list timer; +}; + +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ + struct dpm_watchdog wd + +/** + * dpm_watchdog_handler - Driver suspend / resume watchdog handler. + * @data: Watchdog object address. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so panic() to + * capture a crash-dump in pstore. + */ +static void dpm_watchdog_handler(unsigned long data) +{ + struct dpm_watchdog *wd = (void *)data; + + dev_emerg(wd->dev, "**** DPM device timeout ****\n"); + show_stack(wd->tsk, NULL); + panic("%s %s: unrecoverable failure\n", + dev_driver_string(wd->dev), dev_name(wd->dev)); +} + +/** + * dpm_watchdog_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) +{ + struct timer_list *timer = &wd->timer; + + wd->dev = dev; + wd->tsk = current; + + init_timer_on_stack(timer); + /* use same timeout value for both suspend and resume */ + timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; + timer->function = dpm_watchdog_handler; + timer->data = (unsigned long)wd; + add_timer(timer); +} + +/** + * dpm_watchdog_clear - Disable suspend/resume watchdog. + * @wd: Watchdog to disable. 
+ */ +static void dpm_watchdog_clear(struct dpm_watchdog *wd) +{ + struct timer_list *timer = &wd->timer; + + del_timer_sync(timer); + destroy_timer_on_stack(timer); +} +#else +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) +#define dpm_watchdog_set(x, y) +#define dpm_watchdog_clear(x) +#endif + /*------------------------- Resume routines -------------------------*/ /** @@ -396,7 +469,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -405,9 +478,14 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) + goto Out; + + if (!dev->power.is_noirq_suspended) goto Out; + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "noirq power domain "; callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -428,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_noirq_suspended = false; Out: + complete_all(&dev->power.completion); TRACE_RESUME(error); return error; } +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_noirq(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. * @state: PM transition of the system being carried out. @@ -443,32 +541,53 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) */ static void dpm_resume_noirq(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_noirq_list)) { - struct device *dev = to_device(dpm_noirq_list.next); - int error; + pm_transition = state; + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. 
+ */ + list_for_each_entry(dev, &dpm_noirq_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_noirq, dev); + } + } + + while (!list_empty(&dpm_noirq_list)) { + dev = to_device(dpm_noirq_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_late_early_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_noirq(dev, state); - if (error) { - suspend_stats.failed_resume_noirq++; - dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " noirq", error); + if (!is_async(dev)) { + int error; + + error = device_resume_noirq(dev, state, false); + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " noirq", error); + } } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); cpuidle_resume(); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); } /** @@ -478,7 +597,7 @@ static void dpm_resume_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_resume_early(struct device *dev, pm_message_t state) +static int device_resume_early(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; @@ -487,9 +606,14 @@ static int device_resume_early(struct device *dev, pm_message_t state) TRACE_DEVICE(dev); TRACE_RESUME(0); - if (dev->power.syscore) + if (dev->power.syscore || dev->power.direct_complete) + goto Out; + + if (!dev->power.is_late_suspended) goto Out; + dpm_wait(dev->parent, async); + if (dev->pm_domain) { info = "early power domain "; callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -510,44 +634,78 @@ static int device_resume_early(struct device *dev, pm_message_t state) } error = dpm_run_callback(callback, dev, state, info); + dev->power.is_late_suspended = false; Out: TRACE_RESUME(error); pm_runtime_enable(dev); + complete_all(&dev->power.completion); return error; } +static void async_resume_early(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_early(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + /** * dpm_resume_early - Execute "early resume" callbacks for all devices. * @state: PM transition of the system being carried out. */ static void dpm_resume_early(pm_message_t state) { + struct device *dev; ktime_t starttime = ktime_get(); + trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_late_early_list)) { - struct device *dev = to_device(dpm_late_early_list.next); - int error; + pm_transition = state; + + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. 
+ */ + list_for_each_entry(dev, &dpm_late_early_list, power.entry) { + reinit_completion(&dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume_early, dev); + } + } + while (!list_empty(&dpm_late_early_list)) { + dev = to_device(dpm_late_early_list.next); get_device(dev); list_move_tail(&dev->power.entry, &dpm_suspended_list); mutex_unlock(&dpm_list_mtx); - error = device_resume_early(dev, state); - if (error) { - suspend_stats.failed_resume_early++; - dpm_save_failed_step(SUSPEND_RESUME_EARLY); - dpm_save_failed_dev(dev_name(dev)); - pm_dev_err(dev, state, " early", error); - } + if (!is_async(dev)) { + int error; + error = device_resume_early(dev, state, false); + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " early", error); + } + } mutex_lock(&dpm_list_mtx); put_device(dev); } mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); dpm_show_time(starttime, state, "early"); + trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); } /** @@ -572,6 +730,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; char *info = NULL; int error = 0; + DECLARE_DPM_WATCHDOG_ON_STACK(wd); TRACE_DEVICE(dev); TRACE_RESUME(0); @@ -579,7 +738,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + /* Match the pm_runtime_disable() in __device_suspend(). */ + pm_runtime_enable(dev); + goto Complete; + } + dpm_wait(dev->parent, async); + dpm_watchdog_set(&wd, dev); device_lock(dev); /* @@ -638,6 +804,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) Unlock: device_unlock(dev); + dpm_watchdog_clear(&wd); Complete: complete_all(&dev->power.completion); @@ -658,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie) put_device(dev); } -static bool is_async(struct device *dev) -{ - return dev->power.async_suspend && pm_async_enabled - && !pm_trace_is_enabled(); -} - /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. 
@@ -676,6 +837,7 @@ void dpm_resume(pm_message_t state) struct device *dev; ktime_t starttime = ktime_get(); + trace_suspend_resume(TPS("dpm_resume"), state.event, true); might_sleep(); mutex_lock(&dpm_list_mtx); @@ -683,7 +845,7 @@ void dpm_resume(pm_message_t state) async_error = 0; list_for_each_entry(dev, &dpm_suspended_list, power.entry) { - INIT_COMPLETION(dev->power.completion); + reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); async_schedule(async_resume, dev); @@ -715,6 +877,9 @@ void dpm_resume(pm_message_t state) mutex_unlock(&dpm_list_mtx); async_synchronize_full(); dpm_show_time(starttime, state, NULL); + + cpufreq_resume(); + trace_suspend_resume(TPS("dpm_resume"), state.event, false); } /** @@ -753,12 +918,14 @@ static void device_complete(struct device *dev, pm_message_t state) if (callback) { pm_dev_dbg(dev, state, info); + trace_device_pm_callback_start(dev, info, state.event); callback(dev); + trace_device_pm_callback_end(dev, 0); } device_unlock(dev); - pm_runtime_put_sync(dev); + pm_runtime_put(dev); } /** @@ -772,6 +939,7 @@ void dpm_complete(pm_message_t state) { struct list_head list; + trace_suspend_resume(TPS("dpm_complete"), state.event, true); might_sleep(); INIT_LIST_HEAD(&list); @@ -791,6 +959,7 @@ void dpm_complete(pm_message_t state) } list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); + trace_suspend_resume(TPS("dpm_complete"), state.event, false); } /** @@ -839,13 +1008,24 @@ static pm_message_t resume_event(pm_message_t sleep_state) * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; - if (dev->power.syscore) - return 0; + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + + if (dev->power.syscore || dev->power.direct_complete) + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "noirq power domain "; @@ -866,7 +1046,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) callback = pm_noirq_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_noirq_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_noirq(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_noirq, dev); + return 0; + } + return __device_suspend_noirq(dev, pm_transition, false); } /** @@ -881,22 +1095,24 @@ static int dpm_suspend_noirq(pm_message_t state) ktime_t starttime = ktime_get(); int error = 0; + trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); cpuidle_pause(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while 
(!list_empty(&dpm_late_early_list)) { struct device *dev = to_device(dpm_late_early_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_noirq(dev, state); + error = device_suspend_noirq(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " noirq", error); - suspend_stats.failed_suspend_noirq++; - dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -905,16 +1121,22 @@ static int dpm_suspend_noirq(pm_message_t state) list_move(&dev->power.entry, &dpm_noirq_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if (!error) + error = async_error; + + if (error) { + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); dpm_resume_noirq(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "noirq"); + } + trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); return error; } @@ -925,15 +1147,26 @@ static int dpm_suspend_noirq(pm_message_t state) * * Runtime PM is disabled for @dev while this function is being executed. */ -static int device_suspend_late(struct device *dev, pm_message_t state) +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) { pm_callback_t callback = NULL; char *info = NULL; + int error = 0; __pm_runtime_disable(dev, false); - if (dev->power.syscore) - return 0; + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + + if (dev->power.syscore || dev->power.direct_complete) + goto Complete; + + dpm_wait_for_children(dev, async); if (dev->pm_domain) { info = "late power domain "; @@ -954,7 +1187,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state) callback = pm_late_early_op(dev->driver->pm, state); } - return dpm_run_callback(callback, dev, state, info); + error = dpm_run_callback(callback, dev, state, info); + if (!error) + dev->power.is_late_suspended = true; + else + async_error = error; + +Complete: + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_late(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ + reinit_completion(&dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend_late, dev); + return 0; + } + + return __device_suspend_late(dev, pm_transition, false); } /** @@ -966,20 +1233,22 @@ static int dpm_suspend_late(pm_message_t state) ktime_t starttime = ktime_get(); int error = 0; + trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_suspended_list)) { struct device *dev = to_device(dpm_suspended_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend_late(dev, state); + error = device_suspend_late(dev); mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); - suspend_stats.failed_suspend_late++; - dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_save_failed_dev(dev_name(dev)); put_device(dev); break; @@ -988,17 
+1257,19 @@ static int dpm_suspend_late(pm_message_t state) list_move(&dev->power.entry, &dpm_late_early_list); put_device(dev); - if (pm_wakeup_pending()) { - error = -EBUSY; + if (async_error) break; - } } mutex_unlock(&dpm_list_mtx); - if (error) + async_synchronize_full(); + if (error) { + suspend_stats.failed_suspend_late++; + dpm_save_failed_step(SUSPEND_SUSPEND_LATE); dpm_resume_early(resume_event(state)); - else + } else { dpm_show_time(starttime, state, "late"); - + } + trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); return error; } @@ -1029,17 +1300,20 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end); * @cb: Suspend callback to execute. */ static int legacy_suspend(struct device *dev, pm_message_t state, - int (*cb)(struct device *dev, pm_message_t state)) + int (*cb)(struct device *dev, pm_message_t state), + char *info) { int error; ktime_t calltime; calltime = initcall_debug_start(dev); + trace_device_pm_callback_start(dev, info, state.event); error = cb(dev, state); + trace_device_pm_callback_end(dev, error); suspend_report_result(cb, error); - initcall_debug_report(dev, calltime, error); + initcall_debug_report(dev, calltime, error, state, info); return error; } @@ -1055,6 +1329,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; char *info = NULL; int error = 0; + DECLARE_DPM_WATCHDOG_ON_STACK(wd); dpm_wait_for_children(dev, async); @@ -1078,6 +1353,18 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (dev->power.direct_complete) { + if (pm_runtime_status_suspended(dev)) { + pm_runtime_disable(dev); + if (pm_runtime_suspended_if_enabled(dev)) + goto Complete; + + pm_runtime_enable(dev); + } + dev->power.direct_complete = false; + } + + dpm_watchdog_set(&wd, dev); device_lock(dev); if (dev->pm_domain) { @@ -1099,7 +1386,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto Run; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); - error = legacy_suspend(dev, state, dev->class->suspend); + error = legacy_suspend(dev, state, dev->class->suspend, + "legacy class "); goto End; } } @@ -1110,7 +1398,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) callback = pm_op(dev->bus->pm, state); } else if (dev->bus->suspend) { pm_dev_dbg(dev, state, "legacy bus "); - error = legacy_suspend(dev, state, dev->bus->suspend); + error = legacy_suspend(dev, state, dev->bus->suspend, + "legacy bus "); goto End; } } @@ -1125,13 +1414,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: if (!error) { + struct device *parent = dev->parent; + dev->power.is_suspended = true; - if (dev->power.wakeup_path - && dev->parent && !dev->parent->power.ignore_children) - dev->parent->power.wakeup_path = true; + if (parent) { + spin_lock_irq(&parent->power.lock); + + dev->parent->power.direct_complete = false; + if (dev->power.wakeup_path + && !dev->parent->power.ignore_children) + dev->parent->power.wakeup_path = true; + + spin_unlock_irq(&parent->power.lock); + } } device_unlock(dev); + dpm_watchdog_clear(&wd); Complete: complete_all(&dev->power.completion); @@ -1157,7 +1456,7 @@ static void async_suspend(void *data, async_cookie_t cookie) static int device_suspend(struct device *dev) { - INIT_COMPLETION(dev->power.completion); + reinit_completion(&dev->power.completion); if (pm_async_enabled && dev->power.async_suspend) { 
get_device(dev); @@ -1177,8 +1476,11 @@ int dpm_suspend(pm_message_t state) ktime_t starttime = ktime_get(); int error = 0; + trace_suspend_resume(TPS("dpm_suspend"), state.event, true); might_sleep(); + cpufreq_suspend(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -1212,6 +1514,7 @@ int dpm_suspend(pm_message_t state) dpm_save_failed_step(SUSPEND_SUSPEND); } else dpm_show_time(starttime, state, NULL); + trace_suspend_resume(TPS("dpm_suspend"), state.event, false); return error; } @@ -1227,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state) { int (*callback)(struct device *) = NULL; char *info = NULL; - int error = 0; + int ret = 0; if (dev->power.syscore) return 0; @@ -1264,13 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state) } if (callback) { - error = callback(dev); - suspend_report_result(callback, error); + trace_device_pm_callback_start(dev, info, state.event); + ret = callback(dev); + trace_device_pm_callback_end(dev, ret); } device_unlock(dev); - return error; + if (ret < 0) { + suspend_report_result(callback, ret); + pm_runtime_put(dev); + return ret; + } + /* + * A positive return value from ->prepare() means "this device appears + * to be runtime-suspended and its state is fine, so if it really is + * runtime-suspended, you can leave it in that state provided that you + * will do the same thing with all of its descendants". This only + * applies to suspend transitions, however. + */ + spin_lock_irq(&dev->power.lock); + dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; + spin_unlock_irq(&dev->power.lock); + return 0; } /** @@ -1283,6 +1602,7 @@ int dpm_prepare(pm_message_t state) { int error = 0; + trace_suspend_resume(TPS("dpm_prepare"), state.event, true); might_sleep(); mutex_lock(&dpm_list_mtx); @@ -1313,6 +1633,7 @@ int dpm_prepare(pm_message_t state) put_device(dev); } mutex_unlock(&dpm_list_mtx); + trace_suspend_resume(TPS("dpm_prepare"), state.event, false); return error; } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 50b2831e027..89ced955faf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -14,14 +14,12 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/slab.h> -#include <linux/cpufreq.h> #include <linux/device.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/rcupdate.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/of.h> #include <linux/export.h> @@ -42,7 +40,7 @@ */ /** - * struct opp - Generic OPP description structure + * struct dev_pm_opp - Generic OPP description structure * @node: opp list node. The nodes are maintained throughout the lifetime * of boot. It is expected only an optimal set of OPPs are * added to the library by the SoC framework. @@ -55,10 +53,11 @@ * @rate: Frequency in hertz * @u_volt: Nominal voltage in microvolts corresponding to this OPP * @dev_opp: points back to the device_opp struct this opp belongs to + * @head: RCU callback head used for deferred freeing * * This structure stores the OPP information for a given device. 
*/ -struct opp { +struct dev_pm_opp { struct list_head node; bool available; @@ -135,7 +134,7 @@ static struct device_opp *find_device_opp(struct device *dev) } /** - * opp_get_voltage() - Gets the voltage corresponding to an available opp + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp * @opp: opp for which voltage has to be returned for * * Return voltage in micro volt corresponding to the opp, else @@ -149,9 +148,9 @@ static struct device_opp *find_device_opp(struct device *dev) * prior to unlocking with rcu_read_unlock() to maintain the integrity of the * pointer. */ -unsigned long opp_get_voltage(struct opp *opp) +unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { - struct opp *tmp_opp; + struct dev_pm_opp *tmp_opp; unsigned long v = 0; tmp_opp = rcu_dereference(opp); @@ -162,10 +161,10 @@ unsigned long opp_get_voltage(struct opp *opp) return v; } -EXPORT_SYMBOL(opp_get_voltage); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); /** - * opp_get_freq() - Gets the frequency corresponding to an available opp + * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp * @opp: opp for which frequency has to be returned for * * Return frequency in hertz corresponding to the opp, else @@ -179,9 +178,9 @@ EXPORT_SYMBOL(opp_get_voltage); * prior to unlocking with rcu_read_unlock() to maintain the integrity of the * pointer. */ -unsigned long opp_get_freq(struct opp *opp) +unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) { - struct opp *tmp_opp; + struct dev_pm_opp *tmp_opp; unsigned long f = 0; tmp_opp = rcu_dereference(opp); @@ -192,10 +191,10 @@ unsigned long opp_get_freq(struct opp *opp) return f; } -EXPORT_SYMBOL(opp_get_freq); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); /** - * opp_get_opp_count() - Get number of opps available in the opp list + * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list * @dev: device for which we do this operation * * This function returns the number of available opps if there are any, @@ -205,10 +204,10 @@ EXPORT_SYMBOL(opp_get_freq); * internally references two RCU protected structures: device_opp and opp which * are safe as long as we are under a common RCU locked section. */ -int opp_get_opp_count(struct device *dev) +int dev_pm_opp_get_opp_count(struct device *dev) { struct device_opp *dev_opp; - struct opp *temp_opp; + struct dev_pm_opp *temp_opp; int count = 0; dev_opp = find_device_opp(dev); @@ -225,10 +224,10 @@ int opp_get_opp_count(struct device *dev) return count; } -EXPORT_SYMBOL(opp_get_opp_count); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); /** - * opp_find_freq_exact() - search for an exact frequency + * dev_pm_opp_find_freq_exact() - search for an exact frequency * @dev: device for which we do this operation * @freq: frequency to search for * @available: true/false - match for available opp @@ -253,11 +252,12 @@ EXPORT_SYMBOL(opp_get_opp_count); * under the locked area. The pointer returned must be used prior to unlocking * with rcu_read_unlock() to maintain the integrity of the pointer. 
*/ -struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, - bool available) +struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, + unsigned long freq, + bool available) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) { @@ -276,10 +276,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, return opp; } -EXPORT_SYMBOL(opp_find_freq_exact); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); /** - * opp_find_freq_ceil() - Search for an rounded ceil freq + * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq * @dev: device for which we do this operation * @freq: Start frequency * @@ -299,10 +299,11 @@ EXPORT_SYMBOL(opp_find_freq_exact); * under the locked area. The pointer returned must be used prior to unlocking * with rcu_read_unlock() to maintain the integrity of the pointer. */ -struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, + unsigned long *freq) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -323,10 +324,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) return opp; } -EXPORT_SYMBOL(opp_find_freq_ceil); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); /** - * opp_find_freq_floor() - Search for a rounded floor freq + * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq * @dev: device for which we do this operation * @freq: Start frequency * @@ -346,10 +347,11 @@ EXPORT_SYMBOL(opp_find_freq_ceil); * under the locked area. The pointer returned must be used prior to unlocking * with rcu_read_unlock() to maintain the integrity of the pointer. */ -struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, + unsigned long *freq) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -374,32 +376,39 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) return opp; } -EXPORT_SYMBOL(opp_find_freq_floor); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); /** - * opp_add() - Add an OPP table from a table definitions + * dev_pm_opp_add() - Add an OPP table from a table definitions * @dev: device for which we do this operation * @freq: Frequency in Hz for this OPP * @u_volt: Voltage in uVolts for this OPP * * This function adds an opp definition to the opp list and returns status. * The opp is made available by default and it can be controlled using - * opp_enable/disable functions. + * dev_pm_opp_enable/disable functions. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function internally uses RCU updater strategy with mutex locks * to keep the integrity of the internal data structures. Callers should ensure * that this function is *NOT* called under RCU protection or in contexts where * mutex cannot be locked. 
+ * + * Return: + * 0: On success OR + * Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST: Freq are same and volt are different OR + * Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM: Memory allocation failure */ -int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { struct device_opp *dev_opp = NULL; - struct opp *opp, *new_opp; + struct dev_pm_opp *opp, *new_opp; struct list_head *head; /* allocate new OPP node */ - new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); + new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL); if (!new_opp) { dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); return -ENOMEM; @@ -440,15 +449,31 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) new_opp->u_volt = u_volt; new_opp->available = true; - /* Insert new OPP in order of increasing frequency */ + /* + * Insert new OPP in order of increasing frequency + * and discard if already present + */ head = &dev_opp->opp_list; list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { - if (new_opp->rate < opp->rate) + if (new_opp->rate <= opp->rate) break; else head = &opp->node; } + /* Duplicate OPPs ? */ + if (new_opp->rate == opp->rate) { + int ret = opp->available && new_opp->u_volt == opp->u_volt ? + 0 : -EEXIST; + + dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", + __func__, opp->rate, opp->u_volt, opp->available, + new_opp->rate, new_opp->u_volt, new_opp->available); + mutex_unlock(&dev_opp_list_lock); + kfree(new_opp); + return ret; + } + list_add_rcu(&new_opp->node, head); mutex_unlock(&dev_opp_list_lock); @@ -459,6 +484,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); return 0; } +EXPORT_SYMBOL_GPL(dev_pm_opp_add); /** * opp_set_availability() - helper to set the availability of an opp @@ -483,11 +509,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); - struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); + struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); int r = 0; /* keep the node allocated */ - new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); + new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); if (!new_opp) { dev_warn(dev, "%s: Unable to create OPP\n", __func__); return -ENOMEM; @@ -550,13 +576,13 @@ unlock: } /** - * opp_enable() - Enable a specific OPP + * dev_pm_opp_enable() - Enable a specific OPP * @dev: device for which we do this operation * @freq: OPP frequency to enable * * Enables a provided opp. If the operation is valid, this returns 0, else the * corresponding error value. It is meant to be used for users an OPP available - * after being temporarily made unavailable with opp_disable. + * after being temporarily made unavailable with dev_pm_opp_disable. * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function indirectly uses RCU and mutex locks to keep the @@ -564,21 +590,21 @@ unlock: * this function is *NOT* called under RCU protection or in contexts where * mutex locking or synchronize_rcu() blocking calls cannot be used. 
*/ -int opp_enable(struct device *dev, unsigned long freq) +int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return opp_set_availability(dev, freq, true); } -EXPORT_SYMBOL(opp_enable); +EXPORT_SYMBOL_GPL(dev_pm_opp_enable); /** - * opp_disable() - Disable a specific OPP + * dev_pm_opp_disable() - Disable a specific OPP * @dev: device for which we do this operation * @freq: OPP frequency to disable * * Disables a provided opp. If the operation is valid, this returns * 0, else the corresponding error value. It is meant to be a temporary * control by users to make this OPP not available until the circumstances are - * right to make it available again (with a call to opp_enable). + * right to make it available again (with a call to dev_pm_opp_enable). * * Locking: The internal device_opp and opp structures are RCU protected. * Hence this function indirectly uses RCU and mutex locks to keep the @@ -586,105 +612,17 @@ EXPORT_SYMBOL(opp_enable); * this function is *NOT* called under RCU protection or in contexts where * mutex locking or synchronize_rcu() blocking calls cannot be used. */ -int opp_disable(struct device *dev, unsigned long freq) +int dev_pm_opp_disable(struct device *dev, unsigned long freq) { return opp_set_availability(dev, freq, false); } -EXPORT_SYMBOL(opp_disable); - -#ifdef CONFIG_CPU_FREQ -/** - * opp_init_cpufreq_table() - create a cpufreq table for a device - * @dev: device for which we do this operation - * @table: Cpufreq table returned back to caller - * - * Generate a cpufreq table for a provided device- this assumes that the - * opp list is already initialized and ready for usage. - * - * This function allocates required memory for the cpufreq table. It is - * expected that the caller does the required maintenance such as freeing - * the table as required. - * - * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM - * if no memory available for the operation (table is not populated), returns 0 - * if successful and table is populated. - * - * WARNING: It is important for the callers to ensure refreshing their copy of - * the table if any of the mentioned functions have been invoked in the interim. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * To simplify the logic, we pretend we are updater and hold relevant mutex here - * Callers should ensure that this function is *NOT* called under RCU protection - * or in contexts where mutex locking cannot be used. 
- */ -int opp_init_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ - struct device_opp *dev_opp; - struct opp *opp; - struct cpufreq_frequency_table *freq_table; - int i = 0; - - /* Pretend as if I am an updater */ - mutex_lock(&dev_opp_list_lock); - - dev_opp = find_device_opp(dev); - if (IS_ERR(dev_opp)) { - int r = PTR_ERR(dev_opp); - mutex_unlock(&dev_opp_list_lock); - dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); - return r; - } - - freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * - (opp_get_opp_count(dev) + 1), GFP_KERNEL); - if (!freq_table) { - mutex_unlock(&dev_opp_list_lock); - dev_warn(dev, "%s: Unable to allocate frequency table\n", - __func__); - return -ENOMEM; - } - - list_for_each_entry(opp, &dev_opp->opp_list, node) { - if (opp->available) { - freq_table[i].index = i; - freq_table[i].frequency = opp->rate / 1000; - i++; - } - } - mutex_unlock(&dev_opp_list_lock); - - freq_table[i].index = i; - freq_table[i].frequency = CPUFREQ_TABLE_END; - - *table = &freq_table[0]; - - return 0; -} - -/** - * opp_free_cpufreq_table() - free the cpufreq table - * @dev: device for which we do this operation - * @table: table to free - * - * Free up the table allocated by opp_init_cpufreq_table - */ -void opp_free_cpufreq_table(struct device *dev, - struct cpufreq_frequency_table **table) -{ - if (!table) - return; - - kfree(*table); - *table = NULL; -} -#endif /* CONFIG_CPU_FREQ */ +EXPORT_SYMBOL_GPL(dev_pm_opp_disable); /** - * opp_get_notifier() - find notifier_head of the device with opp + * dev_pm_opp_get_notifier() - find notifier_head of the device with opp * @dev: device pointer used to lookup device OPPs. */ -struct srcu_notifier_head *opp_get_notifier(struct device *dev) +struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) { struct device_opp *dev_opp = find_device_opp(dev); @@ -728,14 +666,13 @@ int of_init_opp_table(struct device *dev) unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); - if (opp_add(dev, freq, volt)) { + if (dev_pm_opp_add(dev, freq, volt)) dev_warn(dev, "%s: Failed to add OPP %ld\n", __func__, freq); - continue; - } nr -= 2; } return 0; } +EXPORT_SYMBOL_GPL(of_init_opp_table); #endif diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b16686a0a5a..a21223d9592 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev) { if (!dev->power.early_init) { spin_lock_init(&dev->power.lock); - dev->power.power_state = PMSG_INVALID; + dev->power.qos = NULL; dev->power.early_init = true; } } @@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *); static inline void device_pm_sleep_init(struct device *dev) {} -static inline void device_pm_add(struct device *dev) -{ - dev_pm_qos_constraints_init(dev); -} +static inline void device_pm_add(struct device *dev) {} static inline void device_pm_remove(struct device *dev) { - dev_pm_qos_constraints_destroy(dev); pm_runtime_remove(dev); } @@ -93,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev); extern void rpm_sysfs_remove(struct device *dev); extern int wakeup_sysfs_add(struct device *dev); extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct 
device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index d21349544ce..36b9eb4862c 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -41,10 +41,13 @@ #include <linux/mutex.h> #include <linux/export.h> #include <linux/pm_runtime.h> +#include <linux/err.h> +#include <trace/events/power.h> #include "power.h" static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); @@ -61,7 +64,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) struct pm_qos_flags *pqf; s32 val; - if (!qos) + if (IS_ERR_OR_NULL(qos)) return PM_QOS_FLAGS_UNDEFINED; pqf = &qos->flags; @@ -91,6 +94,7 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) return ret; } +EXPORT_SYMBOL_GPL(dev_pm_qos_flags); /** * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. @@ -100,7 +104,8 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) */ s32 __dev_pm_qos_read_value(struct device *dev) { - return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; + return IS_ERR_OR_NULL(dev->power.qos) ? + 0 : pm_qos_read_value(&dev->power.qos->resume_latency); } /** @@ -136,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req, int ret; switch(req->type) { - case DEV_PM_QOS_LATENCY: - ret = pm_qos_update_target(&qos->latency, &req->data.pnode, - action, value); + case DEV_PM_QOS_RESUME_LATENCY: + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); if (ret) { - value = pm_qos_read_value(&qos->latency); + value = pm_qos_read_value(&qos->resume_latency); blocking_notifier_call_chain(&dev_pm_notifiers, (unsigned long)value, req); } break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; case DEV_PM_QOS_FLAGS: ret = pm_qos_update_flags(&qos->flags, &req->data.flr, action, value); @@ -181,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) } BLOCKING_INIT_NOTIFIER_HEAD(n); - c = &qos->latency; + c = &qos->resume_latency; plist_head_init(&c->list); - c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; - c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; c->type = PM_QOS_MIN; c->notifiers = n; + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + INIT_LIST_HEAD(&qos->flags.list); spin_lock_irq(&dev->power.lock); @@ -197,20 +218,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) return 0; } -/** - * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. - * @dev: target device - * - * Called from the device PM subsystem during device insertion under - * device_pm_lock(). 
- */ -void dev_pm_qos_constraints_init(struct device *dev) -{ - mutex_lock(&dev_pm_qos_mtx); - dev->power.qos = NULL; - dev->power.power_state = PMSG_ON; - mutex_unlock(&dev_pm_qos_mtx); -} +static void __dev_pm_qos_hide_latency_limit(struct device *dev); +static void __dev_pm_qos_hide_flags(struct device *dev); /** * dev_pm_qos_constraints_destroy @@ -225,22 +234,26 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct pm_qos_constraints *c; struct pm_qos_flags *f; + mutex_lock(&dev_pm_qos_sysfs_mtx); + /* * If the device's PM QoS resume latency limit or PM QoS flags have been * exposed to user space, they have to be hidden at this point. */ - dev_pm_qos_hide_latency_limit(dev); - dev_pm_qos_hide_flags(dev); + pm_qos_sysfs_remove_resume_latency(dev); + pm_qos_sysfs_remove_flags(dev); mutex_lock(&dev_pm_qos_mtx); - dev->power.power_state = PMSG_INVALID; + __dev_pm_qos_hide_latency_limit(dev); + __dev_pm_qos_hide_flags(dev); + qos = dev->power.qos; if (!qos) goto out; /* Flush the constraints lists for the device. */ - c = &qos->latency; + c = &qos->resume_latency; plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -249,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } f = &qos->flags; list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -256,7 +274,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev) } spin_lock_irq(&dev->power.lock); - dev->power.qos = NULL; + dev->power.qos = ERR_PTR(-ENODEV); spin_unlock_irq(&dev->power.lock); kfree(c->notifiers); @@ -264,6 +282,42 @@ void dev_pm_qos_constraints_destroy(struct device *dev) out: mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); +} + +static bool dev_pm_qos_invalid_request(struct device *dev, + struct dev_pm_qos_request *req) +{ + return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE + && !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || dev_pm_qos_invalid_request(dev, req)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (!ret) { + req->dev = dev; + req->type = type; + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + } + return ret; } /** @@ -291,43 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) { - int ret = 0; - - if (!dev || !req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(dev_pm_qos_request_active(req), - "%s() called for already added request\n", __func__)) - return -EINVAL; - - req->dev = dev; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (!dev->power.qos) { - if (dev->power.power_state.event == PM_EVENT_INVALID) { - /* The device has 
been removed from the system. */ - req->dev = NULL; - ret = -ENODEV; - goto out; - } else { - /* - * Allocate the constraints data on the first call to - * add_request, i.e. only if the data is not already - * allocated and if the device has not been removed. - */ - ret = dev_pm_qos_constraints_allocate(dev); - } - } - - if (!ret) { - req->type = type; - ret = apply_constraint(req, PM_QOS_ADD_REQ, value); - } - - out: + ret = __dev_pm_qos_add_request(dev, req, type, value); mutex_unlock(&dev_pm_qos_mtx); - return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -343,11 +365,19 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 curr_value; int ret = 0; - if (!req->dev->power.qos) + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + + if (IS_ERR_OR_NULL(req->dev->power.qos)) return -ENODEV; switch(req->type) { - case DEV_PM_QOS_LATENCY: + case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: curr_value = req->data.pnode.prio; break; case DEV_PM_QOS_FLAGS: @@ -357,6 +387,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, return -EINVAL; } + trace_dev_pm_qos_update_request(dev_name(req->dev), req->type, + new_value); if (curr_value != new_value) ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); @@ -385,6 +417,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) { int ret; + mutex_lock(&dev_pm_qos_mtx); + ret = __dev_pm_qos_update_request(req, new_value); + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); + +static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret; + if (!req) /*guard against callers passing in null */ return -EINVAL; @@ -392,13 +435,15 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) "%s() called for unknown object\n", __func__)) return -EINVAL; - mutex_lock(&dev_pm_qos_mtx); - ret = __dev_pm_qos_update_request(req, new_value); - mutex_unlock(&dev_pm_qos_mtx); + if (IS_ERR_OR_NULL(req->dev->power.qos)) + return -ENODEV; + trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type, + PM_QOS_DEFAULT_VALUE); + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); return ret; } -EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); /** * dev_pm_qos_remove_request - modifies an existing qos request @@ -417,26 +462,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); */ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) { - int ret = 0; - - if (!req) /*guard against callers passing in null */ - return -EINVAL; - - if (WARN(!dev_pm_qos_request_active(req), - "%s() called for unknown object\n", __func__)) - return -EINVAL; + int ret; mutex_lock(&dev_pm_qos_mtx); - - if (req->dev->power.qos) { - ret = apply_constraint(req, PM_QOS_REMOVE_REQ, - PM_QOS_DEFAULT_VALUE); - memset(req, 0, sizeof(*req)); - } else { - /* Return if the device has been removed */ - ret = -ENODEV; - } - + ret = __dev_pm_qos_remove_request(req); mutex_unlock(&dev_pm_qos_mtx); return ret; } @@ -461,13 +490,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) mutex_lock(&dev_pm_qos_mtx); - if (!dev->power.qos) - ret = dev->power.power_state.event != PM_EVENT_INVALID ? 
- dev_pm_qos_constraints_allocate(dev) : -ENODEV; + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); if (!ret) - ret = blocking_notifier_chain_register( - dev->power.qos->latency.notifiers, notifier); + ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return ret; @@ -492,10 +522,9 @@ int dev_pm_qos_remove_notifier(struct device *dev, mutex_lock(&dev_pm_qos_mtx); /* Silently return if the constraints object is not present. */ - if (dev->power.qos) - retval = blocking_notifier_chain_unregister( - dev->power.qos->latency.notifiers, - notifier); + if (!IS_ERR_OR_NULL(dev->power.qos)) + retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); mutex_unlock(&dev_pm_qos_mtx); return retval; @@ -536,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. * @dev: Device whose ancestor to add the request for. * @req: Pointer to the preallocated handle. + * @type: Type of the request. * @value: Constraint latency value. */ int dev_pm_qos_add_ancestor_request(struct device *dev, - struct dev_pm_qos_request *req, s32 value) + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) { struct device *ancestor = dev->parent; int ret = -ENODEV; - while (ancestor && !ancestor->power.ignore_children) - ancestor = ancestor->parent; + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + break; + default: + ancestor = NULL; + } if (ancestor) - ret = dev_pm_qos_add_request(ancestor, req, - DEV_PM_QOS_LATENCY, value); + ret = dev_pm_qos_add_request(ancestor, req, type, value); if (ret < 0) req->dev = NULL; @@ -562,16 +603,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); static void __dev_pm_qos_drop_user_request(struct device *dev, enum dev_pm_qos_req_type type) { + struct dev_pm_qos_request *req = NULL; + switch(type) { - case DEV_PM_QOS_LATENCY: - dev_pm_qos_remove_request(dev->power.qos->latency_req); - dev->power.qos->latency_req = NULL; + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; break; case DEV_PM_QOS_FLAGS: - dev_pm_qos_remove_request(dev->power.qos->flags_req); + req = dev->power.qos->flags_req; dev->power.qos->flags_req = NULL; break; } + __dev_pm_qos_remove_request(req); + kfree(req); +} + +static void dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_drop_user_request(dev, type); + mutex_unlock(&dev_pm_qos_mtx); } /** @@ -587,36 +644,66 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) if (!device_is_registered(dev) || value < 0) return -EINVAL; - if (dev->power.qos && dev->power.qos->latency_req) - return -EEXIST; - req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; - ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); - if (ret < 0) + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); + if (ret < 0) { + 
kfree(req); return ret; + } - dev->power.qos->latency_req = req; - ret = pm_qos_sysfs_add_latency(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos)) + ret = -ENODEV; + else if (dev->power.qos->resume_latency_req) + ret = -EEXIST; + + if (ret < 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + mutex_unlock(&dev_pm_qos_mtx); + goto out; + } + dev->power.qos->resume_latency_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + + ret = pm_qos_sysfs_add_resume_latency(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); + out: + mutex_unlock(&dev_pm_qos_sysfs_mtx); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); +static void __dev_pm_qos_hide_latency_limit(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); +} + /** * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. * @dev: Device whose PM QoS latency limit is to be hidden from user space. */ void dev_pm_qos_hide_latency_limit(struct device *dev) { - if (dev->power.qos && dev->power.qos->latency_req) { - pm_qos_sysfs_remove_latency(dev); - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); - } + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_resume_latency(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); + mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); @@ -633,41 +720,70 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) if (!device_is_registered(dev)) return -EINVAL; - if (dev->power.qos && dev->power.qos->flags_req) - return -EEXIST; - req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; - pm_runtime_get_sync(dev); ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); - if (ret < 0) - goto fail; + if (ret < 0) { + kfree(req); + return ret; + } + + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + mutex_lock(&dev_pm_qos_mtx); + if (IS_ERR_OR_NULL(dev->power.qos)) + ret = -ENODEV; + else if (dev->power.qos->flags_req) + ret = -EEXIST; + + if (ret < 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + mutex_unlock(&dev_pm_qos_mtx); + goto out; + } dev->power.qos->flags_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + ret = pm_qos_sysfs_add_flags(dev); if (ret) - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); -fail: + out: + mutex_unlock(&dev_pm_qos_sysfs_mtx); pm_runtime_put(dev); return ret; } EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); +static void __dev_pm_qos_hide_flags(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); +} + /** * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. * @dev: Device whose PM QoS flags are to be hidden from user space. 
*/ void dev_pm_qos_hide_flags(struct device *dev) { - if (dev->power.qos && dev->power.qos->flags_req) { - pm_qos_sysfs_remove_flags(dev); - pm_runtime_get_sync(dev); - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); - pm_runtime_put(dev); - } + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_flags(dev); + mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); + pm_runtime_put(dev); } EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); @@ -682,12 +798,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) s32 value; int ret; - if (!dev->power.qos || !dev->power.qos->flags_req) - return -EINVAL; - pm_runtime_get_sync(dev); mutex_lock(&dev_pm_qos_mtx); + if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) { + ret = -EINVAL; + goto out; + } + value = dev_pm_qos_requested_flags(dev); if (set) value |= mask; @@ -696,9 +814,73 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); + out: mutex_unlock(&dev_pm_qos_mtx); pm_runtime_put(dev); + return ret; +} + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). 
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e..67c7938e430 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
 #include <trace/events/rpm.h>
 #include "power.h"
 
+#define RPM_GET_CALLBACK(dev, cb)				\
+({								\
+	int (*__rpm_cb)(struct device *__d);			\
+								\
+	if (dev->pm_domain)					\
+		__rpm_cb = dev->pm_domain->ops.cb;		\
+	else if (dev->type && dev->type->pm)			\
+		__rpm_cb = dev->type->pm->cb;			\
+	else if (dev->class && dev->class->pm)			\
+		__rpm_cb = dev->class->pm->cb;			\
+	else if (dev->bus && dev->bus->pm)			\
+		__rpm_cb = dev->bus->pm->cb;			\
+	else							\
+		__rpm_cb = NULL;				\
+								\
+	if (!__rpm_cb && dev->driver && dev->driver->pm)	\
+		__rpm_cb = dev->driver->pm->cb;			\
+								\
+	__rpm_cb;						\
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+	return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
 static int rpm_resume(struct device *dev, int rpmflags);
 static int rpm_suspend(struct device *dev, int rpmflags);
 
@@ -124,6 +161,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
 
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+	return dev->power.memalloc_noio;
+}
+
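RPM_GET_CALLBACK() above centralizes the lookup that rpm_idle(), rpm_suspend() and rpm_resume() used to open-code: the PM domain wins, then device type, class and bus, and the driver's own callback is consulted only when none of those layers supplies one. A simplified user-space model of that precedence (fake_dev and pick_callback are invented for illustration; the kernel macro checks the containing objects, not the callback pointers themselves):

#include <stdio.h>

typedef int (*pm_cb)(void);

struct fake_dev {
	pm_cb domain_cb, type_cb, class_cb, bus_cb, driver_cb;
};

static pm_cb pick_callback(const struct fake_dev *d)
{
	pm_cb cb;

	if (d->domain_cb)
		cb = d->domain_cb;
	else if (d->type_cb)
		cb = d->type_cb;
	else if (d->class_cb)
		cb = d->class_cb;
	else
		cb = d->bus_cb;		/* may still be NULL */

	if (!cb && d->driver_cb)
		cb = d->driver_cb;	/* driver callback only as a fallback */

	return cb;
}

static int bus_suspend(void)
{
	puts("suspending via the bus callback");
	return 0;
}

int main(void)
{
	/* No domain/type/class callback, so the bus one is chosen. */
	struct fake_dev d = { .bus_cb = bus_suspend };
	pm_cb cb = pick_callback(&d);

	return cb ? cb() : -1;
}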
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block device or network
+ * device drivers to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If memory allocation with GFP_KERNEL is called inside the runtime
+ * resume/suspend callback of any one of its ancestors (or the
+ * block device itself), the deadlock may be triggered inside the
+ * memory allocation since it might not complete until the block
+ * device becomes active and the involved page I/O finishes. This
+ * situation was first pointed out by Alan Stern. Network devices
+ * are involved in similar situations with iSCSI.
+ *
+ * The lock of dev_hotplug_mutex is held in the function for handling
+ * hotplug race because pm_runtime_set_memalloc_noio() may be called
+ * in async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (a block or network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+	static DEFINE_MUTEX(dev_hotplug_mutex);
+
+	mutex_lock(&dev_hotplug_mutex);
+	for (;;) {
+		bool enabled;
+
+		/* hold power lock since bitfield is not SMP-safe. */
+		spin_lock_irq(&dev->power.lock);
+		enabled = dev->power.memalloc_noio;
+		dev->power.memalloc_noio = enable;
+		spin_unlock_irq(&dev->power.lock);
+
+		/*
+		 * No need to enable ancestors any more if the device
+		 * has been enabled.
+		 */
+		if (enabled && enable)
+			break;
+
+		dev = dev->parent;
+
+		/*
+		 * Clear the flag of the parent device only if none of
+		 * the children set it, because an ancestor's flag may
+		 * have been set by any one of the descendants.
+		 */
+		if (!dev || (!enable &&
+			     device_for_each_child(dev, NULL,
+						   dev_memalloc_noio)))
+			break;
+	}
+	mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
 /**
  * rpm_check_suspend_allowed - Test whether a device may be suspended.
  * @dev: Device to test.
@@ -188,7 +295,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
  * Check if the device's runtime PM status allows it to be suspended. If
  * another idle notification has been started earlier, return immediately. If
  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -223,11 +331,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	/* Pending requests need to be canceled. */
 	dev->power.request = RPM_REQ_NONE;
 
-	if (dev->power.no_callbacks) {
-		/* Assume ->runtime_idle() callback would have suspended. */
-		retval = rpm_suspend(dev, rpmflags);
+	if (dev->power.no_callbacks)
 		goto out;
-	}
 
 	/* Carry out an asynchronous or a synchronous idle notification. */
 	if (rpmflags & RPM_ASYNC) {
@@ -236,34 +341,23 @@
 			dev->power.request_pending = true;
 			queue_work(pm_wq, &dev->power.work);
 		}
-		goto out;
+		trace_rpm_return_int(dev, _THIS_IP_, 0);
+		return 0;
 	}
 
 	dev->power.idle_notification = true;
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_idle;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_idle;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_idle;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_idle;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_idle;
+	callback = rpm_get_idle_cb(dev);
 
 	if (callback)
-		__rpm_callback(callback, dev);
+		retval = __rpm_callback(callback, dev);
 
 	dev->power.idle_notification = false;
 	wake_up_all(&dev->power.wait_queue);
 
  out:
 	trace_rpm_return_int(dev, _THIS_IP_, retval);
-	return retval;
+	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
 /**
@@ -278,7 +372,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 	if (!cb)
 		return -ENOSYS;
 
-	retval = __rpm_callback(cb, dev);
+	if (dev->power.memalloc_noio) {
+		unsigned int noio_flag;
+
+		/*
+		 * Deadlock might be caused if memory allocation with
+		 * GFP_KERNEL happens inside the runtime_suspend and
+		 * runtime_resume callbacks of one block device's
+		 * ancestor or the block device itself. Network devices
+		 * might be thought of as part of an iSCSI block device,
+		 * so network devices and their ancestors should be
+		 * marked as memalloc_noio too.
+		 */
+		noio_flag = memalloc_noio_save();
+		retval = __rpm_callback(cb, dev);
+		memalloc_noio_restore(noio_flag);
+	} else {
+		retval = __rpm_callback(cb, dev);
+	}
 
 	dev->power.runtime_error = retval;
 	return retval != -EACCES ? retval : -EIO;
@@ -406,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_SUSPENDING);
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_suspend;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_suspend;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_suspend;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_suspend;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_suspend;
+	callback = rpm_get_suspend_cb(dev);
 
 	retval = rpm_callback(callback, dev);
 	if (retval)
@@ -638,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 	__update_runtime_status(dev, RPM_RESUMING);
 
-	if (dev->pm_domain)
-		callback = dev->pm_domain->ops.runtime_resume;
-	else if (dev->type && dev->type->pm)
-		callback = dev->type->pm->runtime_resume;
-	else if (dev->class && dev->class->pm)
-		callback = dev->class->pm->runtime_resume;
-	else if (dev->bus && dev->bus->pm)
-		callback = dev->bus->pm->runtime_resume;
-	else
-		callback = NULL;
-
-	if (!callback && dev->driver && dev->driver->pm)
-		callback = dev->driver->pm->runtime_resume;
+	callback = rpm_get_resume_cb(dev);
 
 	retval = rpm_callback(callback, dev);
 	if (retval) {
@@ -1044,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
  * @dev: Device to handle.
  * @check_resume: If set, check if there's a resume request for the device.
  *
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
  * cancel all pending runtime PM requests for the device and wait for all
  * operations in progress to complete. The device can be either active or
  * suspended after its runtime PM has been disabled.
@@ -1313,5 +1400,88 @@ void pm_runtime_remove(struct device *dev)
 	if (dev->power.runtime_status == RPM_ACTIVE)
 		pm_runtime_set_suspended(dev);
 	if (dev->power.irq_safe && dev->parent)
-		pm_runtime_put_sync(dev->parent);
+		pm_runtime_put(dev->parent);
+}
+#endif
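The two helpers introduced below let a driver reuse its runtime PM callbacks for system sleep instead of duplicating them. A minimal sketch of the intended wiring, assuming a hypothetical foo driver (only the dev_pm_ops hookup is the point; the callback bodies are placeholders):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver callbacks; clock/regulator handling elided. */
static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks, assert resets, etc. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Ungate clocks and restore register context. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	/* System sleep simply forces the runtime PM transitions. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};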
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring it into
+ * suspend state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to
+ * make sure the device is put into a low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret = 0;
+
+	pm_runtime_disable(dev);
+
+	/*
+	 * Note that pm_runtime_status_suspended() returns false while
+	 * !CONFIG_PM_RUNTIME, which means the device will be put into low
+	 * power state.
+	 */
+	if (pm_runtime_status_suspended(dev))
+		return 0;
+
+	callback = rpm_get_suspend_cb(dev);
+
+	if (!callback) {
+		ret = -ENOSYS;
+		goto err;
+	}
+
+	ret = callback(dev);
+	if (ret)
+		goto err;
+
+	pm_runtime_set_suspended(dev);
+	return 0;
+err:
+	pm_runtime_enable(dev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the
+ * device into low power state by a call to pm_runtime_force_suspend(). Here
+ * we reverse those actions and bring the device into full power. We update
+ * the runtime PM status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to
+ * make sure the device is put into full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+	int (*callback)(struct device *);
+	int ret = 0;
+
+	callback = rpm_get_resume_cb(dev);
+
+	if (!callback) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
+	ret = callback(dev);
+	if (ret)
+		goto out;
+
+	pm_runtime_set_active(dev);
+	pm_runtime_mark_last_busy(dev);
+out:
+	pm_runtime_enable(dev);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a..95b181d1ca6 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -206,7 +206,7 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 	if (!dev->power.use_autosuspend)
 		return -EIO;
 
-	if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
+	if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
 		return -EINVAL;
 
 	device_lock(dev);
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
 		autosuspend_delay_ms_store);
 
-static ssize_t pm_qos_latency_show(struct device *dev,
-				   struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+	return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
 }
 
-static ssize_t pm_qos_latency_store(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t n)
 {
 	s32 value;
 	int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
 	if (value < 0)
 		return -EINVAL;
 
-	ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+					value);
 	return ret < 0 ?
ret : n; } static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_latency_show, pm_qos_latency_store); + pm_qos_resume_latency_show, pm_qos_resume_latency_store); + +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sprintf(buf, "auto\n"); + else if (value == PM_QOS_LATENCY_ANY) + return sprintf(buf, "any\n"); + + return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value)) { + if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + value = PM_QOS_LATENCY_ANY; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, + pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = { .attrs = runtime_attrs, }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = { #ifdef CONFIG_PM_RUNTIME &dev_attr_pm_qos_resume_latency_us.attr, #endif /* CONFIG_PM_RUNTIME */ NULL, }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { + .name = power_group_name, + .attrs = pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ + NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, - .attrs = pm_qos_latency_attrs, + .attrs = pm_qos_latency_tolerance_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { @@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev) if (rc) goto err_out; } - if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); - if (rc) { - if (pm_runtime_callbacks_present(dev)) - sysfs_unmerge_group(&dev->kobj, - &pm_runtime_attr_group); - goto err_out; - } + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; } return 0; + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; @@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev) { - return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev) { - sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) @@ -708,6 +760,8 @@ 
void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); + dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); sysfs_remove_group(&dev->kobj, &pm_attr_group); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index e6ee5e80e54..eb1bd2ecad8 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable) { int ret = 0; + if (!dev) + return -EINVAL; + if (enable) { device_set_wakeup_capable(dev, true); ret = device_wakeup_enable(dev); } else { + if (dev->power.can_wakeup) + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); } @@ -382,6 +388,12 @@ static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; + /* + * active wakeup source should bring the system + * out of PM_SUSPEND_FREEZE state + */ + freeze_wake(); + ws->active = true; ws->active_count++; ws->last_time = ktime_get(); @@ -653,7 +665,7 @@ void pm_wakeup_event(struct device *dev, unsigned int msec) } EXPORT_SYMBOL_GPL(pm_wakeup_event); -static void print_active_wakeup_sources(void) +void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; int active = 0; @@ -677,6 +689,7 @@ static void print_active_wakeup_sources(void) last_activity_ws->name); rcu_read_unlock(); } +EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); /** * pm_wakeup_pending - Check if power transition in progress should be aborted. @@ -701,8 +714,10 @@ bool pm_wakeup_pending(void) } spin_unlock_irqrestore(&events_lock, flags); - if (ret) - print_active_wakeup_sources(); + if (ret) { + pr_info("PM: Wakeup pending, aborting suspend\n"); + pm_print_active_wakeup_sources(); + } return ret; } diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index f0d30543fcc..4251570610c 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -3,7 +3,7 @@ # subsystems should select the appropriate symbols. 
config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ) select LZO_COMPRESS select LZO_DECOMPRESS select IRQ_DOMAIN if REGMAP_IRQ @@ -15,6 +15,9 @@ config REGMAP_I2C config REGMAP_SPI tristate +config REGMAP_SPMI + tristate + config REGMAP_MMIO tristate diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 5e75d1b683e..a7c670b4123 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -1,7 +1,8 @@ obj-$(CONFIG_REGMAP) += regmap.o regcache.o -obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o +obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o +obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 401d1919635..7d1326985be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -16,6 +16,7 @@ #include <linux/regmap.h> #include <linux/fs.h> #include <linux/list.h> +#include <linux/wait.h> struct regmap; struct regcache_ops; @@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache { off_t min; off_t max; unsigned int base_reg; + unsigned int max_reg; }; struct regmap_format { @@ -36,12 +38,20 @@ struct regmap_format { unsigned int reg, unsigned int val); void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); void (*format_val)(void *buf, unsigned int val, unsigned int shift); - unsigned int (*parse_val)(void *buf); + unsigned int (*parse_val)(const void *buf); + void (*parse_inplace)(void *buf); +}; + +struct regmap_async { + struct list_head list; + struct regmap *map; + void *work_buf; }; struct regmap { struct mutex mutex; spinlock_t spinlock; + unsigned long spinlock_flags; regmap_lock lock; regmap_unlock unlock; void *lock_arg; /* This is passed to lock/unlock functions */ @@ -53,6 +63,13 @@ struct regmap { void *bus_context; const char *name; + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; const char *debugfs_name; @@ -62,6 +79,7 @@ struct regmap { unsigned int debugfs_tot_len; struct list_head debugfs_off_cache; + struct mutex cache_lock; #endif unsigned int max_register; @@ -74,6 +92,11 @@ struct regmap { const struct regmap_access_table *volatile_table; const struct regmap_access_table *precious_table; + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); + + bool defer_caching; + u8 read_flag_mask; u8 write_flag_mask; @@ -111,6 +134,8 @@ struct regmap { /* if set, converts bulk rw to single rw */ bool use_single_rw; + /* if set, the device supports multi write mode */ + bool can_multi_write; struct rb_root range_tree; void *selector_work_buf; /* Scratch buffer used for selector */ @@ -124,6 +149,7 @@ struct regcache_ops { int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); int (*write)(struct regmap *map, unsigned int reg, unsigned int value); int (*sync)(struct regmap *map, unsigned int min, unsigned int max); + int (*drop)(struct regmap *map, unsigned int min, unsigned int max); }; bool regmap_writeable(struct regmap *map, 
unsigned int reg); @@ -150,6 +176,17 @@ struct regmap_range_node { unsigned int window_len; }; +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + /* lsb */ + unsigned int shift; + unsigned int reg; + + unsigned int id_size; + unsigned int id_offset; +}; + #ifdef CONFIG_DEBUG_FS extern void regmap_debugfs_initcall(void); extern void regmap_debugfs_init(struct regmap *map, const char *name); @@ -168,14 +205,31 @@ int regcache_read(struct regmap *map, int regcache_write(struct regmap *map, unsigned int reg, unsigned int value); int regcache_sync(struct regmap *map); - -unsigned int regcache_get_val(const void *base, unsigned int idx, - unsigned int word_size); -bool regcache_set_val(void *base, unsigned int idx, - unsigned int val, unsigned int word_size); +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end); + +static inline const void *regcache_get_val_addr(struct regmap *map, + const void *base, + unsigned int idx) +{ + return base + (map->cache_word_size * idx); +} + +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx); +bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, + unsigned int val); int regcache_lookup_reg(struct regmap *map, unsigned int reg); +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); + +void regmap_async_complete_cb(struct regmap_async *async, int ret); + extern struct regcache_ops regcache_rbtree_ops; extern struct regcache_ops regcache_lzo_ops; +extern struct regcache_ops regcache_flat_ops; #endif diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c new file mode 100644 index 00000000000..d9762e41959 --- /dev/null +++ b/drivers/base/regmap/regcache-flat.c @@ -0,0 +1,72 @@ +/* + * Register cache access API - flat caching support + * + * Copyright 2012 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/seq_file.h> + +#include "internal.h" + +static int regcache_flat_init(struct regmap *map) +{ + int i; + unsigned int *cache; + + map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1), + GFP_KERNEL); + if (!map->cache) + return -ENOMEM; + + cache = map->cache; + + for (i = 0; i < map->num_reg_defaults; i++) + cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def; + + return 0; +} + +static int regcache_flat_exit(struct regmap *map) +{ + kfree(map->cache); + map->cache = NULL; + + return 0; +} + +static int regcache_flat_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + unsigned int *cache = map->cache; + + *value = cache[reg]; + + return 0; +} + +static int regcache_flat_write(struct regmap *map, unsigned int reg, + unsigned int value) +{ + unsigned int *cache = map->cache; + + cache[reg] = value; + + return 0; +} + +struct regcache_ops regcache_flat_ops = { + .type = REGCACHE_FLAT, + .name = "flat", + .init = regcache_flat_init, + .exit = regcache_flat_exit, + .read = regcache_flat_read, + .write = regcache_flat_write, +}; diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c index afd6aa91a0d..e210a6d1406 100644 --- a/drivers/base/regmap/regcache-lzo.c +++ b/drivers/base/regmap/regcache-lzo.c @@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map, ret = regcache_lzo_decompress_cache_block(map, lzo_block); if (ret >= 0) /* fetch the value from the cache */ - *value = regcache_get_val(lzo_block->dst, blkpos, - map->cache_word_size); + *value = regcache_get_val(map, lzo_block->dst, blkpos); kfree(lzo_block->dst); /* restore the pointer and length of the compressed block */ @@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map, } /* write the new value to the cache */ - if (regcache_set_val(lzo_block->dst, blkpos, value, - map->cache_word_size)) { + if (regcache_set_val(map, lzo_block->dst, blkpos, value)) { kfree(lzo_block->dst); goto out; } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06..6a7e4fa1285 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -23,14 +23,16 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, static int regcache_rbtree_exit(struct regmap *map); struct regcache_rbtree_node { - /* the actual rbtree node holding this block */ - struct rb_node node; - /* base register handled by this block */ - unsigned int base_reg; /* block of adjacent registers */ void *block; + /* Which registers are present */ + long *cache_present; + /* base register handled by this block */ + unsigned int base_reg; /* number of registers available in the block */ unsigned int blklen; + /* the actual rbtree node holding this block */ + struct rb_node node; } __attribute__ ((packed)); struct regcache_rbtree_ctx { @@ -47,22 +49,22 @@ static inline void regcache_rbtree_get_base_top_reg( *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride); } -static unsigned int regcache_rbtree_get_register( - struct regcache_rbtree_node *rbnode, unsigned int idx, - unsigned int word_size) +static unsigned int regcache_rbtree_get_register(struct regmap *map, + struct regcache_rbtree_node *rbnode, unsigned int idx) { - return regcache_get_val(rbnode->block, idx, word_size); + return regcache_get_val(map, rbnode->block, idx); } -static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode, - 
unsigned int idx, unsigned int val, - unsigned int word_size) +static void regcache_rbtree_set_register(struct regmap *map, + struct regcache_rbtree_node *rbnode, + unsigned int idx, unsigned int val) { - regcache_set_val(rbnode->block, idx, val, word_size); + set_bit(idx, rbnode->cache_present); + regcache_set_val(map, rbnode->block, idx, val); } static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, - unsigned int reg) + unsigned int reg) { struct regcache_rbtree_ctx *rbtree_ctx = map->cache; struct rb_node *node; @@ -139,15 +141,21 @@ static int rbtree_show(struct seq_file *s, void *ignored) struct regcache_rbtree_node *n; struct rb_node *node; unsigned int base, top; + size_t mem_size; int nodes = 0; int registers = 0; int this_registers, average; - map->lock(map); + map->lock(map->lock_arg); + + mem_size = sizeof(*rbtree_ctx); for (node = rb_first(&rbtree_ctx->root); node != NULL; node = rb_next(node)) { n = container_of(node, struct regcache_rbtree_node, node); + mem_size += sizeof(*n); + mem_size += (n->blklen * map->cache_word_size); + mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long); regcache_rbtree_get_base_top_reg(map, n, &base, &top); this_registers = ((top - base) / map->reg_stride) + 1; @@ -162,10 +170,10 @@ static int rbtree_show(struct seq_file *s, void *ignored) else average = 0; - seq_printf(s, "%d nodes, %d registers, average %d registers\n", - nodes, registers, average); + seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", + nodes, registers, average, mem_size); - map->unlock(map); + map->unlock(map->lock_arg); return 0; } @@ -240,6 +248,7 @@ static int regcache_rbtree_exit(struct regmap *map) rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); next = rb_next(&rbtree_node->node); rb_erase(&rbtree_node->node, &rbtree_ctx->root); + kfree(rbtree_node->cache_present); kfree(rbtree_node->block); kfree(rbtree_node); } @@ -260,8 +269,9 @@ static int regcache_rbtree_read(struct regmap *map, rbnode = regcache_rbtree_lookup(map, reg); if (rbnode) { reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; - *value = regcache_rbtree_get_register(rbnode, reg_tmp, - map->cache_word_size); + if (!test_bit(reg_tmp, rbnode->cache_present)) + return -ENOENT; + *value = regcache_rbtree_get_register(map, rbnode, reg_tmp); } else { return -ENOENT; } @@ -270,99 +280,169 @@ static int regcache_rbtree_read(struct regmap *map, } -static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode, - unsigned int pos, unsigned int reg, - unsigned int value, unsigned int word_size) +static int regcache_rbtree_insert_to_block(struct regmap *map, + struct regcache_rbtree_node *rbnode, + unsigned int base_reg, + unsigned int top_reg, + unsigned int reg, + unsigned int value) { + unsigned int blklen; + unsigned int pos, offset; + unsigned long *present; u8 *blk; + blklen = (top_reg - base_reg) / map->reg_stride + 1; + pos = (reg - base_reg) / map->reg_stride; + offset = (rbnode->base_reg - base_reg) / map->reg_stride; + blk = krealloc(rbnode->block, - (rbnode->blklen + 1) * word_size, GFP_KERNEL); + blklen * map->cache_word_size, + GFP_KERNEL); if (!blk) return -ENOMEM; + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); + if (!present) { + kfree(blk); + return -ENOMEM; + } + /* insert the register value in the correct place in the rbnode block */ - memmove(blk + (pos + 1) * word_size, - blk + pos * word_size, - (rbnode->blklen - pos) * word_size); + if (pos == 0) { + 
memmove(blk + offset * map->cache_word_size, + blk, rbnode->blklen * map->cache_word_size); + bitmap_shift_right(present, present, offset, blklen); + } /* update the rbnode block, its size and the base register */ rbnode->block = blk; - rbnode->blklen++; - if (!pos) - rbnode->base_reg = reg; + rbnode->blklen = blklen; + rbnode->base_reg = base_reg; + rbnode->cache_present = present; - regcache_rbtree_set_register(rbnode, pos, value, word_size); + regcache_rbtree_set_register(map, rbnode, pos, value); return 0; } +static struct regcache_rbtree_node * +regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) +{ + struct regcache_rbtree_node *rbnode; + const struct regmap_range *range; + int i; + + rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL); + if (!rbnode) + return NULL; + + /* If there is a read table then use it to guess at an allocation */ + if (map->rd_table) { + for (i = 0; i < map->rd_table->n_yes_ranges; i++) { + if (regmap_reg_in_range(reg, + &map->rd_table->yes_ranges[i])) + break; + } + + if (i != map->rd_table->n_yes_ranges) { + range = &map->rd_table->yes_ranges[i]; + rbnode->blklen = (range->range_max - range->range_min) / + map->reg_stride + 1; + rbnode->base_reg = range->range_min; + } + } + + if (!rbnode->blklen) { + rbnode->blklen = 1; + rbnode->base_reg = reg; + } + + rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, + GFP_KERNEL); + if (!rbnode->block) + goto err_free; + + rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) * + sizeof(*rbnode->cache_present), GFP_KERNEL); + if (!rbnode->cache_present) + goto err_free_block; + + return rbnode; + +err_free_block: + kfree(rbnode->block); +err_free: + kfree(rbnode); + return NULL; +} + static int regcache_rbtree_write(struct regmap *map, unsigned int reg, unsigned int value) { struct regcache_rbtree_ctx *rbtree_ctx; struct regcache_rbtree_node *rbnode, *rbnode_tmp; struct rb_node *node; - unsigned int val; unsigned int reg_tmp; - unsigned int pos; - int i; int ret; rbtree_ctx = map->cache; + /* if we can't locate it in the cached rbnode we'll have * to traverse the rbtree looking for it. 
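This rework stops treating every cached slot as valid: each rbtree block now carries a cache_present bitmap, and regcache_rbtree_read() returns -ENOENT for registers whose bit is clear. A minimal user-space model of that idea (struct block and its helpers are invented for illustration, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define BLKLEN 16

struct block {
	unsigned int vals[BLKLEN];
	unsigned long present;	/* bit i set => vals[i] holds a real value */
};

static void block_write(struct block *b, unsigned int idx, unsigned int val)
{
	b->vals[idx] = val;
	b->present |= 1UL << idx;	/* set_bit() in the kernel version */
}

static bool block_read(const struct block *b, unsigned int idx,
		       unsigned int *val)
{
	if (!(b->present & (1UL << idx)))
		return false;		/* -ENOENT in the kernel version */
	*val = b->vals[idx];
	return true;
}

int main(void)
{
	struct block b = { { 0 }, 0 };
	unsigned int v;

	block_write(&b, 3, 0xabcd);
	printf("reg 3 cached: %s\n", block_read(&b, 3, &v) ? "yes" : "no");
	printf("reg 4 cached: %s\n", block_read(&b, 4, &v) ? "yes" : "no");
	return 0;
}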
*/ rbnode = regcache_rbtree_lookup(map, reg); if (rbnode) { reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; - val = regcache_rbtree_get_register(rbnode, reg_tmp, - map->cache_word_size); - if (val == value) - return 0; - regcache_rbtree_set_register(rbnode, reg_tmp, value, - map->cache_word_size); + regcache_rbtree_set_register(map, rbnode, reg_tmp, value); } else { + unsigned int base_reg, top_reg; + unsigned int new_base_reg, new_top_reg; + unsigned int min, max; + unsigned int max_dist; + + max_dist = map->reg_stride * sizeof(*rbnode_tmp) / + map->cache_word_size; + if (reg < max_dist) + min = 0; + else + min = reg - max_dist; + max = reg + max_dist; + /* look for an adjacent register to the one we are about to add */ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); - for (i = 0; i < rbnode_tmp->blklen; i++) { - reg_tmp = rbnode_tmp->base_reg + - (i * map->reg_stride); - if (abs(reg_tmp - reg) != map->reg_stride) - continue; - /* decide where in the block to place our register */ - if (reg_tmp + map->reg_stride == reg) - pos = i + 1; - else - pos = i; - ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos, - reg, value, - map->cache_word_size); - if (ret) - return ret; - rbtree_ctx->cached_rbnode = rbnode_tmp; - return 0; + + regcache_rbtree_get_base_top_reg(map, rbnode_tmp, + &base_reg, &top_reg); + + if (base_reg <= max && top_reg >= min) { + new_base_reg = min(reg, base_reg); + new_top_reg = max(reg, top_reg); + } else { + continue; } + + ret = regcache_rbtree_insert_to_block(map, rbnode_tmp, + new_base_reg, + new_top_reg, reg, + value); + if (ret) + return ret; + rbtree_ctx->cached_rbnode = rbnode_tmp; + return 0; } - /* we did not manage to find a place to insert it in an existing - * block so create a new rbnode with a single register in its block. - * This block will get populated further if any other adjacent - * registers get modified in the future. + + /* We did not manage to find a place to insert it in + * an existing block so create a new rbnode. 
*/ - rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); + rbnode = regcache_rbtree_node_alloc(map, reg); if (!rbnode) return -ENOMEM; - rbnode->blklen = 1; - rbnode->base_reg = reg; - rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, - GFP_KERNEL); - if (!rbnode->block) { - kfree(rbnode); - return -ENOMEM; - } - regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size); + regcache_rbtree_set_register(map, rbnode, + reg - rbnode->base_reg, value); regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); rbtree_ctx->cached_rbnode = rbnode; } @@ -376,50 +456,72 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, struct regcache_rbtree_ctx *rbtree_ctx; struct rb_node *node; struct regcache_rbtree_node *rbnode; - unsigned int regtmp; - unsigned int val; + unsigned int base_reg, top_reg; + unsigned int start, end; int ret; - int i, base, end; rbtree_ctx = map->cache; for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { rbnode = rb_entry(node, struct regcache_rbtree_node, node); - if (rbnode->base_reg < min) - continue; - if (rbnode->base_reg > max) + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (base_reg > max) break; - if (rbnode->base_reg + rbnode->blklen < min) + if (top_reg < min) continue; - if (min > rbnode->base_reg) - base = min - rbnode->base_reg; + if (min > base_reg) + start = (min - base_reg) / map->reg_stride; else - base = 0; + start = 0; - if (max < rbnode->base_reg + rbnode->blklen) - end = rbnode->base_reg + rbnode->blklen - max; + if (max < top_reg) + end = (max - base_reg) / map->reg_stride + 1; else end = rbnode->blklen; - for (i = base; i < end; i++) { - regtmp = rbnode->base_reg + (i * map->reg_stride); - val = regcache_rbtree_get_register(rbnode, i, - map->cache_word_size); + ret = regcache_sync_block(map, rbnode->block, + rbnode->cache_present, + rbnode->base_reg, start, end); + if (ret != 0) + return ret; + } - /* Is this the hardware default? If so skip. 
*/ - ret = regcache_lookup_reg(map, regtmp); - if (ret >= 0 && val == map->reg_defaults[ret].def) - continue; + return regmap_async_complete(map); +} - map->cache_bypass = 1; - ret = _regmap_write(map, regtmp, val); - map->cache_bypass = 0; - if (ret) - return ret; - dev_dbg(map->dev, "Synced register %#x, value %#x\n", - regtmp, val); - } +static int regcache_rbtree_drop(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct regcache_rbtree_ctx *rbtree_ctx; + struct regcache_rbtree_node *rbnode; + struct rb_node *node; + unsigned int base_reg, top_reg; + unsigned int start, end; + + rbtree_ctx = map->cache; + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct regcache_rbtree_node, node); + + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (base_reg > max) + break; + if (top_reg < min) + continue; + + if (min > base_reg) + start = (min - base_reg) / map->reg_stride; + else + start = 0; + + if (max < top_reg) + end = (max - base_reg) / map->reg_stride + 1; + else + end = rbnode->blklen; + + bitmap_clear(rbnode->cache_present, start, end - start); } return 0; @@ -432,5 +534,6 @@ struct regcache_ops regcache_rbtree_ops = { .exit = regcache_rbtree_exit, .read = regcache_rbtree_read, .write = regcache_rbtree_write, - .sync = regcache_rbtree_sync + .sync = regcache_rbtree_sync, + .drop = regcache_rbtree_drop, }; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 835883bda97..29b4128da0b 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -22,6 +22,7 @@ static const struct regcache_ops *cache_types[] = { ®cache_rbtree_ops, ®cache_lzo_ops, + ®cache_flat_ops, }; static int regcache_hw_init(struct regmap *map) @@ -44,8 +45,8 @@ static int regcache_hw_init(struct regmap *map) tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); if (!tmp_buf) return -EINVAL; - ret = regmap_bulk_read(map, 0, tmp_buf, - map->num_reg_defaults_raw); + ret = regmap_raw_read(map, 0, tmp_buf, + map->num_reg_defaults_raw); map->cache_bypass = cache_bypass; if (ret < 0) { kfree(tmp_buf); @@ -57,8 +58,7 @@ static int regcache_hw_init(struct regmap *map) /* calculate the size of reg_defaults */ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { - val = regcache_get_val(map->reg_defaults_raw, - i, map->cache_word_size); + val = regcache_get_val(map, map->reg_defaults_raw, i); if (regmap_volatile(map, i * map->reg_stride)) continue; count++; @@ -74,8 +74,7 @@ static int regcache_hw_init(struct regmap *map) /* fill the reg_defaults */ map->num_reg_defaults = count; for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { - val = regcache_get_val(map->reg_defaults_raw, - i, map->cache_word_size); + val = regcache_get_val(map, map->reg_defaults_raw, i); if (regmap_volatile(map, i * map->reg_stride)) continue; map->reg_defaults[j].reg = i * map->reg_stride; @@ -239,15 +238,45 @@ int regcache_write(struct regmap *map, BUG_ON(!map->cache_ops); - if (!regmap_writeable(map, reg)) - return -EIO; - if (!regmap_volatile(map, reg)) return map->cache_ops->write(map, reg, value); return 0; } +static int regcache_default_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + unsigned int reg; + + for (reg = min; reg <= max; reg += map->reg_stride) { + unsigned int val; + int ret; + + if (regmap_volatile(map, reg) || + !regmap_writeable(map, reg)) + continue; + + ret = regcache_read(map, reg, &val); + if (ret) + return ret; + + /* Is this the hardware default? 
If so skip. */ + ret = regcache_lookup_reg(map, reg); + if (ret >= 0 && val == map->reg_defaults[ret].def) + continue; + + map->cache_bypass = 1; + ret = _regmap_write(map, reg, val); + map->cache_bypass = 0; + if (ret) + return ret; + dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); + } + + return 0; +} + /** * regcache_sync: Sync the register cache with the hardware. * @@ -266,9 +295,9 @@ int regcache_sync(struct regmap *map) const char *name; unsigned int bypass; - BUG_ON(!map->cache_ops || !map->cache_ops->sync); + BUG_ON(!map->cache_ops); - map->lock(map); + map->lock(map->lock_arg); /* Remember the initial bypass state */ bypass = map->cache_bypass; dev_dbg(map->dev, "Syncing %s cache\n", @@ -279,13 +308,11 @@ int regcache_sync(struct regmap *map) if (!map->cache_dirty) goto out; + map->async = true; + /* Apply any patch first */ map->cache_bypass = 1; for (i = 0; i < map->patch_regs; i++) { - if (map->patch[i].reg % map->reg_stride) { - ret = -EINVAL; - goto out; - } ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); if (ret != 0) { dev_err(map->dev, "Failed to write %x = %x: %d\n", @@ -295,16 +322,23 @@ int regcache_sync(struct regmap *map) } map->cache_bypass = 0; - ret = map->cache_ops->sync(map, 0, map->max_register); + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, 0, map->max_register); + else + ret = regcache_default_sync(map, 0, map->max_register); if (ret == 0) map->cache_dirty = false; out: - trace_regcache_sync(map->dev, name, "stop"); /* Restore the bypass state */ + map->async = false; map->cache_bypass = bypass; - map->unlock(map); + map->unlock(map->lock_arg); + + regmap_async_complete(map); + + trace_regcache_sync(map->dev, name, "stop"); return ret; } @@ -329,9 +363,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min, const char *name; unsigned int bypass; - BUG_ON(!map->cache_ops || !map->cache_ops->sync); + BUG_ON(!map->cache_ops); - map->lock(map); + map->lock(map->lock_arg); /* Remember the initial bypass state */ bypass = map->cache_bypass; @@ -344,19 +378,59 @@ int regcache_sync_region(struct regmap *map, unsigned int min, if (!map->cache_dirty) goto out; - ret = map->cache_ops->sync(map, min, max); + map->async = true; + + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, min, max); + else + ret = regcache_default_sync(map, min, max); out: - trace_regcache_sync(map->dev, name, "stop region"); /* Restore the bypass state */ map->cache_bypass = bypass; - map->unlock(map); + map->async = false; + map->unlock(map->lock_arg); + + regmap_async_complete(map); + + trace_regcache_sync(map->dev, name, "stop region"); return ret; } EXPORT_SYMBOL_GPL(regcache_sync_region); /** + * regcache_drop_region: Discard part of the register cache + * + * @map: map to operate on + * @min: first register to discard + * @max: last register to discard + * + * Discard part of the register cache. + * + * Return a negative value on failure, 0 on success. 
+ */ +int regcache_drop_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + int ret = 0; + + if (!map->cache_ops || !map->cache_ops->drop) + return -EINVAL; + + map->lock(map->lock_arg); + + trace_regcache_drop_region(map->dev, min, max); + + ret = map->cache_ops->drop(map, min, max); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_drop_region); + +/** * regcache_cache_only: Put a register map into cache only mode * * @map: map to configure @@ -370,11 +444,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region); */ void regcache_cache_only(struct regmap *map, bool enable) { - map->lock(map); + map->lock(map->lock_arg); WARN_ON(map->cache_bypass && enable); map->cache_only = enable; trace_regmap_cache_only(map->dev, enable); - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_only); @@ -389,9 +463,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only); */ void regcache_mark_dirty(struct regmap *map) { - map->lock(map); + map->lock(map->lock_arg); map->cache_dirty = true; - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_mark_dirty); @@ -408,36 +482,40 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty); */ void regcache_cache_bypass(struct regmap *map, bool enable) { - map->lock(map); + map->lock(map->lock_arg); WARN_ON(map->cache_only && enable); map->cache_bypass = enable; trace_regmap_cache_bypass(map->dev, enable); - map->unlock(map); + map->unlock(map->lock_arg); } EXPORT_SYMBOL_GPL(regcache_cache_bypass); -bool regcache_set_val(void *base, unsigned int idx, - unsigned int val, unsigned int word_size) +bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, + unsigned int val) { - switch (word_size) { + if (regcache_get_val(map, base, idx) == val) + return true; + + /* Use device native format if possible */ + if (map->format.format_val) { + map->format.format_val(base + (map->cache_word_size * idx), + val, 0); + return false; + } + + switch (map->cache_word_size) { case 1: { u8 *cache = base; - if (cache[idx] == val) - return true; cache[idx] = val; break; } case 2: { u16 *cache = base; - if (cache[idx] == val) - return true; cache[idx] = val; break; } case 4: { u32 *cache = base; - if (cache[idx] == val) - return true; cache[idx] = val; break; } @@ -447,13 +525,18 @@ bool regcache_set_val(void *base, unsigned int idx, return false; } -unsigned int regcache_get_val(const void *base, unsigned int idx, - unsigned int word_size) +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx) { if (!base) return -EINVAL; - switch (word_size) { + /* Use device native format if possible */ + if (map->format.parse_val) + return map->format.parse_val(regcache_get_val_addr(map, base, + idx)); + + switch (map->cache_word_size) { case 1: { const u8 *cache = base; return cache[idx]; @@ -497,3 +580,128 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg) else return -ENOENT; } + +static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) +{ + if (!cache_present) + return true; + + return test_bit(idx, cache_present); +} + +static int regcache_sync_block_single(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, + unsigned int start, unsigned int end) +{ + unsigned int i, regtmp, val; + int ret; + + for (i = start; i < end; i++) { + regtmp = block_base + (i * map->reg_stride); + + if (!regcache_reg_present(cache_present, i)) + continue; + + val = regcache_get_val(map, block, i); + + /* Is this the hardware 
default? If so skip. */ + ret = regcache_lookup_reg(map, regtmp); + if (ret >= 0 && val == map->reg_defaults[ret].def) + continue; + + map->cache_bypass = 1; + + ret = _regmap_write(map, regtmp, val); + + map->cache_bypass = 0; + if (ret != 0) + return ret; + dev_dbg(map->dev, "Synced register %#x, value %#x\n", + regtmp, val); + } + + return 0; +} + +static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, + unsigned int base, unsigned int cur) +{ + size_t val_bytes = map->format.val_bytes; + int ret, count; + + if (*data == NULL) + return 0; + + count = (cur - base) / map->reg_stride; + + dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", + count * val_bytes, count, base, cur - map->reg_stride); + + map->cache_bypass = 1; + + ret = _regmap_raw_write(map, base, *data, count * val_bytes); + + map->cache_bypass = 0; + + *data = NULL; + + return ret; +} + +static int regcache_sync_block_raw(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end) +{ + unsigned int i, val; + unsigned int regtmp = 0; + unsigned int base = 0; + const void *data = NULL; + int ret; + + for (i = start; i < end; i++) { + regtmp = block_base + (i * map->reg_stride); + + if (!regcache_reg_present(cache_present, i)) { + ret = regcache_sync_block_raw_flush(map, &data, + base, regtmp); + if (ret != 0) + return ret; + continue; + } + + val = regcache_get_val(map, block, i); + + /* Is this the hardware default? If so skip. */ + ret = regcache_lookup_reg(map, regtmp); + if (ret >= 0 && val == map->reg_defaults[ret].def) { + ret = regcache_sync_block_raw_flush(map, &data, + base, regtmp); + if (ret != 0) + return ret; + continue; + } + + if (!data) { + data = regcache_get_val_addr(map, block, i); + base = regtmp; + } + } + + return regcache_sync_block_raw_flush(map, &data, base, regtmp + + map->reg_stride); +} + +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end) +{ + if (regmap_can_raw_write(map)) + return regcache_sync_block_raw(map, block, cache_present, + block_base, start, end); + else + return regcache_sync_block_single(map, block, cache_present, + block_base, start, end); +} diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index d9a6c94ce42..45d812c0ea7 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -15,10 +15,19 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/device.h> +#include <linux/list.h> #include "internal.h" +struct regmap_debugfs_node { + struct regmap *map; + const char *name; + struct list_head link; +}; + static struct dentry *regmap_debugfs_root; +static LIST_HEAD(regmap_debugfs_early_list); +static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) @@ -81,18 +90,27 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, struct regmap_debugfs_off_cache *c = NULL; loff_t p = 0; unsigned int i, ret; + unsigned int fpos_offset; + unsigned int reg_offset; + + /* Suppress the cache if we're using a subrange */ + if (base) + return base; /* * If we don't have a cache build one so we don't have to do a * linear scan each time. 
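regcache_sync_block_raw() above batches adjacent registers that need writing into a single raw bus write, flushing the current run whenever a register can be skipped. A small stand-alone C model of that run-batching logic (the dirty[] array and flush() stand in for the present bitmap and _regmap_raw_write()):

#include <stdio.h>

static void flush(unsigned int base, unsigned int count)
{
	if (count)
		printf("raw write: %u registers from 0x%x\n", count, base);
}

int main(void)
{
	/* 1 = must be written, 0 = skip (absent or hardware default) */
	const int dirty[8] = { 1, 1, 0, 1, 1, 1, 0, 0 };
	unsigned int i, base = 0, count = 0;

	for (i = 0; i < 8; i++) {
		if (!dirty[i]) {
			flush(base, count);	/* close the current run */
			count = 0;
			continue;
		}
		if (!count)
			base = i;		/* open a new run */
		count++;
	}
	flush(base, count);	/* final run, mirroring the tail flush */
	return 0;
}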
*/ + mutex_lock(&map->cache_lock); + i = base; if (list_empty(&map->debugfs_off_cache)) { - for (i = base; i <= map->max_register; i += map->reg_stride) { + for (; i <= map->max_register; i += map->reg_stride) { /* Skip unprinted registers, closing off cache entry */ if (!regmap_readable(map, i) || regmap_precious(map, i)) { if (c) { c->max = p - 1; + c->max_reg = i - map->reg_stride; list_add_tail(&c->list, &map->debugfs_off_cache); c = NULL; @@ -106,6 +124,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) { regmap_debugfs_free_dump_cache(map); + mutex_unlock(&map->cache_lock); return base; } c->min = p; @@ -119,6 +138,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, /* Close the last entry off if we didn't scan beyond it */ if (c) { c->max = p - 1; + c->max_reg = i - map->reg_stride; list_add_tail(&c->list, &map->debugfs_off_cache); } @@ -128,25 +148,40 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, * allocate and we should never be in this code if there are * no registers at all. */ - if (list_empty(&map->debugfs_off_cache)) { - WARN_ON(list_empty(&map->debugfs_off_cache)); - return base; - } + WARN_ON(list_empty(&map->debugfs_off_cache)); + ret = base; - /* Find the relevant block */ + /* Find the relevant block:offset */ list_for_each_entry(c, &map->debugfs_off_cache, list) { if (from >= c->min && from <= c->max) { - *pos = c->min; - return c->base_reg; + fpos_offset = from - c->min; + reg_offset = fpos_offset / map->debugfs_tot_len; + *pos = c->min + (reg_offset * map->debugfs_tot_len); + mutex_unlock(&map->cache_lock); + return c->base_reg + (reg_offset * map->reg_stride); } - *pos = c->min; - ret = c->base_reg; + *pos = c->max; + ret = c->max_reg; } + mutex_unlock(&map->cache_lock); return ret; } +static inline void regmap_calc_tot_len(struct regmap *map, + void *buf, size_t count) +{ + /* Calculate the length of a fixed format */ + if (!map->debugfs_tot_len) { + map->debugfs_reg_len = regmap_calc_reg_len(map->max_register, + buf, count); + map->debugfs_val_len = 2 * map->format.val_bytes; + map->debugfs_tot_len = map->debugfs_reg_len + + map->debugfs_val_len + 3; /* : \n */ + } +} + static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, unsigned int to, char __user *user_buf, size_t count, loff_t *ppos) @@ -165,14 +200,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, if (!buf) return -ENOMEM; - /* Calculate the length of a fixed format */ - if (!map->debugfs_tot_len) { - map->debugfs_reg_len = regmap_calc_reg_len(map->max_register, - buf, count); - map->debugfs_val_len = 2 * map->format.val_bytes; - map->debugfs_tot_len = map->debugfs_reg_len + - map->debugfs_val_len + 3; /* : \n */ - } + regmap_calc_tot_len(map, buf, count); /* Work out which register we're starting at */ start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); @@ -187,7 +215,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos + 1 + map->debugfs_tot_len >= count) + if (buf_pos + map->debugfs_tot_len > count) break; /* Format the register */ @@ -250,6 +278,7 @@ static ssize_t regmap_map_write_file(struct file *file, char *start = buf; unsigned long reg, value; struct regmap *map = file->private_data; + int ret; buf_size = min(count, (sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) @@ 
-261,13 +290,15 @@ static ssize_t regmap_map_write_file(struct file *file, reg = simple_strtoul(start, &start, 16); while (*start == ' ') start++; - if (strict_strtoul(start, 16, &value)) + if (kstrtoul(start, 16, &value)) return -EINVAL; /* Userspace has been fiddling around behind the kernel's back */ - add_taint(TAINT_USER); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); - regmap_write(map, reg, value); + ret = regmap_write(map, reg, value); + if (ret < 0) + return ret; return buf_size; } #else @@ -297,6 +328,79 @@ static const struct file_operations regmap_range_fops = { .llseek = default_llseek, }; +static ssize_t regmap_reg_ranges_read_file(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct regmap *map = file->private_data; + struct regmap_debugfs_off_cache *c; + loff_t p = 0; + size_t buf_pos = 0; + char *buf; + char *entry; + int ret; + + if (*ppos < 0 || !count) + return -EINVAL; + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + entry = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!entry) { + kfree(buf); + return -ENOMEM; + } + + /* While we are at it, build the register dump cache + * now so the read() operation on the `registers' file + * can benefit from using the cache. We do not care + * about the file position information that is contained + * in the cache, just about the actual register blocks */ + regmap_calc_tot_len(map, buf, count); + regmap_debugfs_get_dump_start(map, 0, *ppos, &p); + + /* Reset file pointer as the fixed-format of the `registers' + * file is not compatible with the `range' file */ + p = 0; + mutex_lock(&map->cache_lock); + list_for_each_entry(c, &map->debugfs_off_cache, list) { + snprintf(entry, PAGE_SIZE, "%x-%x", + c->base_reg, c->max_reg); + if (p >= *ppos) { + if (buf_pos + 1 + strlen(entry) > count) + break; + snprintf(buf + buf_pos, count - buf_pos, + "%s", entry); + buf_pos += strlen(entry); + buf[buf_pos] = '\n'; + buf_pos++; + } + p += strlen(entry) + 1; + } + mutex_unlock(&map->cache_lock); + + kfree(entry); + ret = buf_pos; + + if (copy_to_user(user_buf, buf, buf_pos)) { + ret = -EFAULT; + goto out_buf; + } + + *ppos += buf_pos; +out_buf: + kfree(buf); + return ret; +} + +static const struct file_operations regmap_reg_ranges_fops = { + .open = simple_open, + .read = regmap_reg_ranges_read_file, + .llseek = default_llseek, +}; + static ssize_t regmap_access_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -370,7 +474,22 @@ void regmap_debugfs_init(struct regmap *map, const char *name) struct rb_node *next; struct regmap_range_node *range_node; + /* If we don't have the debugfs root yet, postpone init */ + if (!regmap_debugfs_root) { + struct regmap_debugfs_node *node; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return; + node->map = map; + node->name = name; + mutex_lock(®map_debugfs_early_lock); + list_add(&node->link, ®map_debugfs_early_list); + mutex_unlock(®map_debugfs_early_lock); + return; + } + INIT_LIST_HEAD(&map->debugfs_off_cache); + mutex_init(&map->cache_lock); if (name) { map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", @@ -389,7 +508,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name) debugfs_create_file("name", 0400, map->debugfs, map, ®map_name_fops); - if (map->max_register) { + debugfs_create_file("range", 0400, map->debugfs, + map, ®map_reg_ranges_fops); + + if (map->max_register || regmap_readable(map, 0)) { debugfs_create_file("registers", 0400, map->debugfs, map, ®map_map_fops); debugfs_create_file("access", 
0400, map->debugfs, @@ -420,16 +542,42 @@ void regmap_debugfs_init(struct regmap *map, const char *name) void regmap_debugfs_exit(struct regmap *map) { - debugfs_remove_recursive(map->debugfs); - regmap_debugfs_free_dump_cache(map); - kfree(map->debugfs_name); + if (map->debugfs) { + debugfs_remove_recursive(map->debugfs); + mutex_lock(&map->cache_lock); + regmap_debugfs_free_dump_cache(map); + mutex_unlock(&map->cache_lock); + kfree(map->debugfs_name); + } else { + struct regmap_debugfs_node *node, *tmp; + + mutex_lock(®map_debugfs_early_lock); + list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, + link) { + if (node->map == map) { + list_del(&node->link); + kfree(node); + } + } + mutex_unlock(®map_debugfs_early_lock); + } } void regmap_debugfs_initcall(void) { + struct regmap_debugfs_node *node, *tmp; + regmap_debugfs_root = debugfs_create_dir("regmap", NULL); if (!regmap_debugfs_root) { pr_warn("regmap: Failed to create debugfs root\n"); return; } + + mutex_lock(®map_debugfs_early_lock); + list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, link) { + regmap_debugfs_init(node->map, node->name); + list_del(&node->link); + kfree(node); + } + mutex_unlock(®map_debugfs_early_lock); } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index fa6bf5279d2..ca193d1ef47 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -13,7 +13,79 @@ #include <linux/regmap.h> #include <linux/i2c.h> #include <linux/module.h> -#include <linux/init.h> + + +static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_byte_data(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_byte_data(i2c, reg, val); +} + +static struct regmap_bus regmap_smbus_byte = { + .reg_write = regmap_smbus_byte_reg_write, + .reg_read = regmap_smbus_byte_reg_read, +}; + +static int regmap_smbus_word_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_word_data(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_word_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xffff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_word_data(i2c, reg, val); +} + +static struct regmap_bus regmap_smbus_word = { + .reg_write = regmap_smbus_word_reg_write, + .reg_read = regmap_smbus_word_reg_read, +}; static int regmap_i2c_write(void *context, const void *data, size_t count) { @@ -98,6 +170,23 @@ static struct regmap_bus regmap_i2c = { .read = regmap_i2c_read, }; +static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, + const struct regmap_config *config) +{ + if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) + return ®map_i2c; + else if (config->val_bits == 16 && config->reg_bits == 8 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) + 
return ®map_smbus_word; + else if (config->val_bits == 8 && config->reg_bits == 8 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) + return ®map_smbus_byte; + + return ERR_PTR(-ENOTSUPP); +} + /** * regmap_init_i2c(): Initialise register map * @@ -110,7 +199,12 @@ static struct regmap_bus regmap_i2c = { struct regmap *regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config) { - return regmap_init(&i2c->dev, ®map_i2c, &i2c->dev, config); + const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return regmap_init(&i2c->dev, bus, &i2c->dev, config); } EXPORT_SYMBOL_GPL(regmap_init_i2c); @@ -127,7 +221,12 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c); struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config) { - return devm_regmap_init(&i2c->dev, ®map_i2c, &i2c->dev, config); + const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config); } EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 5972ad95854..6299a50a596 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -10,13 +10,13 @@ * published by the Free Software Foundation. */ -#include <linux/export.h> #include <linux/device.h> -#include <linux/regmap.h> -#include <linux/irq.h> +#include <linux/export.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <linux/slab.h> #include "internal.h" @@ -34,6 +34,7 @@ struct regmap_irq_chip_data { int irq; int wake_count; + void *status_reg_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; @@ -87,6 +88,39 @@ static void regmap_irq_sync_unlock(struct irq_data *data) if (ret != 0) dev_err(d->map->dev, "Failed to sync masks in %x\n", reg); + + reg = d->chip->wake_base + + (i * map->reg_stride * d->irq_reg_stride); + if (d->wake_buf) { + if (d->chip->wake_invert) + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], + ~d->wake_buf[i]); + else + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], + d->wake_buf[i]); + if (ret != 0) + dev_err(d->map->dev, + "Failed to sync wakes in %x: %d\n", + reg, ret); + } + + if (!d->chip->init_ack_masked) + continue; + /* + * Ack all the masked interrupts uncondictionly, + * OR if there is masked interrupt which hasn't been Acked, + * it'll be ignored in irq handler, then may introduce irq storm + */ + if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { + reg = d->chip->ack_base + + (i * map->reg_stride * d->irq_reg_stride); + ret = regmap_write(map, reg, d->mask_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", + reg, ret); + } } if (d->chip->runtime_pm) @@ -129,16 +163,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); - if (!d->chip->wake_base) - return -EINVAL; - if (on) { - d->wake_buf[irq_data->reg_offset / map->reg_stride] - &= ~irq_data->mask; + if (d->wake_buf) + d->wake_buf[irq_data->reg_offset / map->reg_stride] + &= ~irq_data->mask; d->wake_count++; } else { - d->wake_buf[irq_data->reg_offset / map->reg_stride] - |= irq_data->mask; + if (d->wake_buf) + d->wake_buf[irq_data->reg_offset / map->reg_stride] 
@@ -129,16 +163,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
 
-	if (!d->chip->wake_base)
-		return -EINVAL;
-
	if (on) {
-		d->wake_buf[irq_data->reg_offset / map->reg_stride]
-			&= ~irq_data->mask;
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				&= ~irq_data->mask;
		d->wake_count++;
	} else {
-		d->wake_buf[irq_data->reg_offset / map->reg_stride]
-			|= irq_data->mask;
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				|= irq_data->mask;
		d->wake_count--;
	}
 
@@ -167,33 +200,78 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
+			pm_runtime_put(map->dev);
			return IRQ_NONE;
		}
	}
 
	/*
-	 * Ignore masked IRQs and ack if we need to; we ack early so
-	 * there is no race between handling and acknowleding the
-	 * interrupt.  We assume that typically few of the interrupts
-	 * will fire simultaneously so don't worry about overhead from
-	 * doing a write per register.
+	 * Read in the statuses, using a single bulk read if possible
+	 * in order to reduce the I/O overheads.
	 */
-	for (i = 0; i < data->chip->num_regs; i++) {
-		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
-				   * data->irq_reg_stride),
-				   &data->status_buf[i]);
+	if (!map->use_single_rw && map->reg_stride == 1 &&
+	    data->irq_reg_stride == 1) {
+		u8 *buf8 = data->status_reg_buf;
+		u16 *buf16 = data->status_reg_buf;
+		u32 *buf32 = data->status_reg_buf;
+
+		BUG_ON(!data->status_reg_buf);
+
+		ret = regmap_bulk_read(map, chip->status_base,
+				       data->status_reg_buf,
+				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
-				ret);
-			if (chip->runtime_pm)
-				pm_runtime_put(map->dev);
+				ret);
			return IRQ_NONE;
		}
+
+		for (i = 0; i < data->chip->num_regs; i++) {
+			switch (map->format.val_bytes) {
+			case 1:
+				data->status_buf[i] = buf8[i];
+				break;
+			case 2:
+				data->status_buf[i] = buf16[i];
+				break;
+			case 4:
+				data->status_buf[i] = buf32[i];
+				break;
+			default:
+				BUG();
+				return IRQ_NONE;
+			}
+		}
+
+	} else {
+		for (i = 0; i < data->chip->num_regs; i++) {
+			ret = regmap_read(map, chip->status_base +
+					  (i * map->reg_stride
+					   * data->irq_reg_stride),
+					  &data->status_buf[i]);
+
+			if (ret != 0) {
+				dev_err(map->dev,
+					"Failed to read IRQ status: %d\n",
+					ret);
+				if (chip->runtime_pm)
+					pm_runtime_put(map->dev);
+				return IRQ_NONE;
+			}
+		}
+	}
+
+	/*
+	 * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+	 * interrupt.  We assume that typically few of the interrupts
+	 * will fire simultaneously so don't worry about overhead from
+	 * doing a write per register.
+ */ + for (i = 0; i < data->chip->num_regs; i++) { data->status_buf[i] &= ~data->mask_buf[i]; - if (data->status_buf[i] && chip->ack_base) { + if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) { reg = chip->ack_base + (i * map->reg_stride * data->irq_reg_stride); ret = regmap_write(map, reg, data->status_buf[i]); @@ -269,6 +347,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int ret = -ENOMEM; u32 reg; + if (chip->num_regs <= 0) + return -EINVAL; + for (i = 0; i < chip->num_irqs; i++) { if (chip->irqs[i].reg_offset % map->reg_stride) return -EINVAL; @@ -290,8 +371,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; - *data = d; - d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, GFP_KERNEL); if (!d->status_buf) @@ -316,11 +395,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, d->irq_chip = regmap_irq_chip; d->irq_chip.name = chip->name; - if (!chip->wake_base) { - d->irq_chip.irq_set_wake = NULL; - d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND | - IRQCHIP_SKIP_SET_WAKE; - } d->irq = irq; d->map = map; d->chip = chip; @@ -331,6 +405,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, else d->irq_reg_stride = 1; + if (!map->use_single_rw && map->reg_stride == 1 && + d->irq_reg_stride == 1) { + d->status_reg_buf = kmalloc(map->format.val_bytes * + chip->num_regs, GFP_KERNEL); + if (!d->status_reg_buf) + goto err_alloc; + } + mutex_init(&d->lock); for (i = 0; i < chip->num_irqs; i++) @@ -353,6 +435,31 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, reg, ret); goto err_alloc; } + + if (!chip->init_ack_masked) + continue; + + /* Ack masked but set interrupts */ + reg = chip->status_base + + (i * map->reg_stride * d->irq_reg_stride); + ret = regmap_read(map, reg, &d->status_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to read IRQ status: %d\n", + ret); + goto err_alloc; + } + + if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { + reg = chip->ack_base + + (i * map->reg_stride * d->irq_reg_stride); + ret = regmap_write(map, reg, + d->status_buf[i] & d->mask_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to ack 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } } /* Wake is disabled by default */ @@ -361,8 +468,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, d->wake_buf[i] = d->mask_buf_def[i]; reg = chip->wake_base + (i * map->reg_stride * d->irq_reg_stride); - ret = regmap_update_bits(map, reg, d->wake_buf[i], - d->wake_buf[i]); + + if (chip->wake_invert) + ret = regmap_update_bits(map, reg, + d->mask_buf_def[i], + 0); + else + ret = regmap_update_bits(map, reg, + d->mask_buf_def[i], + d->wake_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", reg, ret); @@ -388,10 +502,13 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, chip->name, d); if (ret != 0) { - dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret); + dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", + irq, chip->name, ret); goto err_domain; } + *data = d; + return 0; err_domain: @@ -401,6 +518,7 @@ err_alloc: kfree(d->mask_buf_def); kfree(d->mask_buf); kfree(d->status_buf); + kfree(d->status_reg_buf); kfree(d); return ret; } @@ -418,10 +536,11 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) return; free_irq(irq, d); - /* We should unmap the domain but... 
*/ + irq_domain_remove(d->domain); kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); + kfree(d->status_reg_buf); kfree(d->status_buf); kfree(d); } diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index f05fc74dd84..04a329a377e 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -16,8 +16,8 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/clk.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/regmap.h> @@ -25,19 +25,83 @@ struct regmap_mmio_context { void __iomem *regs; + unsigned reg_bytes; unsigned val_bytes; + unsigned pad_bytes; + struct clk *clk; }; +static inline void regmap_mmio_regsize_check(size_t reg_size) +{ + switch (reg_size) { + case 1: + case 2: + case 4: +#ifdef CONFIG_64BIT + case 8: +#endif + break; + default: + BUG(); + } +} + +static int regmap_mmio_regbits_check(size_t reg_bits) +{ + switch (reg_bits) { + case 8: + case 16: + case 32: +#ifdef CONFIG_64BIT + case 64: +#endif + return 0; + default: + return -EINVAL; + } +} + +static inline void regmap_mmio_count_check(size_t count, u32 offset) +{ + BUG_ON(count <= offset); +} + +static inline unsigned int +regmap_mmio_get_offset(const void *reg, size_t reg_size) +{ + switch (reg_size) { + case 1: + return *(u8 *)reg; + case 2: + return *(u16 *)reg; + case 4: + return *(u32 *)reg; +#ifdef CONFIG_64BIT + case 8: + return *(u64 *)reg; +#endif + default: + BUG(); + } +} + static int regmap_mmio_gather_write(void *context, const void *reg, size_t reg_size, const void *val, size_t val_size) { struct regmap_mmio_context *ctx = context; - u32 offset; + unsigned int offset; + int ret; - BUG_ON(reg_size != 4); + regmap_mmio_regsize_check(reg_size); - offset = *(u32 *)reg; + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + offset = regmap_mmio_get_offset(reg, reg_size); while (val_size) { switch (ctx->val_bytes) { @@ -64,14 +128,21 @@ static int regmap_mmio_gather_write(void *context, offset += ctx->val_bytes; } + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + return 0; } static int regmap_mmio_write(void *context, const void *data, size_t count) { - BUG_ON(count < 4); + struct regmap_mmio_context *ctx = context; + unsigned int offset = ctx->reg_bytes + ctx->pad_bytes; - return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4); + regmap_mmio_count_check(count, offset); + + return regmap_mmio_gather_write(context, data, ctx->reg_bytes, + data + offset, count - offset); } static int regmap_mmio_read(void *context, @@ -79,11 +150,18 @@ static int regmap_mmio_read(void *context, void *val, size_t val_size) { struct regmap_mmio_context *ctx = context; - u32 offset; + unsigned int offset; + int ret; + + regmap_mmio_regsize_check(reg_size); - BUG_ON(reg_size != 4); + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } - offset = *(u32 *)reg; + offset = regmap_mmio_get_offset(reg, reg_size); while (val_size) { switch (ctx->val_bytes) { @@ -110,11 +188,20 @@ static int regmap_mmio_read(void *context, offset += ctx->val_bytes; } + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + return 0; } static void regmap_mmio_free_context(void *context) { + struct regmap_mmio_context *ctx = context; + + if (!IS_ERR(ctx->clk)) { + clk_unprepare(ctx->clk); + clk_put(ctx->clk); + } kfree(context); } @@ -128,14 +215,18 @@ static struct regmap_bus regmap_mmio = { 
.val_format_endian_default = REGMAP_ENDIAN_NATIVE, }; -static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, +static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, + const char *clk_id, + void __iomem *regs, const struct regmap_config *config) { struct regmap_mmio_context *ctx; int min_stride; + int ret; - if (config->reg_bits != 32) - return ERR_PTR(-EINVAL); + ret = regmap_mmio_regbits_check(config->reg_bits); + if (ret) + return ERR_PTR(ret); if (config->pad_bits) return ERR_PTR(-EINVAL); @@ -178,38 +269,63 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, ctx->regs = regs; ctx->val_bytes = config->val_bits / 8; + ctx->reg_bytes = config->reg_bits / 8; + ctx->pad_bytes = config->pad_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); + + if (clk_id == NULL) + return ctx; + + ctx->clk = clk_get(dev, clk_id); + if (IS_ERR(ctx->clk)) { + ret = PTR_ERR(ctx->clk); + goto err_free; + } + + ret = clk_prepare(ctx->clk); + if (ret < 0) { + clk_put(ctx->clk); + goto err_free; + } return ctx; + +err_free: + kfree(ctx); + + return ERR_PTR(ret); } /** - * regmap_init_mmio(): Initialise register map + * regmap_init_mmio_clk(): Initialise register map with register clock * * @dev: Device that will be interacted with + * @clk_id: register clock consumer ID * @regs: Pointer to memory-mapped IO region * @config: Configuration for register map * * The return value will be an ERR_PTR() on error or a valid pointer to * a struct regmap. */ -struct regmap *regmap_init_mmio(struct device *dev, - void __iomem *regs, - const struct regmap_config *config) +struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config) { struct regmap_mmio_context *ctx; - ctx = regmap_mmio_gen_context(regs, config); + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); if (IS_ERR(ctx)) return ERR_CAST(ctx); return regmap_init(dev, ®map_mmio, ctx, config); } -EXPORT_SYMBOL_GPL(regmap_init_mmio); +EXPORT_SYMBOL_GPL(regmap_init_mmio_clk); /** - * devm_regmap_init_mmio(): Initialise managed register map + * devm_regmap_init_mmio_clk(): Initialise managed register map with clock * * @dev: Device that will be interacted with + * @clk_id: register clock consumer ID * @regs: Pointer to memory-mapped IO region * @config: Configuration for register map * @@ -217,18 +333,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio); * to a struct regmap. The regmap will be automatically freed by the * device management code. 
*/ -struct regmap *devm_regmap_init_mmio(struct device *dev, - void __iomem *regs, - const struct regmap_config *config) +struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config) { struct regmap_mmio_context *ctx; - ctx = regmap_mmio_gen_context(regs, config); + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); if (IS_ERR(ctx)) return ERR_CAST(ctx); return devm_regmap_init(dev, ®map_mmio, ctx, config); } -EXPORT_SYMBOL_GPL(devm_regmap_init_mmio); +EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk); MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index ffa46a92ad3..0eb3097c0d7 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -12,9 +12,23 @@ #include <linux/regmap.h> #include <linux/spi/spi.h> -#include <linux/init.h> #include <linux/module.h> +#include "internal.h" + +struct regmap_async_spi { + struct regmap_async core; + struct spi_message m; + struct spi_transfer t[2]; +}; + +static void regmap_spi_complete(void *data) +{ + struct regmap_async_spi *async = data; + + regmap_async_complete_cb(&async->core, async->m.status); +} + static int regmap_spi_write(void *context, const void *data, size_t count) { struct device *dev = context; @@ -40,6 +54,44 @@ static int regmap_spi_gather_write(void *context, return spi_sync(spi, &m); } +static int regmap_spi_async_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len, + struct regmap_async *a) +{ + struct regmap_async_spi *async = container_of(a, + struct regmap_async_spi, + core); + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + async->t[0].tx_buf = reg; + async->t[0].len = reg_len; + async->t[1].tx_buf = val; + async->t[1].len = val_len; + + spi_message_init(&async->m); + spi_message_add_tail(&async->t[0], &async->m); + if (val) + spi_message_add_tail(&async->t[1], &async->m); + + async->m.complete = regmap_spi_complete; + async->m.context = async; + + return spi_async(spi, &async->m); +} + +static struct regmap_async *regmap_spi_async_alloc(void) +{ + struct regmap_async_spi *async_spi; + + async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL); + if (!async_spi) + return NULL; + + return &async_spi->core; +} + static int regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) @@ -53,6 +105,8 @@ static int regmap_spi_read(void *context, static struct regmap_bus regmap_spi = { .write = regmap_spi_write, .gather_write = regmap_spi_gather_write, + .async_write = regmap_spi_async_write, + .async_alloc = regmap_spi_async_alloc, .read = regmap_spi_read, .read_flag_mask = 0x80, }; diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c new file mode 100644 index 00000000000..d7026dc3338 --- /dev/null +++ b/drivers/base/regmap/regmap-spmi.c @@ -0,0 +1,256 @@ +/* + * Register map access API - SPMI support + * + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * Based on regmap-i2c.c: + * Copyright 2011 Wolfson Microelectronics plc + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_base_read(void *context,
+				 const void *reg, size_t reg_size,
+				 void *val, size_t val_size)
+{
+	u8 addr = *(u8 *)reg;
+	int err = 0;
+
+	BUG_ON(reg_size != 1);
+
+	while (val_size-- && !err)
+		err = spmi_register_read(context, addr++, val++);
+
+	return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+					 const void *reg, size_t reg_size,
+					 const void *val, size_t val_size)
+{
+	const u8 *data = val;
+	u8 addr = *(u8 *)reg;
+	int err = 0;
+
+	BUG_ON(reg_size != 1);
+
+	/*
+	 * SPMI defines a more bandwidth-efficient 'Register 0 Write'
+	 * sequence; use it when possible.
+	 */
+	if (addr == 0 && val_size) {
+		err = spmi_register_zero_write(context, *data);
+		if (err)
+			goto err_out;
+
+		data++;
+		addr++;
+		val_size--;
+	}
+
+	while (val_size) {
+		err = spmi_register_write(context, addr, *data);
+		if (err)
+			goto err_out;
+
+		data++;
+		addr++;
+		val_size--;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+				  size_t count)
+{
+	BUG_ON(count < 1);
+	return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+					     count - 1);
+}
+
+static struct regmap_bus regmap_spmi_base = {
+	.read				= regmap_spmi_base_read,
+	.write				= regmap_spmi_base_write,
+	.gather_write			= regmap_spmi_base_gather_write,
+	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
+				     const struct regmap_config *config)
+{
+	return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+
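
[Editor's note] For orientation, a consumer of the new SPMI base-space support would look roughly like the sketch below; the config values and the pmic_* names are illustrative assumptions, not part of this patch. The base register space is byte-addressed, hence 8-bit registers and values:

	static const struct regmap_config pmic_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0xff,
	};

	static int pmic_probe(struct spmi_device *sdev)
	{
		struct regmap *map;

		map = devm_regmap_init_spmi_base(sdev, &pmic_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* regmap_read()/regmap_write() can be used as usual */
		return 0;
	}
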
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
+					  const struct regmap_config *config)
+{
+	return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+
+static int regmap_spmi_ext_read(void *context,
+				const void *reg, size_t reg_size,
+				void *val, size_t val_size)
+{
+	int err = 0;
+	size_t len;
+	u16 addr;
+
+	BUG_ON(reg_size != 2);
+
+	addr = *(u16 *)reg;
+
+	/*
+	 * Split accesses into two to take advantage of the more
+	 * bandwidth-efficient 'Extended Register Read' command when possible
+	 */
+	while (addr <= 0xFF && val_size) {
+		len = min_t(size_t, val_size, 16);
+
+		err = spmi_ext_register_read(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+	while (val_size) {
+		len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_readl(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_ext_gather_write(void *context,
+					const void *reg, size_t reg_size,
+					const void *val, size_t val_size)
+{
+	int err = 0;
+	size_t len;
+	u16 addr;
+
+	BUG_ON(reg_size != 2);
+
+	addr = *(u16 *)reg;
+
+	while (addr <= 0xFF && val_size) {
+		len = min_t(size_t, val_size, 16);
+
+		err = spmi_ext_register_write(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+	while (val_size) {
+		len = min_t(size_t, val_size, 8);
+
+		err = spmi_ext_register_writel(context, addr, val, len);
+		if (err)
+			goto err_out;
+
+		addr += len;
+		val += len;
+		val_size -= len;
+	}
+
+err_out:
+	return err;
+}
+
+static int regmap_spmi_ext_write(void *context, const void *data,
+				 size_t count)
+{
+	BUG_ON(count < 2);
+	return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+					    count - 2);
+}
+
+static struct regmap_bus regmap_spmi_ext = {
+	.read				= regmap_spmi_ext_read,
+	.write				= regmap_spmi_ext_write,
+	.gather_write			= regmap_spmi_ext_gather_write,
+	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
+				    const struct regmap_config *config)
+{
+	return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev:	SPMI device that will be interacted with
+ * @config:	Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */ +struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev, + const struct regmap_config *config) +{ + return devm_regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index f00b059c057..74d8c0672cf 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -16,6 +16,7 @@ #include <linux/mutex.h> #include <linux/err.h> #include <linux/rbtree.h> +#include <linux/sched.h> #define CREATE_TRACE_POINTS #include <trace/events/regmap.h> @@ -34,6 +35,17 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change); +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val); +static int _regmap_bus_read(void *context, unsigned int reg, + unsigned int *val); +static int _regmap_bus_formatted_write(void *context, unsigned int reg, + unsigned int val); +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val); +static int _regmap_bus_raw_write(void *context, unsigned int reg, + unsigned int val); + bool regmap_reg_in_ranges(unsigned int reg, const struct regmap_range *ranges, unsigned int nranges) @@ -48,9 +60,8 @@ bool regmap_reg_in_ranges(unsigned int reg, } EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); -static bool _regmap_check_range_table(struct regmap *map, - unsigned int reg, - const struct regmap_access_table *table) +bool regmap_check_range_table(struct regmap *map, unsigned int reg, + const struct regmap_access_table *table) { /* Check "no ranges" first */ if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) @@ -63,6 +74,7 @@ static bool _regmap_check_range_table(struct regmap *map, return regmap_reg_in_ranges(reg, table->yes_ranges, table->n_yes_ranges); } +EXPORT_SYMBOL_GPL(regmap_check_range_table); bool regmap_writeable(struct regmap *map, unsigned int reg) { @@ -73,7 +85,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg) return map->writeable_reg(map->dev, reg); if (map->wr_table) - return _regmap_check_range_table(map, reg, map->wr_table); + return regmap_check_range_table(map, reg, map->wr_table); return true; } @@ -90,7 +102,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) return map->readable_reg(map->dev, reg); if (map->rd_table) - return _regmap_check_range_table(map, reg, map->rd_table); + return regmap_check_range_table(map, reg, map->rd_table); return true; } @@ -104,9 +116,12 @@ bool regmap_volatile(struct regmap *map, unsigned int reg) return map->volatile_reg(map->dev, reg); if (map->volatile_table) - return _regmap_check_range_table(map, reg, map->volatile_table); + return regmap_check_range_table(map, reg, map->volatile_table); - return true; + if (map->cache_ops) + return false; + else + return true; } bool regmap_precious(struct regmap *map, unsigned int reg) @@ -118,7 +133,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg) return map->precious_reg(map->dev, reg); if (map->precious_table) - return _regmap_check_range_table(map, reg, map->precious_table); + return regmap_check_range_table(map, reg, map->precious_table); return false; } @@ -181,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) b[0] = cpu_to_be16(val << shift); } +static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) +{ + __le16 *b = buf; + + b[0] = cpu_to_le16(val << shift); +} + static 
void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { @@ -205,36 +227,66 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) b[0] = cpu_to_be32(val << shift); } +static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) +{ + __le32 *b = buf; + + b[0] = cpu_to_le32(val << shift); +} + static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { *(u32 *)buf = val << shift; } -static unsigned int regmap_parse_8(void *buf) +static void regmap_parse_inplace_noop(void *buf) { - u8 *b = buf; +} + +static unsigned int regmap_parse_8(const void *buf) +{ + const u8 *b = buf; return b[0]; } -static unsigned int regmap_parse_16_be(void *buf) +static unsigned int regmap_parse_16_be(const void *buf) +{ + const __be16 *b = buf; + + return be16_to_cpu(b[0]); +} + +static unsigned int regmap_parse_16_le(const void *buf) +{ + const __le16 *b = buf; + + return le16_to_cpu(b[0]); +} + +static void regmap_parse_16_be_inplace(void *buf) { __be16 *b = buf; b[0] = be16_to_cpu(b[0]); +} - return b[0]; +static void regmap_parse_16_le_inplace(void *buf) +{ + __le16 *b = buf; + + b[0] = le16_to_cpu(b[0]); } -static unsigned int regmap_parse_16_native(void *buf) +static unsigned int regmap_parse_16_native(const void *buf) { return *(u16 *)buf; } -static unsigned int regmap_parse_24(void *buf) +static unsigned int regmap_parse_24(const void *buf) { - u8 *b = buf; + const u8 *b = buf; unsigned int ret = b[2]; ret |= ((unsigned int)b[1]) << 8; ret |= ((unsigned int)b[0]) << 16; @@ -242,16 +294,35 @@ static unsigned int regmap_parse_24(void *buf) return ret; } -static unsigned int regmap_parse_32_be(void *buf) +static unsigned int regmap_parse_32_be(const void *buf) +{ + const __be32 *b = buf; + + return be32_to_cpu(b[0]); +} + +static unsigned int regmap_parse_32_le(const void *buf) +{ + const __le32 *b = buf; + + return le32_to_cpu(b[0]); +} + +static void regmap_parse_32_be_inplace(void *buf) { __be32 *b = buf; b[0] = be32_to_cpu(b[0]); +} - return b[0]; +static void regmap_parse_32_le_inplace(void *buf) +{ + __le32 *b = buf; + + b[0] = le32_to_cpu(b[0]); } -static unsigned int regmap_parse_32_native(void *buf) +static unsigned int regmap_parse_32_native(const void *buf) { return *(u32 *)buf; } @@ -269,15 +340,20 @@ static void regmap_unlock_mutex(void *__map) } static void regmap_lock_spinlock(void *__map) +__acquires(&map->spinlock) { struct regmap *map = __map; - spin_lock(&map->spinlock); + unsigned long flags; + + spin_lock_irqsave(&map->spinlock, flags); + map->spinlock_flags = flags; } static void regmap_unlock_spinlock(void *__map) +__releases(&map->spinlock) { struct regmap *map = __map; - spin_unlock(&map->spinlock); + spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); } static void dev_get_regmap_release(struct device *dev, void *res) @@ -350,6 +426,28 @@ static void regmap_range_exit(struct regmap *map) kfree(map->selector_work_buf); } +int regmap_attach_dev(struct device *dev, struct regmap *map, + const struct regmap_config *config) +{ + struct regmap **m; + + map->dev = dev; + + regmap_debugfs_init(map, config->name); + + /* Add a devres resource for dev_get_regmap() */ + m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); + if (!m) { + regmap_debugfs_exit(map); + return -ENOMEM; + } + *m = map; + devres_add(dev, m); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_attach_dev); + /** * regmap_init(): Initialise register map * @@ -367,12 +465,12 @@ struct regmap *regmap_init(struct 
device *dev, void *bus_context, const struct regmap_config *config) { - struct regmap *map, **m; + struct regmap *map; int ret = -EINVAL; enum regmap_endian reg_endian, val_endian; int i, j; - if (!bus || !config) + if (!config) goto err; map = kzalloc(sizeof(*map), GFP_KERNEL); @@ -386,7 +484,8 @@ struct regmap *regmap_init(struct device *dev, map->unlock = config->unlock; map->lock_arg = config->lock_arg; } else { - if (bus->fast_io) { + if ((bus && bus->fast_io) || + config->fast_io) { spin_lock_init(&map->spinlock); map->lock = regmap_lock_spinlock; map->unlock = regmap_unlock_spinlock; @@ -408,6 +507,7 @@ struct regmap *regmap_init(struct device *dev, else map->reg_stride = 1; map->use_single_rw = config->use_single_rw; + map->can_multi_write = config->can_multi_write; map->dev = dev; map->bus = bus; map->bus_context = bus_context; @@ -423,13 +523,34 @@ struct regmap *regmap_init(struct device *dev, map->cache_type = config->cache_type; map->name = config->name; + spin_lock_init(&map->async_lock); + INIT_LIST_HEAD(&map->async_list); + INIT_LIST_HEAD(&map->async_free); + init_waitqueue_head(&map->async_waitq); + if (config->read_flag_mask || config->write_flag_mask) { map->read_flag_mask = config->read_flag_mask; map->write_flag_mask = config->write_flag_mask; - } else { + } else if (bus) { map->read_flag_mask = bus->read_flag_mask; } + if (!bus) { + map->reg_read = config->reg_read; + map->reg_write = config->reg_write; + + map->defer_caching = false; + goto skip_format_initialization; + } else if (!bus->read || !bus->write) { + map->reg_read = _regmap_bus_reg_read; + map->reg_write = _regmap_bus_reg_write; + + map->defer_caching = false; + goto skip_format_initialization; + } else { + map->reg_read = _regmap_bus_read; + } + reg_endian = config->reg_format_endian; if (reg_endian == REGMAP_ENDIAN_DEFAULT) reg_endian = bus->reg_format_endian_default; @@ -500,6 +621,12 @@ struct regmap *regmap_init(struct device *dev, } break; + case 24: + if (reg_endian != REGMAP_ENDIAN_BIG) + goto err_map; + map->format.format_reg = regmap_format_24; + break; + case 32: switch (reg_endian) { case REGMAP_ENDIAN_BIG: @@ -517,16 +644,26 @@ struct regmap *regmap_init(struct device *dev, goto err_map; } + if (val_endian == REGMAP_ENDIAN_NATIVE) + map->format.parse_inplace = regmap_parse_inplace_noop; + switch (config->val_bits) { case 8: map->format.format_val = regmap_format_8; map->format.parse_val = regmap_parse_8; + map->format.parse_inplace = regmap_parse_inplace_noop; break; case 16: switch (val_endian) { case REGMAP_ENDIAN_BIG: map->format.format_val = regmap_format_16_be; map->format.parse_val = regmap_parse_16_be; + map->format.parse_inplace = regmap_parse_16_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_16_le; + map->format.parse_val = regmap_parse_16_le; + map->format.parse_inplace = regmap_parse_16_le_inplace; break; case REGMAP_ENDIAN_NATIVE: map->format.format_val = regmap_format_16_native; @@ -547,6 +684,12 @@ struct regmap *regmap_init(struct device *dev, case REGMAP_ENDIAN_BIG: map->format.format_val = regmap_format_32_be; map->format.parse_val = regmap_parse_32_be; + map->format.parse_inplace = regmap_parse_32_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_32_le; + map->format.parse_val = regmap_parse_32_le; + map->format.parse_inplace = regmap_parse_32_le_inplace; break; case REGMAP_ENDIAN_NATIVE: map->format.format_val = regmap_format_32_native; @@ -575,6 +718,16 @@ struct regmap *regmap_init(struct 
device *dev,
		goto err_map;
	}
 
+	if (map->format.format_write) {
+		map->defer_caching = false;
+		map->reg_write = _regmap_bus_formatted_write;
+	} else if (map->format.format_val) {
+		map->defer_caching = true;
+		map->reg_write = _regmap_bus_raw_write;
+	}
+
+skip_format_initialization:
+
	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -613,6 +766,10 @@ struct regmap *regmap_init(struct device *dev,
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;
 
+			/* Allow data window inside its own virtual range */
+			if (j == i)
+				continue;
+
			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
@@ -646,7 +803,7 @@ struct regmap *regmap_init(struct device *dev,
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;
 
-		if (_regmap_range_add(map, new) == false) {
+		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
@@ -666,21 +823,15 @@ struct regmap *regmap_init(struct device *dev,
	if (ret != 0)
		goto err_range;
 
-	regmap_debugfs_init(map, config->name);
-
-	/* Add a devres resource for dev_get_regmap() */
-	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
-	if (!m) {
-		ret = -ENOMEM;
-		goto err_debugfs;
+	if (dev) {
+		ret = regmap_attach_dev(dev, map, config);
+		if (ret != 0)
+			goto err_regcache;
	}
-	*m = map;
-	devres_add(dev, m);
 
	return map;
 
-err_debugfs:
-	regmap_debugfs_exit(map);
+err_regcache:
	regcache_exit(map);
 err_range:
	regmap_range_exit(map);
@@ -733,6 +884,97 @@ struct regmap *devm_regmap_init(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init);
 
+static void regmap_field_init(struct regmap_field *rm_field,
+	struct regmap *regmap, struct reg_field reg_field)
+{
+	int field_bits = reg_field.msb - reg_field.lsb + 1;
+	rm_field->regmap = regmap;
+	rm_field->reg = reg_field.reg;
+	rm_field->shift = reg_field.lsb;
+	rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+	rm_field->id_size = reg_field.id_size;
+	rm_field->id_offset = reg_field.id_offset;
+}
+
+/**
+ * devm_regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field.  The regmap_field will be automatically freed
+ * by the device management code.
+ */
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+		struct regmap *regmap, struct reg_field reg_field)
+{
+	struct regmap_field *rm_field = devm_kzalloc(dev,
+					sizeof(*rm_field), GFP_KERNEL);
+	if (!rm_field)
+		return ERR_PTR(-ENOMEM);
+
+	regmap_field_init(rm_field, regmap, reg_field);
+
+	return rm_field;
+
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
+
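
[Editor's note] A minimal sketch of the new field API from a consumer's point of view; the register address and bit positions are invented for illustration, and regmap_field_write() is introduced later in this same patch:

	/* bits [5:3] of an (imaginary) control register at 0x04 */
	struct reg_field gain_field = {
		.reg = 0x04,
		.lsb = 3,
		.msb = 5,
	};
	struct regmap_field *gain;

	gain = devm_regmap_field_alloc(dev, map, gain_field);
	if (IS_ERR(gain))
		return PTR_ERR(gain);

	/* read-modify-write of just those three bits */
	ret = regmap_field_write(gain, 0x5);
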
+/**
+ * devm_regmap_field_free(): Free register field allocated using
+ * devm_regmap_field_alloc.  Usually drivers need not call this function,
+ * as the memory allocated via devm will be freed as per the device-driver
+ * life-cycle.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ */
+void devm_regmap_field_free(struct device *dev,
+	struct regmap_field *field)
+{
+	devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_free);
+
+/**
+ * regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field.  The regmap_field should be freed by the
+ * user using regmap_field_free() once they are finished with it.
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+		struct reg_field reg_field)
+{
+	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
+
+	if (!rm_field)
+		return ERR_PTR(-ENOMEM);
+
+	regmap_field_init(rm_field, regmap, reg_field);
+
+	return rm_field;
+}
+EXPORT_SYMBOL_GPL(regmap_field_alloc);
+
+/**
+ * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ *
+ * @field: regmap field which should be freed.
+ */
+void regmap_field_free(struct regmap_field *field)
+{
+	kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_free);
+
 /**
  * regmap_reinit_cache(): Reinitialise the current register cache
  *
@@ -773,12 +1015,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+	struct regmap_async *async;
+
	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
-	if (map->bus->free_context)
+	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
+	while (!list_empty(&map->async_free)) {
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		list_del(&async->list);
+		kfree(async->work_buf);
+		kfree(async);
+	}
	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -869,16 +1121,21 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
	return 0;
 }
 
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
-			     const void *val, size_t val_len)
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+		      const void *val, size_t val_len)
 {
	struct regmap_range_node *range;
+	unsigned long flags;
	u8 *u8 = map->work_buf;
+	void *work_val = map->work_buf + map->format.reg_bytes +
+		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;
 
+	WARN_ON(!map->bus);
+
	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -890,8 +1147,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
-			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-			ival = map->format.parse_val(map->work_buf);
+			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
@@ -941,6 +1197,77 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
	u8[0] |= map->write_flag_mask;
 
+	/*
+	 * Essentially all I/O mechanisms will be faster with a single
+	 * buffer to write.  Since register syncs often generate raw
+	 * writes of single registers, optimise that case.
+ */ + if (val != work_val && val_len == map->format.val_bytes) { + memcpy(work_val, val, map->format.val_bytes); + val = work_val; + } + + if (map->async && map->bus->async_write) { + struct regmap_async *async; + + trace_regmap_async_write_start(map->dev, reg, val_len); + + spin_lock_irqsave(&map->async_lock, flags); + async = list_first_entry_or_null(&map->async_free, + struct regmap_async, + list); + if (async) + list_del(&async->list); + spin_unlock_irqrestore(&map->async_lock, flags); + + if (!async) { + async = map->bus->async_alloc(); + if (!async) + return -ENOMEM; + + async->work_buf = kzalloc(map->format.buf_size, + GFP_KERNEL | GFP_DMA); + if (!async->work_buf) { + kfree(async); + return -ENOMEM; + } + } + + async->map = map; + + /* If the caller supplied the value we can use it safely. */ + memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + + map->format.reg_bytes + map->format.val_bytes); + + spin_lock_irqsave(&map->async_lock, flags); + list_add_tail(&async->list, &map->async_list); + spin_unlock_irqrestore(&map->async_lock, flags); + + if (val != work_val) + ret = map->bus->async_write(map->bus_context, + async->work_buf, + map->format.reg_bytes + + map->format.pad_bytes, + val, val_len, async); + else + ret = map->bus->async_write(map->bus_context, + async->work_buf, + map->format.reg_bytes + + map->format.pad_bytes + + val_len, NULL, 0, async); + + if (ret != 0) { + dev_err(map->dev, "Failed to schedule write: %d\n", + ret); + + spin_lock_irqsave(&map->async_lock, flags); + list_move(&async->list, &map->async_free); + spin_unlock_irqrestore(&map->async_lock, flags); + } + + return ret; + } + trace_regmap_hw_write_start(map->dev, reg, val_len / map->format.val_bytes); @@ -948,8 +1275,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, * send the work_buf directly, otherwise try to do a gather * write. */ - if (val == (map->work_buf + map->format.pad_bytes + - map->format.reg_bytes)) + if (val == work_val) ret = map->bus->write(map->bus_context, map->work_buf, map->format.reg_bytes + map->format.pad_bytes + @@ -981,14 +1307,84 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, return ret; } +/** + * regmap_can_raw_write - Test if regmap_raw_write() is supported + * + * @map: Map to check. 
+ */ +bool regmap_can_raw_write(struct regmap *map) +{ + return map->bus && map->format.format_val && map->format.format_reg; +} +EXPORT_SYMBOL_GPL(regmap_can_raw_write); + +static int _regmap_bus_formatted_write(void *context, unsigned int reg, + unsigned int val) +{ + int ret; + struct regmap_range_node *range; + struct regmap *map = context; + + WARN_ON(!map->bus || !map->format.format_write); + + range = _regmap_range_lookup(map, reg); + if (range) { + ret = _regmap_select_page(map, ®, range, 1); + if (ret != 0) + return ret; + } + + map->format.format_write(map, reg, val); + + trace_regmap_hw_write_start(map->dev, reg, 1); + + ret = map->bus->write(map->bus_context, map->work_buf, + map->format.buf_size); + + trace_regmap_hw_write_done(map->dev, reg, 1); + + return ret; +} + +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct regmap *map = context; + + return map->bus->reg_write(map->bus_context, reg, val); +} + +static int _regmap_bus_raw_write(void *context, unsigned int reg, + unsigned int val) +{ + struct regmap *map = context; + + WARN_ON(!map->bus || !map->format.format_val); + + map->format.format_val(map->work_buf + map->format.reg_bytes + + map->format.pad_bytes, val, 0); + return _regmap_raw_write(map, reg, + map->work_buf + + map->format.reg_bytes + + map->format.pad_bytes, + map->format.val_bytes); +} + +static inline void *_regmap_map_get_context(struct regmap *map) +{ + return (map->bus) ? map : map->bus_context; +} + int _regmap_write(struct regmap *map, unsigned int reg, unsigned int val) { - struct regmap_range_node *range; int ret; - BUG_ON(!map->format.format_write && !map->format.format_val); + void *context = _regmap_map_get_context(map); + + if (!regmap_writeable(map, reg)) + return -EIO; - if (!map->cache_bypass && map->format.format_write) { + if (!map->cache_bypass && !map->defer_caching) { ret = regcache_write(map, reg, val); if (ret != 0) return ret; @@ -1005,37 +1401,38 @@ int _regmap_write(struct regmap *map, unsigned int reg, trace_regmap_reg_write(map->dev, reg, val); - if (map->format.format_write) { - range = _regmap_range_lookup(map, reg); - if (range) { - ret = _regmap_select_page(map, ®, range, 1); - if (ret != 0) - return ret; - } + return map->reg_write(context, reg, val); +} + +/** + * regmap_write(): Write a value to a single register + * + * @map: Register map to write to + * @reg: Register to write to + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
+ */ +int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) +{ + int ret; - map->format.format_write(map, reg, val); + if (reg % map->reg_stride) + return -EINVAL; - trace_regmap_hw_write_start(map->dev, reg, 1); + map->lock(map->lock_arg); - ret = map->bus->write(map->bus_context, map->work_buf, - map->format.buf_size); + ret = _regmap_write(map, reg, val); - trace_regmap_hw_write_done(map->dev, reg, 1); + map->unlock(map->lock_arg); - return ret; - } else { - map->format.format_val(map->work_buf + map->format.reg_bytes - + map->format.pad_bytes, val, 0); - return _regmap_raw_write(map, reg, - map->work_buf + - map->format.reg_bytes + - map->format.pad_bytes, - map->format.val_bytes); - } + return ret; } +EXPORT_SYMBOL_GPL(regmap_write); /** - * regmap_write(): Write a value to a single register + * regmap_write_async(): Write a value to a single register asynchronously * * @map: Register map to write to * @reg: Register to write to @@ -1044,7 +1441,7 @@ int _regmap_write(struct regmap *map, unsigned int reg, * A value of zero will be returned on success, a negative errno will * be returned in error cases. */ -int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) +int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) { int ret; @@ -1053,13 +1450,17 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) map->lock(map->lock_arg); + map->async = true; + ret = _regmap_write(map, reg, val); + map->async = false; + map->unlock(map->lock_arg); return ret; } -EXPORT_SYMBOL_GPL(regmap_write); +EXPORT_SYMBOL_GPL(regmap_write_async); /** * regmap_raw_write(): Write raw values to one or more registers @@ -1082,9 +1483,9 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, { int ret; - if (val_len % map->format.val_bytes) + if (!regmap_can_raw_write(map)) return -EINVAL; - if (reg % map->reg_stride) + if (val_len % map->format.val_bytes) return -EINVAL; map->lock(map->lock_arg); @@ -1097,6 +1498,90 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, } EXPORT_SYMBOL_GPL(regmap_raw_write); +/** + * regmap_field_write(): Write a value to a single register field + * + * @field: Register field to write to + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_write(struct regmap_field *field, unsigned int val) +{ + return regmap_update_bits(field->regmap, field->reg, + field->mask, val << field->shift); +} +EXPORT_SYMBOL_GPL(regmap_field_write); + +/** + * regmap_field_update_bits(): Perform a read/modify/write cycle + * on the register field + * + * @field: Register field to write to + * @mask: Bitmask to change + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val) +{ + mask = (mask << field->shift) & field->mask; + + return regmap_update_bits(field->regmap, field->reg, + mask, val << field->shift); +} +EXPORT_SYMBOL_GPL(regmap_field_update_bits); + +/** + * regmap_fields_write(): Write a value to a single register field with port ID + * + * @field: Register field to write to + * @id: port ID + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
+ */ +int regmap_fields_write(struct regmap_field *field, unsigned int id, + unsigned int val) +{ + if (id >= field->id_size) + return -EINVAL; + + return regmap_update_bits(field->regmap, + field->reg + (field->id_offset * id), + field->mask, val << field->shift); +} +EXPORT_SYMBOL_GPL(regmap_fields_write); + +/** + * regmap_fields_update_bits(): Perform a read/modify/write cycle + * on the register field + * + * @field: Register field to write to + * @id: port ID + * @mask: Bitmask to change + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + if (id >= field->id_size) + return -EINVAL; + + mask = (mask << field->shift) & field->mask; + + return regmap_update_bits(field->regmap, + field->reg + (field->id_offset * id), + mask, val << field->shift); +} +EXPORT_SYMBOL_GPL(regmap_fields_update_bits); + /* * regmap_bulk_write(): Write multiple registers to the device * @@ -1116,53 +1601,362 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, { int ret = 0, i; size_t val_bytes = map->format.val_bytes; - void *wval; - if (!map->format.parse_val) + if (map->bus && !map->format.parse_inplace) return -EINVAL; if (reg % map->reg_stride) return -EINVAL; - map->lock(map->lock_arg); + /* + * Some devices don't support bulk write, for + * them we have a series of single write operations. + */ + if (!map->bus || map->use_single_rw) { + map->lock(map->lock_arg); + for (i = 0; i < val_count; i++) { + unsigned int ival; - /* No formatting is require if val_byte is 1 */ - if (val_bytes == 1) { - wval = (void *)val; + switch (val_bytes) { + case 1: + ival = *(u8 *)(val + (i * val_bytes)); + break; + case 2: + ival = *(u16 *)(val + (i * val_bytes)); + break; + case 4: + ival = *(u32 *)(val + (i * val_bytes)); + break; +#ifdef CONFIG_64BIT + case 8: + ival = *(u64 *)(val + (i * val_bytes)); + break; +#endif + default: + ret = -EINVAL; + goto out; + } + + ret = _regmap_write(map, reg + (i * map->reg_stride), + ival); + if (ret != 0) + goto out; + } +out: + map->unlock(map->lock_arg); } else { + void *wval; + wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); + if (!wval) { - ret = -ENOMEM; dev_err(map->dev, "Error in memory allocation\n"); - goto out; + return -ENOMEM; } for (i = 0; i < val_count * val_bytes; i += val_bytes) - map->format.parse_val(wval + i); + map->format.parse_inplace(wval + i); + + map->lock(map->lock_arg); + ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); + map->unlock(map->lock_arg); + + kfree(wval); + } + return ret; +} +EXPORT_SYMBOL_GPL(regmap_bulk_write);
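regmap_bulk_write() takes val in the device's native value size and writes val_count registers spaced at the map's reg_stride. A minimal sketch of a caller, not from this patch; FOO_COEF_BASE and the coefficient values are invented:

#include <linux/regmap.h>

#define FOO_COEF_BASE 0x40 /* hypothetical register address */

/* Push four 16-bit coefficients to consecutive registers in one bulk
 * operation; regmap formats the values per the map's configuration. */
static int foo_load_coefs(struct regmap *map)
{
	static const u16 coefs[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };

	return regmap_bulk_write(map, FOO_COEF_BASE, coefs,
				 ARRAY_SIZE(coefs));
}

With the change above, a map that has use_single_rw set (or no bus at all) now falls back to the single-write loop instead of failing.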
+ +/* + * _regmap_raw_multi_reg_write() + * + * the (register,newvalue) pairs in regs have not been formatted, but + * they are all in the same page and have been changed to be page + * relative. The page register has been written if that was necessary. + */ +static int _regmap_raw_multi_reg_write(struct regmap *map, + const struct reg_default *regs, + size_t num_regs) +{ + int ret; + void *buf; + int i; + u8 *u8; + size_t val_bytes = map->format.val_bytes; + size_t reg_bytes = map->format.reg_bytes; + size_t pad_bytes = map->format.pad_bytes; + size_t pair_size = reg_bytes + pad_bytes + val_bytes; + size_t len = pair_size * num_regs; + + if (!len) + return -EINVAL; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* We have to linearise by hand. */ + + u8 = buf; + + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + int val = regs[i].def; + trace_regmap_hw_write_start(map->dev, reg, 1); + map->format.format_reg(u8, reg, map->reg_shift); + u8 += reg_bytes + pad_bytes; + map->format.format_val(u8, val, 0); + u8 += val_bytes; } + u8 = buf; + *u8 |= map->write_flag_mask; + + ret = map->bus->write(map->bus_context, buf, len); + + kfree(buf); + + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + trace_regmap_hw_write_done(map->dev, reg, 1); + } + return ret; +} + +static unsigned int _regmap_register_page(struct regmap *map, + unsigned int reg, + struct regmap_range_node *range) +{ + unsigned int win_page = (reg - range->range_min) / range->window_len; + + return win_page; +} + +static int _regmap_range_multi_paged_reg_write(struct regmap *map, + struct reg_default *regs, + size_t num_regs) +{ + int ret; + int i, n; + struct reg_default *base; + unsigned int this_page = 0; /* + * the set of registers is not necessarily in order, but + * since the order of write must be preserved this algorithm + * chops the set each time the page changes */ - if (map->use_single_rw) { - for (i = 0; i < val_count; i++) { - ret = regmap_raw_write(map, - reg + (i * map->reg_stride), - val + (i * val_bytes), - val_bytes); + base = regs; + for (i = 0, n = 0; i < num_regs; i++, n++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + + range = _regmap_range_lookup(map, reg); + if (range) { + unsigned int win_page = _regmap_register_page(map, reg, + range); + + if (i == 0) + this_page = win_page; + if (win_page != this_page) { + this_page = win_page; + ret = _regmap_raw_multi_reg_write(map, base, n); + if (ret != 0) + return ret; + base += n; + n = 0; + } + ret = _regmap_select_page(map, &base[n].reg, range, 1); if (ret != 0) return ret; } - } else { - ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); } + if (n > 0) + return _regmap_raw_multi_reg_write(map, base, n); + return 0; +} - if (val_bytes != 1) - kfree(wval); +static int _regmap_multi_reg_write(struct regmap *map, + const struct reg_default *regs, + size_t num_regs) +{ + int i; + int ret; + + if (!map->can_multi_write) { + for (i = 0; i < num_regs; i++) { + ret = _regmap_write(map, regs[i].reg, regs[i].def); + if (ret != 0) + return ret; + } + return 0; + } + + if (!map->format.parse_inplace) + return -EINVAL; + + if (map->writeable_reg) + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + if (!map->writeable_reg(map->dev, reg)) + return -EINVAL; + if (reg % map->reg_stride) + return -EINVAL; + } + + if (!map->cache_bypass) { + for (i = 0; i < num_regs; i++) { + unsigned int val = regs[i].def; + unsigned int reg = regs[i].reg; + ret = regcache_write(map, reg, val); + if (ret) { + dev_err(map->dev, + "Error in caching of register: %x ret: %d\n", + reg, ret); + return ret; + } + } + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + + WARN_ON(!map->bus); + + for (i = 0; i < num_regs; i++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + range = _regmap_range_lookup(map, reg); + if (range) { + size_t len = sizeof(struct reg_default)*num_regs; + struct reg_default *base = kmemdup(regs, len, + GFP_KERNEL); + if (!base) + return -ENOMEM; + ret = _regmap_range_multi_paged_reg_write(map, base, + num_regs); + kfree(base); + + return ret; + } + } + return _regmap_raw_multi_reg_write(map, regs, num_regs); +}
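For orientation, and assuming hypothetical format sizes (reg_bytes = 1, pad_bytes = 0, val_bytes = 2), the linearisation above turns three (register, value) pairs into a single nine-byte bus transfer, with write_flag_mask OR'd into the first byte only:

/* buf as assembled by _regmap_raw_multi_reg_write():
 *
 *   [R1|flag][V1hi][V1lo] [R2][V2hi][V2lo] [R3][V3hi][V3lo]
 *
 * When a register falls in a different window of an indirect range,
 * _regmap_range_multi_paged_reg_write() flushes the pairs gathered so
 * far, reprograms the page register via _regmap_select_page(), and
 * starts packing again, preserving the overall write order.
 */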
+ +/* + * regmap_multi_reg_write(): Write multiple registers to the device + * + * where the set of register,value pairs are supplied in any order, + * possibly not all in a single range. + * + * @map: Register map to write to + * @regs: Array of structures containing register,value to be written + * @num_regs: Number of registers to write + * + * The 'normal' block write mode will ultimately send data on the + * target bus as R,V1,V2,V3,..,Vn where successively higher registers are + * addressed. However, this alternative block multi write mode will send + * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device + * must of course support the mode. + * + * A value of zero will be returned on success, a negative errno will be + * returned in error cases. + */ +int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, + int num_regs) +{ + int ret; + + map->lock(map->lock_arg); + + ret = _regmap_multi_reg_write(map, regs, num_regs); -out: map->unlock(map->lock_arg); + return ret; } -EXPORT_SYMBOL_GPL(regmap_bulk_write); +EXPORT_SYMBOL_GPL(regmap_multi_reg_write); + +/* + * regmap_multi_reg_write_bypassed(): Write multiple registers to the + * device but not the cache + * + * where the set of registers is supplied in any order + * + * @map: Register map to write to + * @regs: Array of structures containing register,value to be written + * @num_regs: Number of registers to write + * + * This function is intended to be used for writing a large block of data + * atomically to the device in a single transfer for those I2C client devices + * that implement this alternative block write mode. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_multi_reg_write_bypassed(struct regmap *map, + const struct reg_default *regs, + int num_regs) +{ + int ret; + bool bypass; + + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + map->cache_bypass = true; + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->cache_bypass = bypass; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
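Illustrative rather than from the patch, a caller handing regmap_multi_reg_write() a scattered, order-sensitive init sequence (device and register numbers made up):

#include <linux/regmap.h>

static const struct reg_default bar_init[] = {
	{ .reg = 0x20, .def = 0x01 },	/* must reach the device first */
	{ .reg = 0x07, .def = 0xff },
	{ .reg = 0x31, .def = 0x80 },
};

static int bar_hw_init(struct regmap *map)
{
	/* Goes out as R1,V1,R2,V2,R3,V3 when map->can_multi_write is
	 * set; otherwise it degrades to individual writes, in order. */
	return regmap_multi_reg_write(map, bar_init, ARRAY_SIZE(bar_init));
}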
+ +/** + * regmap_raw_write_async(): Write raw values to one or more registers + * asynchronously + * + * @map: Register map to write to + * @reg: Initial register to write to + * @val: Block of data to be written, laid out for direct transmission to the + * device. Must be valid until regmap_async_complete() is called. + * @val_len: Length of data pointed to by val. + * + * This function is intended to be used for things like firmware + * download where a large block of data needs to be transferred to the + * device. No formatting will be done on the data provided. + * + * If supported by the underlying bus the write will be scheduled + * asynchronously, helping maximise I/O speed on higher speed buses + * like SPI. regmap_async_complete() can be called to ensure that all + * asynchronous writes have been completed. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_raw_write_async(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + int ret; + + if (val_len % map->format.val_bytes) + return -EINVAL; + if (reg % map->reg_stride) + return -EINVAL; + + map->lock(map->lock_arg); + + map->async = true; + + ret = _regmap_raw_write(map, reg, val, val_len); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_raw_write_async); static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, unsigned int val_len) @@ -1171,6 +1965,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, u8 *u8 = map->work_buf; int ret; + WARN_ON(!map->bus); + range = _regmap_range_lookup(map, reg); if (range) { ret = _regmap_select_page(map, &reg, range, @@ -1202,10 +1998,37 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, return ret; } +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct regmap *map = context; + + return map->bus->reg_read(map->bus_context, reg, val); +} + +static int _regmap_bus_read(void *context, unsigned int reg, + unsigned int *val) +{ + int ret; + struct regmap *map = context; + + if (!map->format.parse_val) + return -EINVAL; + + ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); + if (ret == 0) + *val = map->format.parse_val(map->work_buf); + + return ret; +} + static int _regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) { int ret; + void *context = _regmap_map_get_context(map); + + WARN_ON(!map->reg_read); if (!map->cache_bypass) { ret = regcache_read(map, reg, val); @@ -1213,26 +2036,24 @@ static int _regmap_read(struct regmap *map, unsigned int reg, return 0; } - if (!map->format.parse_val) - return -EINVAL; - if (map->cache_only) return -EBUSY; - ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); - if (ret == 0) { - *val = map->format.parse_val(map->work_buf); + if (!regmap_readable(map, reg)) + return -EIO; + ret = map->reg_read(context, reg, val); + if (ret == 0) { #ifdef LOG_DEVICE if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) dev_info(map->dev, "%x => %x\n", reg, *val); #endif trace_regmap_reg_read(map->dev, reg, *val); - } - if (ret == 0 && !map->cache_bypass) - regcache_write(map, reg, *val); + if (!map->cache_bypass) + regcache_write(map, reg, *val); + } return ret; } @@ -1240,7 +2061,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg, /** * regmap_read(): Read a value from a single register * - * @map: Register map to write to + * @map: Register map to read from * @reg: Register to be read from * @val: Pointer to store read value * @@ -1267,7 +2088,7 @@ EXPORT_SYMBOL_GPL(regmap_read); /** * regmap_raw_read(): Read raw data from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value * @val_len: Size of data to read @@ -1283,6 +2104,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, unsigned int v; int ret, i; + if (!map->bus) + return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; if (reg % map->reg_stride) @@ -1317,9 +2140,67 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, EXPORT_SYMBOL_GPL(regmap_raw_read);
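One plausible shape for the firmware-download case described in the regmap_raw_write_async() kernel-doc above (a sketch only; BAZ_DSP_BASE and the surrounding driver are invented):

#include <linux/firmware.h>
#include <linux/regmap.h>

#define BAZ_DSP_BASE 0x1000 /* hypothetical register address */

static int baz_download(struct regmap *map, const struct firmware *fw)
{
	int ret;

	/* fw->data must stay valid until regmap_async_complete()
	 * returns; fw->size must be a multiple of the map's val_bytes. */
	ret = regmap_raw_write_async(map, BAZ_DSP_BASE, fw->data, fw->size);
	if (ret != 0)
		return ret;

	/* Block until the bus driver has drained all queued writes. */
	return regmap_async_complete(map);
}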
 /** + * regmap_field_read(): Read a value from a single register field + * + * @field: Register field to read from + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_read(struct regmap_field *field, unsigned int *val) +{ + int ret; + unsigned int reg_val; + ret = regmap_read(field->regmap, field->reg, &reg_val); + if (ret != 0) + return ret; + + reg_val &= field->mask; + reg_val >>= field->shift; + *val = reg_val; + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_field_read); + +/** + * regmap_fields_read(): Read a value from a single register field with port ID + * + * @field: Register field to read from + * @id: port ID + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_fields_read(struct regmap_field *field, unsigned int id, + unsigned int *val) +{ + int ret; + unsigned int reg_val; + + if (id >= field->id_size) + return -EINVAL; + + ret = regmap_read(field->regmap, + field->reg + (field->id_offset * id), + &reg_val); + if (ret != 0) + return ret; + + reg_val &= field->mask; + reg_val >>= field->shift; + *val = reg_val; + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_fields_read); + +/** * regmap_bulk_read(): Read multiple registers from the device * - * @map: Register map to write to + * @map: Register map to read from * @reg: First register to be read from * @val: Pointer to store read value, in native register size for device * @val_count: Number of registers to read @@ -1334,12 +2215,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, size_t val_bytes = map->format.val_bytes; bool vol = regmap_volatile_range(map, reg, val_count); - if (!map->format.parse_val) - return -EINVAL; if (reg % map->reg_stride) return -EINVAL; - if (vol || map->cache_type == REGCACHE_NONE) { + if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { /* * Some devices do not support bulk read, for * them we have a series of single read operations. @@ -1361,7 +2240,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, } for (i = 0; i < val_count * val_bytes; i += val_bytes) - map->format.parse_val(val + i); + map->format.parse_inplace(val + i); } else { for (i = 0; i < val_count; i++) { unsigned int ival; @@ -1393,9 +2272,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, if (tmp != orig) { ret = _regmap_write(map, reg, tmp); - *change = true; + if (change) + *change = true; } else { - *change = false; + if (change) + *change = false; } return ret; @@ -1414,11 +2295,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, int regmap_update_bits(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val) { - bool change; int ret; map->lock(map->lock_arg); - ret = _regmap_update_bits(map, reg, mask, val, &change); + ret = _regmap_update_bits(map, reg, mask, val, NULL); map->unlock(map->lock_arg); return ret; @@ -1426,6 +2306,40 @@ int regmap_update_bits(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_update_bits);
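A sketch of how the regmap_field API added in this diff is typically used, assuming a made-up control register QUX_CTRL with a gain field in bits 6:4; REG_FIELD() and devm_regmap_field_alloc() come from the same regmap_field series, not from this file:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

#define QUX_CTRL 0x02 /* hypothetical register address */

static const struct reg_field qux_gain_field = REG_FIELD(QUX_CTRL, 4, 6);

static int qux_set_gain(struct device *dev, struct regmap *map,
			unsigned int gain)
{
	struct regmap_field *f;

	f = devm_regmap_field_alloc(dev, map, qux_gain_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* A masked regmap_update_bits() on QUX_CTRL under the hood;
	 * neighbouring bits are left untouched. */
	return regmap_field_write(f, gain);
}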
 /** + * regmap_update_bits_async: Perform a read/modify/write cycle on the register + * map asynchronously + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * + * With most buses the read must be done synchronously so this is most + * useful for devices with a cache which do not need to interact with + * the hardware to determine the current register value. + * + * Returns zero for success, a negative number on error. + */ +int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + int ret; + + map->lock(map->lock_arg); + + map->async = true; + + ret = _regmap_update_bits(map, reg, mask, val, NULL); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_update_bits_async); + +/** * regmap_update_bits_check: Perform a read/modify/write cycle on the * register map and report if updated * @@ -1451,6 +2365,108 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_update_bits_check); /** + * regmap_update_bits_check_async: Perform a read/modify/write cycle on the + * register map asynchronously and report if + * updated + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * @change: Boolean indicating if a write was done + * + * With most buses the read must be done synchronously so this is most + * useful for devices with a cache which do not need to interact with + * the hardware to determine the current register value. + * + * Returns zero for success, a negative number on error. + */ +int regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + int ret; + + map->lock(map->lock_arg); + + map->async = true; + + ret = _regmap_update_bits(map, reg, mask, val, change); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_update_bits_check_async); + +void regmap_async_complete_cb(struct regmap_async *async, int ret) +{ + struct regmap *map = async->map; + bool wake; + + trace_regmap_async_io_complete(map->dev); + + spin_lock(&map->async_lock); + list_move(&async->list, &map->async_free); + wake = list_empty(&map->async_list); + + if (ret != 0) + map->async_ret = ret; + + spin_unlock(&map->async_lock); + + if (wake) + wake_up(&map->async_waitq); +} +EXPORT_SYMBOL_GPL(regmap_async_complete_cb); + +static int regmap_async_is_done(struct regmap *map) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&map->async_lock, flags); + ret = list_empty(&map->async_list); + spin_unlock_irqrestore(&map->async_lock, flags); + + return ret; +} + +/** + * regmap_async_complete: Ensure all asynchronous I/O has completed. + * + * @map: Map to operate on. + * + * Blocks until any pending asynchronous I/O has completed. Returns + * an error code for any failed I/O operations. + */ +int regmap_async_complete(struct regmap *map) +{ + unsigned long flags; + int ret; + + /* Nothing to do with no async support */ + if (!map->bus || !map->bus->async_write) + return 0; + + trace_regmap_async_complete_start(map->dev); + + wait_event(map->async_waitq, regmap_async_is_done(map)); + + spin_lock_irqsave(&map->async_lock, flags); + ret = map->async_ret; + map->async_ret = 0; + spin_unlock_irqrestore(&map->async_lock, flags); + + trace_regmap_async_complete_done(map->dev); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_async_complete); + +/** * regmap_register_patch: Register and apply register updates to be applied * on device initialisation * * @map: Register map to apply updates to. * @regs: Values to update. * @num_regs: Number of entries in regs. * * Register a set of register updates to be applied to the device * whenever the device registers are synchronised with the cache and * apply them immediately. Typically this is used to apply * corrections to the device defaults on startup, such * as the updates some vendors provide to undocumented registers.
+ * + * The caller must ensure that this function cannot be called + * concurrently with either itself or regcache_sync(). */ int regmap_register_patch(struct regmap *map, const struct reg_default *regs, int num_regs) { - int i, ret; + struct reg_default *p; + int ret; bool bypass; - /* If needed the implementation can be extended to support this */ - if (map->patch) - return -EBUSY; + if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", + num_regs)) + return 0; + + p = krealloc(map->patch, + sizeof(struct reg_default) * (map->patch_regs + num_regs), + GFP_KERNEL); + if (p) { + memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); + map->patch = p; + map->patch_regs += num_regs; + } else { + return -ENOMEM; + } map->lock(map->lock_arg); bypass = map->cache_bypass; map->cache_bypass = true; + map->async = true; - /* Write out first; it's useful to apply even if we fail later. */ - for (i = 0; i < num_regs; i++) { - ret = _regmap_write(map, regs[i].reg, regs[i].def); - if (ret != 0) { - dev_err(map->dev, "Failed to write %x = %x: %d\n", - regs[i].reg, regs[i].def, ret); - goto out; - } - } - - map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL); - if (map->patch != NULL) { - memcpy(map->patch, regs, - num_regs * sizeof(struct reg_default)); - map->patch_regs = num_regs; - } else { - ret = -ENOMEM; - } + ret = _regmap_multi_reg_write(map, regs, num_regs); + if (ret != 0) + goto out; out: + map->async = false; map->cache_bypass = bypass; map->unlock(map->lock_arg); + regmap_async_complete(map); + return ret; } EXPORT_SYMBOL_GPL(regmap_register_patch); @@ -1523,6 +2543,18 @@ int regmap_get_val_bytes(struct regmap *map) } EXPORT_SYMBOL_GPL(regmap_get_val_bytes); +int regmap_parse_val(struct regmap *map, const void *buf, + unsigned int *val) +{ + if (!map->format.parse_val) + return -EINVAL; + + *val = map->format.parse_val(buf); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_parse_val); + static int __init regmap_initcall(void) { regmap_debugfs_initcall(); diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c new file mode 100644 index 00000000000..a73fbf3b8e5 --- /dev/null +++ b/drivers/base/reservation.c @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2012-2013 Canonical Ltd + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#include <linux/reservation.h> +#include <linux/export.h> + +DEFINE_WW_CLASS(reservation_ww_class); +EXPORT_SYMBOL(reservation_ww_class); diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index e8d11b6630e..dbb8350ea8d 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c @@ -10,6 +10,7 @@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/interrupt.h> +#include <trace/events/power.h> static LIST_HEAD(syscore_ops_list); static DEFINE_MUTEX(syscore_ops_lock); @@ -49,6 +50,7 @@ int syscore_suspend(void) struct syscore_ops *ops; int ret = 0; + trace_suspend_resume(TPS("syscore_suspend"), 0, true); pr_debug("Checking wakeup interrupts\n"); /* Return error code if there are any wakeup interrupts pending. */ @@ -70,6 +72,7 @@ int syscore_suspend(void) "Interrupts enabled after %pF\n", ops->suspend); } + trace_suspend_resume(TPS("syscore_suspend"), 0, false); return 0; err_out: @@ -92,6 +95,7 @@ void syscore_resume(void) { struct syscore_ops *ops; + trace_suspend_resume(TPS("syscore_resume"), 0, true); WARN_ONCE(!irqs_disabled(), "Interrupts enabled before system core resume.\n"); @@ -103,6 +107,7 @@ void syscore_resume(void) WARN_ONCE(!irqs_disabled(), "Interrupts enabled after %pF\n", ops->resume); } + trace_suspend_resume(TPS("syscore_resume"), 0, false); } EXPORT_SYMBOL_GPL(syscore_resume); #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/topology.c b/drivers/base/topology.c index ae989c57cd5..be7c1fb7c0c 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -23,7 +23,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ -#include <linux/init.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/module.h> @@ -40,8 +39,7 @@ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - unsigned int cpu = dev->id; \ - return sprintf(buf, "%d\n", topology_##name(cpu)); \ + return sprintf(buf, "%d\n", topology_##name(dev->id)); \ } #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \ @@ -62,25 +60,6 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) } #endif -#ifdef arch_provides_topology_pointers -#define define_siblings_show_map(name) \ -static ssize_t show_##name(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - unsigned int cpu = dev->id; \ - return show_cpumap(0, topology_##name(cpu), buf); \ -} - -#define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - unsigned int cpu = dev->id; \ - return show_cpumap(1, topology_##name(cpu), buf); \ -} - -#else #define define_siblings_show_map(name) \ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -95,7 +74,6 @@ static ssize_t show_##name##_list(struct device *dev, \ { \ return show_cpumap(1, topology_##name(dev->id), buf); \ } -#endif #define define_siblings_show_func(name) \ define_siblings_show_map(name); define_siblings_show_list(name) @@ -143,22 +121,22 @@ static struct attribute_group topology_attr_group = { }; /* Add/Remove cpu_topology interface for CPU device */ -static int __cpuinit topology_add_dev(unsigned int cpu) +static int topology_add_dev(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); return sysfs_create_group(&dev->kobj, &topology_attr_group); } -static void __cpuinit topology_remove_dev(unsigned int cpu) +static void topology_remove_dev(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); sysfs_remove_group(&dev->kobj, &topology_attr_group); } -static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int topology_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; int rc = 0; @@ -178,19 +156,23 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, return notifier_from_errno(rc); } -static int __cpuinit topology_sysfs_init(void) +static int topology_sysfs_init(void) { int cpu; - int rc; + int rc = 0; + + cpu_notifier_register_begin(); for_each_online_cpu(cpu) { rc = topology_add_dev(cpu); if (rc) - return rc; + goto out; } - hotcpu_notifier(topology_cpu_callback, 0); + __hotcpu_notifier(topology_cpu_callback, 0); - return 0; +out: + cpu_notifier_register_done(); + return rc; } device_initcall(topology_sysfs_init);
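For context on the syscore.c hunk earlier in this diff (illustrative, not from the patch): the new trace_suspend_resume() events bracket every callback registered through struct syscore_ops, so a user such as the sketch below now shows up in the suspend/resume trace:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int quux_syscore_suspend(void)
{
	/* Runs late in suspend: one CPU online, interrupts disabled. */
	return 0;
}

static void quux_syscore_resume(void)
{
	/* Runs early in resume, before interrupts are re-enabled. */
}

static struct syscore_ops quux_syscore_ops = {
	.suspend = quux_syscore_suspend,
	.resume = quux_syscore_resume,
};

static int __init quux_init(void)
{
	register_syscore_ops(&quux_syscore_ops);
	return 0;
}
device_initcall(quux_init);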