Diffstat (limited to 'drivers/base')
49 files changed, 3156 insertions, 933 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index e373671652b..23b8726962a 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -1,10 +1,10 @@  menu "Generic Driver Options" -config UEVENT_HELPER_PATH -	string "path to uevent helper" -	default "" +config UEVENT_HELPER +	bool "Support for uevent helper" +	default y  	help -	  Path to uevent helper program forked by the kernel for +	  The uevent helper program is forked by the kernel for  	  every uevent.  	  Before the switch to the netlink-based uevent source, this was  	  used to hook hotplug scripts into kernel device events. It @@ -15,8 +15,13 @@ config UEVENT_HELPER_PATH  	  that it creates a high system load, or on smaller systems  	  it is known to create out-of-memory situations during bootup. -	  To disable user space helper program execution at early boot -	  time specify an empty string here. This setting can be altered +config UEVENT_HELPER_PATH +	string "path to uevent helper" +	depends on UEVENT_HELPER +	default "" +	help +	  To disable user space helper program execution by default, +	  specify an empty string here. This setting can still be altered  	  via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper  	  later at runtime. @@ -49,7 +54,7 @@ config DEVTMPFS_MOUNT  	  with the commandline parameter: devtmpfs.mount=0|1.  	  This option does not affect initramfs based booting; here  	  the devtmpfs filesystem always needs to be mounted manually -	  after the roots is mounted. +	  after the rootfs is mounted.  	  With this option enabled, it allows bringing up a system in  	  rescue mode with init=/bin/sh, even when the /dev directory  	  on the rootfs is completely empty. @@ -185,6 +190,9 @@ config GENERIC_CPU_DEVICES  	bool  	default n +config GENERIC_CPU_AUTOPROBE +	bool +  config SOC_BUS  	bool @@ -250,7 +258,7 @@ endchoice  config CMA_ALIGNMENT  	int "Maximum PAGE_SIZE order of alignment for contiguous buffers" -	range 4 9 +	range 4 12  	default 8  	help  	  DMA mapping framework by default aligns all buffers to the smallest diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 94e8a80e87f..04b314e0fa5 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -1,10 +1,10 @@  # Makefile for the Linux device tree -obj-y			:= core.o bus.o dd.o syscore.o \ +obj-y			:= component.o core.o bus.o dd.o syscore.o \  			   driver.o class.o platform.o \  			   cpu.o firmware.o init.o map.o devres.o \  			   attribute_container.o transport_class.o \ -			   topology.o +			   topology.o container.o  obj-$(CONFIG_DEVTMPFS)	+= devtmpfs.o  obj-$(CONFIG_DMA_CMA) += dma-contiguous.o  obj-y			+= power/ diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c index ecc1929d7f6..b84ca8f13f9 100644 --- a/drivers/base/attribute_container.c +++ b/drivers/base/attribute_container.c @@ -12,7 +12,6 @@   */  #include <linux/attribute_container.h> -#include <linux/init.h>  #include <linux/device.h>  #include <linux/kernel.h>  #include <linux/slab.h> diff --git a/drivers/base/base.h b/drivers/base/base.h index 2cbc6774f4c..251c5d30f96 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -63,8 +63,6 @@ struct driver_private {   *	binding of drivers which were unable to get all the resources needed by   *	the device; typically because it depends on another driver getting   *	probed first. - * @driver_data - private pointer for driver specific info.  Will turn into a - * list soon.
* @device - pointer back to the struct device that this structure is   * associated with.   * @@ -76,7 +74,6 @@ struct device_private {  	struct klist_node knode_driver;  	struct klist_node knode_bus;  	struct list_head deferred_probe; -	void *driver_data;  	struct device *device;  };  #define to_device_private_parent(obj)	\ @@ -100,6 +97,7 @@ static inline int hypervisor_init(void) { return 0; }  #endif  extern int platform_bus_init(void);  extern void cpu_dev_init(void); +extern void container_dev_init(void);  struct kobject *virtual_device_parent(struct device *dev); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 4c289ab9135..83e910a5756 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -146,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)  }  EXPORT_SYMBOL_GPL(bus_remove_file); +static void bus_release(struct kobject *kobj) +{ +	struct subsys_private *priv = +		container_of(kobj, typeof(*priv), subsys.kobj); +	struct bus_type *bus = priv->bus; + +	kfree(priv); +	bus->p = NULL; +} +  static struct kobj_type bus_ktype = {  	.sysfs_ops	= &bus_sysfs_ops, +	.release	= bus_release,  };  static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) @@ -591,37 +602,6 @@ void bus_remove_device(struct device *dev)  	bus_put(dev->bus);  } -static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv) -{ -	int error = 0; -	int i; - -	if (bus->drv_attrs) { -		for (i = 0; bus->drv_attrs[i].attr.name; i++) { -			error = driver_create_file(drv, &bus->drv_attrs[i]); -			if (error) -				goto err; -		} -	} -done: -	return error; -err: -	while (--i >= 0) -		driver_remove_file(drv, &bus->drv_attrs[i]); -	goto done; -} - -static void driver_remove_attrs(struct bus_type *bus, -				struct device_driver *drv) -{ -	int i; - -	if (bus->drv_attrs) { -		for (i = 0; bus->drv_attrs[i].attr.name; i++) -			driver_remove_file(drv, &bus->drv_attrs[i]); -	} -} -  static int __must_check add_bind_files(struct device_driver *drv)  {  	int ret; @@ -720,16 +700,12 @@ int bus_add_driver(struct device_driver *drv)  		printk(KERN_ERR "%s: uevent attr (%s) failed\n",  			__func__, drv->name);  	} -	error = driver_add_attrs(bus, drv); +	error = driver_add_groups(drv, bus->drv_groups);  	if (error) {  		/* How the hell do we get out of this pickle? Give up */ -		printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n", -			__func__, drv->name); -	} -	error = driver_add_groups(drv, bus->drv_groups); -	if (error)  		printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",  			__func__, drv->name); +	}  	if (!drv->suppress_bind_attrs) {  		error = add_bind_files(drv); @@ -766,7 +742,6 @@ void bus_remove_driver(struct device_driver *drv)  	if (!drv->suppress_bind_attrs)  		remove_bind_files(drv); -	driver_remove_attrs(drv->bus, drv);  	driver_remove_groups(drv, drv->bus->drv_groups);  	driver_remove_file(drv, &driver_attr_uevent);  	klist_remove(&drv->p->knode_bus); @@ -846,42 +821,6 @@ struct bus_type *find_bus(char *name)  }  #endif  /*  0  */ - -/** - * bus_add_attrs - Add default attributes for this bus. - * @bus: Bus that has just been registered.
- */ - -static int bus_add_attrs(struct bus_type *bus) -{ -	int error = 0; -	int i; - -	if (bus->bus_attrs) { -		for (i = 0; bus->bus_attrs[i].attr.name; i++) { -			error = bus_create_file(bus, &bus->bus_attrs[i]); -			if (error) -				goto err; -		} -	} -done: -	return error; -err: -	while (--i >= 0) -		bus_remove_file(bus, &bus->bus_attrs[i]); -	goto done; -} - -static void bus_remove_attrs(struct bus_type *bus) -{ -	int i; - -	if (bus->bus_attrs) { -		for (i = 0; bus->bus_attrs[i].attr.name; i++) -			bus_remove_file(bus, &bus->bus_attrs[i]); -	} -} -  static int bus_add_groups(struct bus_type *bus,  			  const struct attribute_group **groups)  { @@ -983,9 +922,6 @@ int bus_register(struct bus_type *bus)  	if (retval)  		goto bus_probe_files_fail; -	retval = bus_add_attrs(bus); -	if (retval) -		goto bus_attrs_fail;  	retval = bus_add_groups(bus, bus->bus_groups);  	if (retval)  		goto bus_groups_fail; @@ -994,8 +930,6 @@ int bus_register(struct bus_type *bus)  	return 0;  bus_groups_fail: -	bus_remove_attrs(bus); -bus_attrs_fail:  	remove_probe_files(bus);  bus_probe_files_fail:  	kset_unregister(bus->p->drivers_kset); @@ -1024,15 +958,12 @@ void bus_unregister(struct bus_type *bus)  	pr_debug("bus: '%s': unregistering\n", bus->name);  	if (bus->dev_root)  		device_unregister(bus->dev_root); -	bus_remove_attrs(bus);  	bus_remove_groups(bus, bus->bus_groups);  	remove_probe_files(bus);  	kset_unregister(bus->p->drivers_kset);  	kset_unregister(bus->p->devices_kset);  	bus_remove_file(bus, &bus_attr_uevent);  	kset_unregister(&bus->p->subsys); -	kfree(bus->p); -	bus->p = NULL;  }  EXPORT_SYMBOL_GPL(bus_unregister); @@ -1287,7 +1218,7 @@ err_dev:   * with the name of the subsystem. The root device can carry subsystem-   * wide attributes. All registered devices are below this single root   * device and are named after the subsystem with a simple enumeration - * number appended. The registered devices are not explicitely named; + * number appended. The registered devices are not explicitly named;   * only 'id' in the device needs to be set.   
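The driver_add_attrs()/bus_add_attrs() machinery removed above is superseded by the drv_groups/bus_groups pointers consumed by bus_add_driver() and bus_register(). A minimal sketch of the replacement pattern, with a hypothetical my_bus and version attribute, assuming the ATTRIBUTE_GROUPS() helper is available:

	static ssize_t version_show(struct bus_type *bus, char *buf)
	{
		return sprintf(buf, "1.0\n");	/* illustrative value only */
	}
	static BUS_ATTR(version, S_IRUGO, version_show, NULL);

	static struct attribute *my_bus_attrs[] = {
		&bus_attr_version.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(my_bus);		/* emits my_bus_groups */

	static struct bus_type my_bus_type = {
		.name		= "my_bus",
		.bus_groups	= my_bus_groups,	/* replaces .bus_attrs */
	};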
*   * Do not use this interface for anything new; it exists for compatibility diff --git a/drivers/base/class.c b/drivers/base/class.c index 8b7818b8005..f96f70419a7 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,  	return ret;  } -static const void *class_attr_namespace(struct kobject *kobj, -					const struct attribute *attr) -{ -	struct class_attribute *class_attr = to_class_attr(attr); -	struct subsys_private *cp = to_subsys_private(kobj); -	const void *ns = NULL; - -	if (class_attr->namespace) -		ns = class_attr->namespace(cp->class, class_attr); -	return ns; -} -  static void class_release(struct kobject *kobj)  {  	struct subsys_private *cp = to_subsys_private(kobj); @@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject  static const struct sysfs_ops class_sysfs_ops = {  	.show	   = class_attr_show,  	.store	   = class_attr_store, -	.namespace = class_attr_namespace,  };  static struct kobj_type class_ktype = { @@ -99,21 +86,23 @@ static struct kobj_type class_ktype = {  static struct kset *class_kset; -int class_create_file(struct class *cls, const struct class_attribute *attr) +int class_create_file_ns(struct class *cls, const struct class_attribute *attr, +			 const void *ns)  {  	int error;  	if (cls) -		error = sysfs_create_file(&cls->p->subsys.kobj, -					  &attr->attr); +		error = sysfs_create_file_ns(&cls->p->subsys.kobj, +					     &attr->attr, ns);  	else  		error = -EINVAL;  	return error;  } -void class_remove_file(struct class *cls, const struct class_attribute *attr) +void class_remove_file_ns(struct class *cls, const struct class_attribute *attr, +			  const void *ns)  {  	if (cls) -		sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr); +		sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);  }  static struct class *class_get(struct class *cls) @@ -600,8 +589,8 @@ int __init classes_init(void)  	return 0;  } -EXPORT_SYMBOL_GPL(class_create_file); -EXPORT_SYMBOL_GPL(class_remove_file); +EXPORT_SYMBOL_GPL(class_create_file_ns); +EXPORT_SYMBOL_GPL(class_remove_file_ns);  EXPORT_SYMBOL_GPL(class_unregister);  EXPORT_SYMBOL_GPL(class_destroy); diff --git a/drivers/base/component.c b/drivers/base/component.c new file mode 100644 index 00000000000..c4778995cd7 --- /dev/null +++ b/drivers/base/component.c @@ -0,0 +1,390 @@ +/* + * Componentized device handling. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is work in progress.  We gather up the component devices into a list, + * and bind them when instructed.  At the moment, we're specific to the DRM + * subsystem, and only handle one master device, but this doesn't have to be + * the case.
+ */ +#include <linux/component.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +struct master { +	struct list_head node; +	struct list_head components; +	bool bound; + +	const struct component_master_ops *ops; +	struct device *dev; +}; + +struct component { +	struct list_head node; +	struct list_head master_node; +	struct master *master; +	bool bound; + +	const struct component_ops *ops; +	struct device *dev; +}; + +static DEFINE_MUTEX(component_mutex); +static LIST_HEAD(component_list); +static LIST_HEAD(masters); + +static struct master *__master_find(struct device *dev, +	const struct component_master_ops *ops) +{ +	struct master *m; + +	list_for_each_entry(m, &masters, node) +		if (m->dev == dev && (!ops || m->ops == ops)) +			return m; + +	return NULL; +} + +/* Attach an unattached component to a master. */ +static void component_attach_master(struct master *master, struct component *c) +{ +	c->master = master; + +	list_add_tail(&c->master_node, &master->components); +} + +/* Detach a component from a master. */ +static void component_detach_master(struct master *master, struct component *c) +{ +	list_del(&c->master_node); + +	c->master = NULL; +} + +int component_master_add_child(struct master *master, +	int (*compare)(struct device *, void *), void *compare_data) +{ +	struct component *c; +	int ret = -ENXIO; + +	list_for_each_entry(c, &component_list, node) { +		if (c->master) +			continue; + +		if (compare(c->dev, compare_data)) { +			component_attach_master(master, c); +			ret = 0; +			break; +		} +	} + +	return ret; +} +EXPORT_SYMBOL_GPL(component_master_add_child); + +/* Detach all attached components from this master */ +static void master_remove_components(struct master *master) +{ +	while (!list_empty(&master->components)) { +		struct component *c = list_first_entry(&master->components, +					struct component, master_node); + +		WARN_ON(c->master != master); + +		component_detach_master(master, c); +	} +} + +/* + * Try to bring up a master.  If component is NULL, we're interested in + * this master, otherwise it's a component which must be present to try + * and bring up the master. + * + * Returns 1 for successful bringup, 0 if not ready, or -ve errno. + */ +static int try_to_bring_up_master(struct master *master, +	struct component *component) +{ +	int ret = 0; + +	if (!master->bound) { +		/* +		 * Search the list of components, looking for components that +		 * belong to this master, and attach them to the master. 
+		 */ +		if (master->ops->add_components(master->dev, master)) { +			/* Failed to find all components */ +			master_remove_components(master); +			ret = 0; +			goto out; +		} + +		if (component && component->master != master) { +			master_remove_components(master); +			ret = 0; +			goto out; +		} + +		if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) { +			ret = -ENOMEM; +			goto out; +		} + +		/* Found all components */ +		ret = master->ops->bind(master->dev); +		if (ret < 0) { +			devres_release_group(master->dev, NULL); +			dev_info(master->dev, "master bind failed: %d\n", ret); +			master_remove_components(master); +			goto out; +		} + +		master->bound = true; +		ret = 1; +	} +out: + +	return ret; +} + +static int try_to_bring_up_masters(struct component *component) +{ +	struct master *m; +	int ret = 0; + +	list_for_each_entry(m, &masters, node) { +		ret = try_to_bring_up_master(m, component); +		if (ret != 0) +			break; +	} + +	return ret; +} + +static void take_down_master(struct master *master) +{ +	if (master->bound) { +		master->ops->unbind(master->dev); +		devres_release_group(master->dev, NULL); +		master->bound = false; +	} + +	master_remove_components(master); +} + +int component_master_add(struct device *dev, +	const struct component_master_ops *ops) +{ +	struct master *master; +	int ret; + +	master = kzalloc(sizeof(*master), GFP_KERNEL); +	if (!master) +		return -ENOMEM; + +	master->dev = dev; +	master->ops = ops; +	INIT_LIST_HEAD(&master->components); + +	/* Add to the list of available masters. */ +	mutex_lock(&component_mutex); +	list_add(&master->node, &masters); + +	ret = try_to_bring_up_master(master, NULL); + +	if (ret < 0) { +		/* Delete off the list if we weren't successful */ +		list_del(&master->node); +		kfree(master); +	} +	mutex_unlock(&component_mutex); + +	return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(component_master_add); + +void component_master_del(struct device *dev, +	const struct component_master_ops *ops) +{ +	struct master *master; + +	mutex_lock(&component_mutex); +	master = __master_find(dev, ops); +	if (master) { +		take_down_master(master); + +		list_del(&master->node); +		kfree(master); +	} +	mutex_unlock(&component_mutex); +} +EXPORT_SYMBOL_GPL(component_master_del); + +static void component_unbind(struct component *component, +	struct master *master, void *data) +{ +	WARN_ON(!component->bound); + +	component->ops->unbind(component->dev, master->dev, data); +	component->bound = false; + +	/* Release all resources claimed in the binding of this component */ +	devres_release_group(component->dev, component); +} + +void component_unbind_all(struct device *master_dev, void *data) +{ +	struct master *master; +	struct component *c; + +	WARN_ON(!mutex_is_locked(&component_mutex)); + +	master = __master_find(master_dev, NULL); +	if (!master) +		return; + +	list_for_each_entry_reverse(c, &master->components, master_node) +		component_unbind(c, master, data); +} +EXPORT_SYMBOL_GPL(component_unbind_all); + +static int component_bind(struct component *component, struct master *master, +	void *data) +{ +	int ret; + +	/* +	 * Each component initialises inside its own devres group. +	 * This allows us to roll-back a failed component without +	 * affecting anything else. +	 */ +	if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) +		return -ENOMEM; + +	/* +	 * Also open a group for the device itself: this allows us +	 * to release the resources claimed against the sub-device +	 * at the appropriate moment. 
+	 */ +	if (!devres_open_group(component->dev, component, GFP_KERNEL)) { +		devres_release_group(master->dev, NULL); +		return -ENOMEM; +	} + +	dev_dbg(master->dev, "binding %s (ops %ps)\n", +		dev_name(component->dev), component->ops); + +	ret = component->ops->bind(component->dev, master->dev, data); +	if (!ret) { +		component->bound = true; + +		/* +		 * Close the component device's group so that resources +		 * allocated in the binding are encapsulated for removal +		 * at unbind.  Remove the group on the DRM device as we +		 * can clean those resources up independently. +		 */ +		devres_close_group(component->dev, NULL); +		devres_remove_group(master->dev, NULL); + +		dev_info(master->dev, "bound %s (ops %ps)\n", +			 dev_name(component->dev), component->ops); +	} else { +		devres_release_group(component->dev, NULL); +		devres_release_group(master->dev, NULL); + +		dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", +			dev_name(component->dev), component->ops, ret); +	} + +	return ret; +} + +int component_bind_all(struct device *master_dev, void *data) +{ +	struct master *master; +	struct component *c; +	int ret = 0; + +	WARN_ON(!mutex_is_locked(&component_mutex)); + +	master = __master_find(master_dev, NULL); +	if (!master) +		return -EINVAL; + +	list_for_each_entry(c, &master->components, master_node) { +		ret = component_bind(c, master, data); +		if (ret) +			break; +	} + +	if (ret != 0) { +		list_for_each_entry_continue_reverse(c, &master->components, +						     master_node) +			component_unbind(c, master, data); +	} + +	return ret; +} +EXPORT_SYMBOL_GPL(component_bind_all); + +int component_add(struct device *dev, const struct component_ops *ops) +{ +	struct component *component; +	int ret; + +	component = kzalloc(sizeof(*component), GFP_KERNEL); +	if (!component) +		return -ENOMEM; + +	component->ops = ops; +	component->dev = dev; + +	dev_dbg(dev, "adding component (ops %ps)\n", ops); + +	mutex_lock(&component_mutex); +	list_add_tail(&component->node, &component_list); + +	ret = try_to_bring_up_masters(component); +	if (ret < 0) { +		list_del(&component->node); + +		kfree(component); +	} +	mutex_unlock(&component_mutex); + +	return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(component_add); + +void component_del(struct device *dev, const struct component_ops *ops) +{ +	struct component *c, *component = NULL; + +	mutex_lock(&component_mutex); +	list_for_each_entry(c, &component_list, node) +		if (c->dev == dev && c->ops == ops) { +			list_del(&c->node); +			component = c; +			break; +		} + +	if (component && component->master) +		take_down_master(component->master); + +	mutex_unlock(&component_mutex); + +	WARN_ON(!component); +	kfree(component); +} +EXPORT_SYMBOL_GPL(component_del); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/container.c b/drivers/base/container.c new file mode 100644 index 00000000000..ecbfbe2e908 --- /dev/null +++ b/drivers/base/container.c @@ -0,0 +1,44 @@ +/* + * System bus type for containers. + * + * Copyright (C) 2013, Intel Corporation + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
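Before the container bus below, a usage sketch for the component framework just added; the my_* names are hypothetical, and the ops signatures follow the structures above (add_components() collects children, bind() typically calls component_bind_all()):

	static int my_compare(struct device *dev, void *data)
	{
		return dev->of_node == data;	/* e.g. match by DT node */
	}

	static int my_add_components(struct device *dev, struct master *m)
	{
		/* hypothetical: np identifies the one sub-device we aggregate */
		struct device_node *np = of_get_child_by_name(dev->of_node, "gpu");

		return component_master_add_child(m, my_compare, np);
	}

	static int my_bind(struct device *dev)
	{
		/* binds each matched component through component->ops->bind() */
		return component_bind_all(dev, dev_get_drvdata(dev));
	}

	static void my_unbind(struct device *dev)
	{
		component_unbind_all(dev, dev_get_drvdata(dev));
	}

	static const struct component_master_ops my_master_ops = {
		.add_components	= my_add_components,
		.bind		= my_bind,
		.unbind		= my_unbind,
	};

The master's probe routine would then call component_master_add(dev, &my_master_ops), while each sub-device driver registers itself with component_add() against a matching struct component_ops and tears down with component_del().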
+ */ + +#include <linux/container.h> + +#include "base.h" + +#define CONTAINER_BUS_NAME	"container" + +static int trivial_online(struct device *dev) +{ +	return 0; +} + +static int container_offline(struct device *dev) +{ +	struct container_dev *cdev = to_container_dev(dev); + +	return cdev->offline ? cdev->offline(cdev) : 0; +} + +struct bus_type container_subsys = { +	.name = CONTAINER_BUS_NAME, +	.dev_name = CONTAINER_BUS_NAME, +	.online = trivial_online, +	.offline = container_offline, +}; + +void __init container_dev_init(void) +{ +	int ret; + +	ret = subsys_system_register(&container_subsys, NULL); +	if (ret) +		pr_err("%s() failed: %d\n", __func__, ret); +} diff --git a/drivers/base/core.c b/drivers/base/core.c index 34abf4d8a45..20da3ad1696 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -23,7 +23,6 @@  #include <linux/genhd.h>  #include <linux/kallsyms.h>  #include <linux/mutex.h> -#include <linux/async.h>  #include <linux/pm_runtime.h>  #include <linux/netdevice.h>  #include <linux/sysfs.h> @@ -455,64 +454,6 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,  }  static DEVICE_ATTR_RW(online); -static int device_add_attributes(struct device *dev, -				 struct device_attribute *attrs) -{ -	int error = 0; -	int i; - -	if (attrs) { -		for (i = 0; attrs[i].attr.name; i++) { -			error = device_create_file(dev, &attrs[i]); -			if (error) -				break; -		} -		if (error) -			while (--i >= 0) -				device_remove_file(dev, &attrs[i]); -	} -	return error; -} - -static void device_remove_attributes(struct device *dev, -				     struct device_attribute *attrs) -{ -	int i; - -	if (attrs) -		for (i = 0; attrs[i].attr.name; i++) -			device_remove_file(dev, &attrs[i]); -} - -static int device_add_bin_attributes(struct device *dev, -				     struct bin_attribute *attrs) -{ -	int error = 0; -	int i; - -	if (attrs) { -		for (i = 0; attrs[i].attr.name; i++) { -			error = device_create_bin_file(dev, &attrs[i]); -			if (error) -				break; -		} -		if (error) -			while (--i >= 0) -				device_remove_bin_file(dev, &attrs[i]); -	} -	return error; -} - -static void device_remove_bin_attributes(struct device *dev, -					 struct bin_attribute *attrs) -{ -	int i; - -	if (attrs) -		for (i = 0; attrs[i].attr.name; i++) -			device_remove_bin_file(dev, &attrs[i]); -} -  int device_add_groups(struct device *dev, const struct attribute_group **groups)  {  	return sysfs_create_groups(&dev->kobj, groups); @@ -534,18 +475,12 @@ static int device_add_attrs(struct device *dev)  		error = device_add_groups(dev, class->dev_groups);  		if (error)  			return error; -		error = device_add_attributes(dev, class->dev_attrs); -		if (error) -			goto err_remove_class_groups; -		error = device_add_bin_attributes(dev, class->dev_bin_attrs); -		if (error) -			goto err_remove_class_attrs;  	}  	if (type) {  		error = device_add_groups(dev, type->groups);  		if (error) -			goto err_remove_class_bin_attrs; +			goto err_remove_class_groups;  	}  	error = device_add_groups(dev, dev->groups); @@ -555,20 +490,16 @@ static int device_add_attrs(struct device *dev)  	if (device_supports_offline(dev) && !dev->offline_disabled) {  		error = device_create_file(dev, &dev_attr_online);  		if (error) -			goto err_remove_type_groups; +			goto err_remove_dev_groups;  	}  	return 0; + err_remove_dev_groups: +	device_remove_groups(dev, dev->groups);   err_remove_type_groups:  	if (type)  		device_remove_groups(dev, type->groups); - err_remove_class_bin_attrs: -	if (class) -		device_remove_bin_attributes(dev, 
class->dev_bin_attrs); - err_remove_class_attrs: -	if (class) -		device_remove_attributes(dev, class->dev_attrs);   err_remove_class_groups:  	if (class)  		device_remove_groups(dev, class->dev_groups); @@ -587,11 +518,8 @@ static void device_remove_attrs(struct device *dev)  	if (type)  		device_remove_groups(dev, type->groups); -	if (class) { -		device_remove_attributes(dev, class->dev_attrs); -		device_remove_bin_attributes(dev, class->dev_bin_attrs); +	if (class)  		device_remove_groups(dev, class->dev_groups); -	}  }  static ssize_t dev_show(struct device *dev, struct device_attribute *attr, @@ -642,6 +570,23 @@ void device_remove_file(struct device *dev,  EXPORT_SYMBOL_GPL(device_remove_file);  /** + * device_remove_file_self - remove sysfs attribute file from its own method. + * @dev: device. + * @attr: device attribute descriptor. + * + * See kernfs_remove_self() for details. + */ +bool device_remove_file_self(struct device *dev, +			     const struct device_attribute *attr) +{ +	if (dev) +		return sysfs_remove_file_self(&dev->kobj, &attr->attr); +	else +		return false; +} +EXPORT_SYMBOL_GPL(device_remove_file_self); + +/**   * device_create_bin_file - create sysfs binary attribute file for device.   * @dev: device.   * @attr: device binary attribute descriptor. @@ -669,39 +614,6 @@ void device_remove_bin_file(struct device *dev,  }  EXPORT_SYMBOL_GPL(device_remove_bin_file); -/** - * device_schedule_callback_owner - helper to schedule a callback for a device - * @dev: device. - * @func: callback function to invoke later. - * @owner: module owning the callback routine - * - * Attribute methods must not unregister themselves or their parent device - * (which would amount to the same thing).  Attempts to do so will deadlock, - * since unregistration is mutually exclusive with driver callbacks. - * - * Instead methods can call this routine, which will attempt to allocate - * and schedule a workqueue request to call back @func with @dev as its - * argument in the workqueue's process context.  @dev will be pinned until - * @func returns. - * - * This routine is usually called via the inline device_schedule_callback(), - * which automatically sets @owner to THIS_MODULE. - * - * Returns 0 if the request was submitted, -ENOMEM if storage could not - * be allocated, -ENODEV if a reference to @owner isn't available. - * - * NOTE: This routine won't work if CONFIG_SYSFS isn't set!  It uses an - * underlying sysfs routine (since it is intended for use by attribute - * methods), and if sysfs isn't available you'll get nothing but -ENOSYS. 
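device_remove_file_self(), added above, replaces the device_schedule_callback() machinery being deleted here: instead of deferring the removal to a workqueue, the attribute method drops its own sysfs active reference and then unregisters the device directly. A minimal sketch (delete_store and my_device_teardown are hypothetical):

	static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
	{
		if (device_remove_file_self(dev, attr))
			my_device_teardown(dev);	/* hypothetical unregister path */
		return count;
	}
	static DEVICE_ATTR_WO(delete);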
- */ -int device_schedule_callback_owner(struct device *dev, -		void (*func)(struct device *), struct module *owner) -{ -	return sysfs_schedule_callback(&dev->kobj, -			(void (*)(void *)) func, dev, owner); -} -EXPORT_SYMBOL_GPL(device_schedule_callback_owner); -  static void klist_children_get(struct klist_node *n)  {  	struct device_private *p = to_device_private_parent(n); @@ -1676,6 +1588,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,  		goto error;  	} +	device_initialize(dev);  	dev->devt = devt;  	dev->class = class;  	dev->parent = parent; @@ -1687,7 +1600,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,  	if (retval)  		goto error; -	retval = device_register(dev); +	retval = device_add(dev);  	if (retval)  		goto error; @@ -1881,6 +1794,7 @@ EXPORT_SYMBOL_GPL(device_destroy);   */  int device_rename(struct device *dev, const char *new_name)  { +	struct kobject *kobj = &dev->kobj;  	char *old_device_name = NULL;  	int error; @@ -1888,8 +1802,7 @@ int device_rename(struct device *dev, const char *new_name)  	if (!dev)  		return -EINVAL; -	pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), -		 __func__, new_name); +	dev_dbg(dev, "renaming to %s\n", new_name);  	old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);  	if (!old_device_name) { @@ -1898,13 +1811,14 @@ int device_rename(struct device *dev, const char *new_name)  	}  	if (dev->class) { -		error = sysfs_rename_link(&dev->class->p->subsys.kobj, -			&dev->kobj, old_device_name, new_name); +		error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, +					     kobj, old_device_name, +					     new_name, kobject_namespace(kobj));  		if (error)  			goto out;  	} -	error = kobject_rename(&dev->kobj, new_name); +	error = kobject_rename(kobj, new_name);  	if (error)  		goto out; @@ -2072,7 +1986,6 @@ void device_shutdown(void)  		spin_lock(&devices_kset->list_lock);  	}  	spin_unlock(&devices_kset->list_lock); -	async_synchronize_full();  }  /* @@ -2127,7 +2040,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)  	return pos;  } -EXPORT_SYMBOL(create_syslog_header);  int dev_vprintk_emit(int level, const struct device *dev,  		     const char *fmt, va_list args) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 848ebbd2571..006b1bc5297 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -15,6 +15,7 @@  #include <linux/percpu.h>  #include <linux/acpi.h>  #include <linux/of.h> +#include <linux/cpufeature.h>  #include "base.h" @@ -44,13 +45,11 @@ static int __ref cpu_subsys_online(struct device *dev)  	struct cpu *cpu = container_of(dev, struct cpu, dev);  	int cpuid = dev->id;  	int from_nid, to_nid; -	int ret = -ENODEV; - -	cpu_hotplug_driver_lock(); +	int ret;  	from_nid = cpu_to_node(cpuid);  	if (from_nid == NUMA_NO_NODE) -		goto out; +		return -ENODEV;  	ret = cpu_up(cpuid);  	/* @@ -61,19 +60,12 @@ static int __ref cpu_subsys_online(struct device *dev)  	if (from_nid != to_nid)  		change_cpu_under_node(cpu, from_nid, to_nid); - out: -	cpu_hotplug_driver_unlock();  	return ret;  }  static int cpu_subsys_offline(struct device *dev)  { -	int ret; - -	cpu_hotplug_driver_lock(); -	ret = cpu_down(dev->id); -	cpu_hotplug_driver_unlock(); -	return ret; +	return cpu_down(dev->id);  }  void unregister_cpu(struct cpu *cpu) @@ -93,7 +85,17 @@ static ssize_t cpu_probe_store(struct device *dev,  			       const char *buf,  			       size_t count)  { -	return arch_cpu_probe(buf, count); +	ssize_t cnt; +	int ret; + +	ret = 
lock_device_hotplug_sysfs(); +	if (ret) +		return ret; + +	cnt = arch_cpu_probe(buf, count); + +	unlock_device_hotplug(); +	return cnt;  }  static ssize_t cpu_release_store(struct device *dev, @@ -101,7 +103,17 @@ static ssize_t cpu_release_store(struct device *dev,  				 const char *buf,  				 size_t count)  { -	return arch_cpu_release(buf, count); +	ssize_t cnt; +	int ret; + +	ret = lock_device_hotplug_sysfs(); +	if (ret) +		return ret; + +	cnt = arch_cpu_release(buf, count); + +	unlock_device_hotplug(); +	return cnt;  }  static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); @@ -275,6 +287,41 @@ static void cpu_device_release(struct device *dev)  	 */  } +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static ssize_t print_cpu_modalias(struct device *dev, +				  struct device_attribute *attr, +				  char *buf) +{ +	ssize_t n; +	u32 i; + +	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", +		    CPU_FEATURE_TYPEVAL); + +	for (i = 0; i < MAX_CPU_FEATURES; i++) +		if (cpu_have_feature(i)) { +			if (PAGE_SIZE < n + sizeof(",XXXX\n")) { +				WARN(1, "CPU features overflow page\n"); +				break; +			} +			n += sprintf(&buf[n], ",%04X", i); +		} +	buf[n++] = '\n'; +	return n; +} + +static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) +{ +	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); +	if (buf) { +		print_cpu_modalias(NULL, NULL, buf); +		add_uevent_var(env, "MODALIAS=%s", buf); +		kfree(buf); +	} +	return 0; +} +#endif +  /*   * register_cpu - Setup a sysfs device for a CPU.   * @cpu - cpu->hotpluggable field set to 1 will generate a control file in @@ -295,8 +342,8 @@ int register_cpu(struct cpu *cpu, int num)  	cpu->dev.offline_disabled = !cpu->hotpluggable;  	cpu->dev.offline = !cpu_online(num);  	cpu->dev.of_node = of_get_cpu_node(num, NULL); -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE -	cpu->dev.bus->uevent = arch_cpu_uevent; +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +	cpu->dev.bus->uevent = cpu_uevent;  #endif  	cpu->dev.groups = common_cpu_attr_groups;  	if (cpu->hotpluggable) @@ -319,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu)  }  EXPORT_SYMBOL_GPL(get_cpu_device); -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE -static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL); +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);  #endif  static struct attribute *cpu_root_attrs[] = { @@ -333,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = {  	&cpu_attrs[2].attr.attr,  	&dev_attr_kernel_max.attr,  	&dev_attr_offline.attr, -#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE  	&dev_attr_modalias.attr,  #endif  	NULL diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 35fa3689891..e4ffbcf2f51 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);  static LIST_HEAD(deferred_probe_pending_list);  static LIST_HEAD(deferred_probe_active_list);  static struct workqueue_struct *deferred_wq; +static atomic_t deferred_trigger_count = ATOMIC_INIT(0);  /**   * deferred_probe_work_func() - Retry probing devices in the active list. @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;   * This functions moves all devices from the pending list to the active   * list and schedules the deferred probe workqueue to process them.  It   * should be called anytime a driver is successfully bound to a device. + * + * Note, there is a race condition in multi-threaded probe. 
In the case where + more than one device is probing at the same time, it is possible for one + probe to complete successfully while another is about to defer. If the second + depends on the first, then it will get put on the pending list after the + trigger event has already occurred and will be stuck there. + + * The atomic 'deferred_trigger_count' is used to determine if a successful + * trigger has occurred in the midst of probing a driver. If the trigger count + * changes in the midst of a probe, then deferred processing should be triggered + * again.   */  static void driver_deferred_probe_trigger(void)  { @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)  	 * into the active list so they can be retried by the workqueue  	 */  	mutex_lock(&deferred_probe_mutex); +	atomic_inc(&deferred_trigger_count);  	list_splice_tail_init(&deferred_probe_pending_list,  			      &deferred_probe_active_list);  	mutex_unlock(&deferred_probe_mutex); @@ -187,8 +200,8 @@ static void driver_bound(struct device *dev)  		return;  	} -	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev), -		 __func__, dev->driver->name); +	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name, +		 __func__, dev_name(dev));  	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);  static int really_probe(struct device *dev, struct device_driver *drv)  {  	int ret = 0; +	int local_trigger_count = atomic_read(&deferred_trigger_count);  	atomic_inc(&probe_count);  	pr_debug("bus: '%s': %s: probing driver %s with device %s\n", @@ -310,6 +324,9 @@ probe_failed:  		/* Driver requested deferred probing */  		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);  		driver_deferred_probe_add(dev); +		/* Did a trigger occur while probing? Need to re-trigger if yes */ +		if (local_trigger_count != atomic_read(&deferred_trigger_count)) +			driver_deferred_probe_trigger();  	} else if (ret != -ENODEV && ret != -ENXIO) {  		/* driver matched but the probe failed */  		printk(KERN_WARNING @@ -499,7 +516,7 @@ static void __device_release_driver(struct device *dev)  						     BUS_NOTIFY_UNBIND_DRIVER,  						     dev); -		pm_runtime_put(dev); +		pm_runtime_put_sync(dev);  		if (dev->bus && dev->bus->remove)  			dev->bus->remove(dev); @@ -570,29 +587,3 @@ void driver_detach(struct device_driver *drv)  		put_device(dev);  	}  } - -/* - * These exports can't be _GPL due to .h files using this within them, and it - * might break something that was previously working.
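The re-trigger logic above only matters once drivers actually defer; a probe routine opts in simply by returning -EPROBE_DEFER, directly or from a resource lookup, as in this sketch (my_probe is hypothetical; devm_clk_get() can itself return -EPROBE_DEFER):

	static int my_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			/* -EPROBE_DEFER lands the device on the pending list */
			return PTR_ERR(clk);

		return 0;
	}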
- */ -void *dev_get_drvdata(const struct device *dev) -{ -	if (dev && dev->p) -		return dev->p->driver_data; -	return NULL; -} -EXPORT_SYMBOL(dev_get_drvdata); - -int dev_set_drvdata(struct device *dev, void *data) -{ -	int error; - -	if (!dev->p) { -		error = device_private_init(dev); -		if (error) -			return error; -	} -	dev->p->driver_data = data; -	return 0; -} -EXPORT_SYMBOL(dev_set_drvdata); diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 507379e7b76..52302946770 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,  	if (unlikely(!dr))  		return NULL; -	memset(dr, 0, tot_size); +	memset(dr, 0, offsetof(struct devres, data)); +  	INIT_LIST_HEAD(&dr->node.entry);  	dr->node.release = release;  	return dr; @@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,  {  	struct devres *dr; -	dr = alloc_dr(release, size, gfp); +	dr = alloc_dr(release, size, gfp | __GFP_ZERO);  	if (unlikely(!dr))  		return NULL;  	set_node_dbginfo(&dr->node, name, size); @@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)  {  	struct devres *dr; -	dr = alloc_dr(release, size, gfp); +	dr = alloc_dr(release, size, gfp | __GFP_ZERO);  	if (unlikely(!dr))  		return NULL;  	return dr->data; @@ -745,58 +746,185 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)  EXPORT_SYMBOL_GPL(devm_remove_action);  /* - * Managed kzalloc/kfree + * Managed kmalloc/kfree   */ -static void devm_kzalloc_release(struct device *dev, void *res) +static void devm_kmalloc_release(struct device *dev, void *res)  {  	/* noop */  } -static int devm_kzalloc_match(struct device *dev, void *res, void *data) +static int devm_kmalloc_match(struct device *dev, void *res, void *data)  {  	return res == data;  }  /** - * devm_kzalloc - Resource-managed kzalloc + * devm_kmalloc - Resource-managed kmalloc   * @dev: Device to allocate memory for   * @size: Allocation size   * @gfp: Allocation gfp flags   * - * Managed kzalloc.  Memory allocated with this function is + * Managed kmalloc.  Memory allocated with this function is   * automatically freed on driver detach.  Like all other devres   * resources, guaranteed alignment is unsigned long long.   *   * RETURNS:   * Pointer to allocated memory on success, NULL on failure.   */ -void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) +void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)  {  	struct devres *dr;  	/* use raw alloc_dr for kmalloc caller tracing */ -	dr = alloc_dr(devm_kzalloc_release, size, gfp); +	dr = alloc_dr(devm_kmalloc_release, size, gfp);  	if (unlikely(!dr))  		return NULL; +	/* +	 * This is named devm_kzalloc_release for historical reasons. +	 * The initial implementation did not support kmalloc, only kzalloc. +	 */  	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);  	devres_add(dev, dr->data);  	return dr->data;  } -EXPORT_SYMBOL_GPL(devm_kzalloc); +EXPORT_SYMBOL_GPL(devm_kmalloc); + +/** + * devm_kstrdup - Allocate resource managed space and + *                copy an existing string into it. + * @dev: Device to allocate memory for + * @s: the string to duplicate + * @gfp: the GFP mask used in the devm_kmalloc() call when + *       allocating memory + * RETURNS: + * Pointer to allocated string on success, NULL on failure.
+ */ +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ +	size_t size; +	char *buf; + +	if (!s) +		return NULL; + +	size = strlen(s) + 1; +	buf = devm_kmalloc(dev, size, gfp); +	if (buf) +		memcpy(buf, s, size); +	return buf; +} +EXPORT_SYMBOL_GPL(devm_kstrdup);  /**   * devm_kfree - Resource-managed kfree   * @dev: Device this memory belongs to   * @p: Memory to free   * - * Free memory allocated with devm_kzalloc(). + * Free memory allocated with devm_kmalloc().   */  void devm_kfree(struct device *dev, void *p)  {  	int rc; -	rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p); +	rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);  	WARN_ON(rc);  }  EXPORT_SYMBOL_GPL(devm_kfree); + +/** + * devm_kmemdup - Resource-managed kmemdup + * @dev: Device this memory belongs to + * @src: Memory region to duplicate + * @len: Memory region length + * @gfp: GFP mask to use + * + * Duplicate a region of memory using resource managed kmalloc. + */ +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) +{ +	void *p; + +	p = devm_kmalloc(dev, len, gfp); +	if (p) +		memcpy(p, src, len); + +	return p; +} +EXPORT_SYMBOL_GPL(devm_kmemdup); + +struct pages_devres { +	unsigned long addr; +	unsigned int order; +}; + +static int devm_pages_match(struct device *dev, void *res, void *p) +{ +	struct pages_devres *devres = res; +	struct pages_devres *target = p; + +	return devres->addr == target->addr; +} + +static void devm_pages_release(struct device *dev, void *res) +{ +	struct pages_devres *devres = res; + +	free_pages(devres->addr, devres->order); +} + +/** + * devm_get_free_pages - Resource-managed __get_free_pages + * @dev: Device to allocate memory for + * @gfp_mask: Allocation gfp flags + * @order: Allocation size is (1 << order) pages + * + * Managed get_free_pages.  Memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Address of allocated memory on success, 0 on failure. + */ + +unsigned long devm_get_free_pages(struct device *dev, +				  gfp_t gfp_mask, unsigned int order) +{ +	struct pages_devres *devres; +	unsigned long addr; + +	addr = __get_free_pages(gfp_mask, order); + +	if (unlikely(!addr)) +		return 0; + +	devres = devres_alloc(devm_pages_release, +			      sizeof(struct pages_devres), GFP_KERNEL); +	if (unlikely(!devres)) { +		free_pages(addr, order); +		return 0; +	} + +	devres->addr = addr; +	devres->order = order; + +	devres_add(dev, devres); +	return addr; +} +EXPORT_SYMBOL_GPL(devm_get_free_pages); + +/** + * devm_free_pages - Resource-managed free_pages + * @dev: Device this memory belongs to + * @addr: Memory to free + * + * Free memory allocated with devm_get_free_pages(). Unlike free_pages, + * there is no need to supply the @order.
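With the additions above, every allocation a probe routine makes can be tied to the device's lifetime; a minimal sketch using the managed allocators (my_priv and my_probe are hypothetical):

	struct my_priv {
		const char *name;
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->name = devm_kstrdup(&pdev->dev, pdev->name, GFP_KERNEL);
		if (!priv->name)
			return -ENOMEM;	/* priv itself is released automatically */

		platform_set_drvdata(pdev, priv);
		return 0;	/* all of it is freed again on driver detach */
	}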
+ */ +void devm_free_pages(struct device *dev, unsigned long addr) +{ +	struct pages_devres devres = { .addr = addr }; + +	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, +			       &devres)); +} +EXPORT_SYMBOL_GPL(devm_free_pages); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 7413d065906..25798db1455 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -216,7 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,  		newattrs.ia_gid = gid;  		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;  		mutex_lock(&dentry->d_inode->i_mutex); -		notify_change(dentry, &newattrs); +		notify_change(dentry, &newattrs, NULL);  		mutex_unlock(&dentry->d_inode->i_mutex);  		/* mark as kernel-created inode */ @@ -299,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev)  {  	struct path parent;  	struct dentry *dentry; -	int deleted = 1; +	int deleted = 0;  	int err;  	dentry = kern_path_locked(nodename, &parent); @@ -322,9 +322,9 @@ static int handle_remove(const char *nodename, struct device *dev)  			newattrs.ia_valid =  				ATTR_UID|ATTR_GID|ATTR_MODE;  			mutex_lock(&dentry->d_inode->i_mutex); -			notify_change(dentry, &newattrs); +			notify_change(dentry, &newattrs, NULL);  			mutex_unlock(&dentry->d_inode->i_mutex); -			err = vfs_unlink(parent.dentry->d_inode, dentry); +			err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);  			if (!err || err == -ENOENT)  				deleted = 1;  		} diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index 1e16cbd61da..840c7fa8098 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c @@ -251,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);   * @dmabuf:	[in]	buffer to attach device to.   * @dev:	[in]	device to be attached.   * - * Returns struct dma_buf_attachment * for this attachment; may return negative - * error codes. - * + * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on + * error.   */  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,  					  struct device *dev) @@ -319,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);   * @attach:	[in]	attachment whose scatterlist is to be returned   * @direction:	[in]	direction of DMA transfer   * - * Returns sg_table containing the scatterlist to be returned; may return NULL - * or ERR_PTR. - * + * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR + * on error.   */  struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,  					enum dma_data_direction direction) @@ -334,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,  		return ERR_PTR(-EINVAL);  	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); +	if (!sg_table) +		sg_table = ERR_PTR(-ENOMEM);  	return sg_table;  } @@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);   * 			dma-buf buffer.   *   * This function adjusts the passed in vma so that it points at the file of the - * dma_buf operation. It alsog adjusts the starting pgoff and does bounds + * dma_buf operation. It also adjusts the starting pgoff and does bounds   * checking on the size of the vma. Then it calls the exporters mmap function to   * set up the mapping.   * @@ -544,6 +544,8 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);   * These calls are optional in drivers. The intended use for them   * is for mapping objects linear in kernel space for high use objects.   * Please attempt to use kmap/kunmap before thinking about these interfaces. 
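Given the clarified return conventions above (attach and map return ERR_PTR(), never NULL), importer code takes roughly this shape; a sketch assuming dmabuf and dev are already at hand:

	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		return PTR_ERR(sgt);
	}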
+ * + * Returns NULL on error.   */  void *dma_buf_vmap(struct dma_buf *dmabuf)  { @@ -566,7 +568,9 @@ void *dma_buf_vmap(struct dma_buf *dmabuf)  	BUG_ON(dmabuf->vmap_ptr);  	ptr = dmabuf->ops->vmap(dmabuf); -	if (IS_ERR_OR_NULL(ptr)) +	if (WARN_ON_ONCE(IS_ERR(ptr))) +		ptr = NULL; +	if (!ptr)  		goto out_unlock;  	dmabuf->vmap_ptr = ptr; @@ -616,36 +620,35 @@ static int dma_buf_describe(struct seq_file *s)  	if (ret)  		return ret; -	seq_printf(s, "\nDma-buf Objects:\n"); -	seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n"); +	seq_puts(s, "\nDma-buf Objects:\n"); +	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");  	list_for_each_entry(buf_obj, &db_list.head, list_node) {  		ret = mutex_lock_interruptible(&buf_obj->lock);  		if (ret) { -			seq_printf(s, -				  "\tERROR locking buffer object: skipping\n"); +			seq_puts(s, +				 "\tERROR locking buffer object: skipping\n");  			continue;  		} -		seq_printf(s, "\t"); - -		seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n", -				buf_obj->exp_name, buf_obj->size, +		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n", +				buf_obj->size,  				buf_obj->file->f_flags, buf_obj->file->f_mode, -				(long)(buf_obj->file->f_count.counter)); +				(long)(buf_obj->file->f_count.counter), +				buf_obj->exp_name); -		seq_printf(s, "\t\tAttached Devices:\n"); +		seq_puts(s, "\tAttached Devices:\n");  		attach_count = 0;  		list_for_each_entry(attach_obj, &buf_obj->attachments, node) { -			seq_printf(s, "\t\t"); +			seq_puts(s, "\t"); -			seq_printf(s, "%s\n", attach_obj->dev->init_name); +			seq_printf(s, "%s\n", dev_name(attach_obj->dev));  			attach_count++;  		} -		seq_printf(s, "\n\t\tTotal %d devices attached\n", +		seq_printf(s, "Total %d devices attached\n\n",  				attach_count);  		count++; diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b64102..7d6e84a5142 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,13 +10,13 @@  struct dma_coherent_mem {  	void		*virt_base;  	dma_addr_t	device_base; -	phys_addr_t	pfn_base; +	unsigned long	pfn_base;  	int		size;  	int		flags;  	unsigned long	*bitmap;  }; -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,  				dma_addr_t device_addr, size_t size, int flags)  {  	void __iomem *mem_base = NULL; @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,  	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ -	mem_base = ioremap(bus_addr, size); +	mem_base = ioremap(phys_addr, size);  	if (!mem_base)  		goto out; @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,  	dev->dma_mem->virt_base = mem_base;  	dev->dma_mem->device_base = device_addr; -	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); +	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);  	dev->dma_mem->size = pages;  	dev->dma_mem->flags = flags; @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,  		*ret = -ENXIO;  		if (off < count && user_count <= count - off) { -			unsigned pfn = mem->pfn_base + start + off; +			unsigned long pfn = mem->pfn_base + start + off;  			*ret = remap_pfn_range(vma, vma->vm_start, pfn,  					       user_count << PAGE_SHIFT,  					       vma->vm_page_prot); diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 99802d6f3c6..6467c919c50 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ 
-37,6 +37,7 @@ struct cma {  	unsigned long	base_pfn;  	unsigned long	count;  	unsigned long	*bitmap; +	struct mutex	lock;  };  struct cma *dma_contiguous_default_area; @@ -49,7 +50,7 @@ struct cma *dma_contiguous_default_area;  /*   * Default global CMA area size can be defined in kernel's .config. - * This is usefull mainly for distro maintainers to create a kernel + * This is useful mainly for distro maintainers to create a kernel   * that works correctly for most supported systems.   * The size can be set in bytes or as a percentage of the total memory   * in the system. @@ -59,11 +60,22 @@ struct cma *dma_contiguous_default_area;   */  static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;  static phys_addr_t size_cmdline = -1; +static phys_addr_t base_cmdline; +static phys_addr_t limit_cmdline;  static int __init early_cma(char *p)  {  	pr_debug("%s(%s)\n", __func__, p);  	size_cmdline = memparse(p, &p); +	if (*p != '@') +		return 0; +	base_cmdline = memparse(p + 1, &p); +	if (*p != '-') { +		limit_cmdline = base_cmdline + size_cmdline; +		return 0; +	} +	limit_cmdline = memparse(p + 1, &p); +  	return 0;  }  early_param("cma", early_cma); @@ -107,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)  void __init dma_contiguous_reserve(phys_addr_t limit)  {  	phys_addr_t selected_size = 0; +	phys_addr_t selected_base = 0; +	phys_addr_t selected_limit = limit; +	bool fixed = false;  	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);  	if (size_cmdline != -1) {  		selected_size = size_cmdline; +		selected_base = base_cmdline; +		selected_limit = min_not_zero(limit_cmdline, limit); +		if (base_cmdline + size_cmdline == limit_cmdline) +			fixed = true;  	} else {  #ifdef CONFIG_CMA_SIZE_SEL_MBYTES  		selected_size = size_bytes; @@ -128,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit)  		pr_debug("%s: reserving %ld MiB for global area\n", __func__,  			 (unsigned long)selected_size / SZ_1M); -		dma_contiguous_reserve_area(selected_size, 0, limit, -					    &dma_contiguous_default_area); +		dma_contiguous_reserve_area(selected_size, selected_base, +					    selected_limit, +					    &dma_contiguous_default_area, +					    fixed);  	} -}; +}  static DEFINE_MUTEX(cma_mutex); @@ -155,13 +176,24 @@ static int __init cma_activate_area(struct cma *cma)  		base_pfn = pfn;  		for (j = pageblock_nr_pages; j; --j, pfn++) {  			WARN_ON_ONCE(!pfn_valid(pfn)); +			/* +			 * alloc_contig_range requires the pfn range +			 * specified to be in the same zone. Make this +			 * simple by forcing the entire CMA resv range +			 * to be in the same zone. +			 */  			if (page_zone(pfn_to_page(pfn)) != zone) -				return -EINVAL; +				goto err;  		}  		init_cma_reserved_pageblock(pfn_to_page(base_pfn));  	} while (--i); +	mutex_init(&cma->lock);  	return 0; + +err: +	kfree(cma->bitmap); +	return -EINVAL;  }  static struct cma cma_areas[MAX_CMA_AREAS]; @@ -187,15 +219,20 @@ core_initcall(cma_init_reserved_areas);   * @base: Base address of the reserved area optional, use 0 for any   * @limit: End address of the reserved memory (optional, 0 for any).   * @res_cma: Pointer to store the created cma region. + * @fixed: hint about where to place the reserved area   *   * This function reserves memory from early allocator. It should be   * called by arch specific code once the early allocator (memblock or bootmem)   * has been activated and all other subsystems have already allocated/reserved   * memory. 
This function allows the creation of custom reserved areas for specific   devices. + + * If @fixed is true, reserve contiguous area at exactly @base.  If false, + * reserve in range from @base to @limit.   */  int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, -				       phys_addr_t limit, struct cma **res_cma) +				       phys_addr_t limit, struct cma **res_cma, +				       bool fixed)  {  	struct cma *cma = &cma_areas[cma_area_count];  	phys_addr_t alignment; @@ -221,18 +258,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,  	limit &= ~(alignment - 1);  	/* Reserve memory */ -	if (base) { +	if (base && fixed) {  		if (memblock_is_region_reserved(base, size) ||  		    memblock_reserve(base, size) < 0) {  			ret = -EBUSY;  			goto err;  		}  	} else { -		/* -		 * Use __memblock_alloc_base() since -		 * memblock_alloc_base() panic()s. -		 */ -		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit); +		phys_addr_t addr = memblock_alloc_range(size, alignment, base, +							limit);  		if (!addr) {  			ret = -ENOMEM;  			goto err; @@ -261,6 +295,13 @@ err:  	return ret;  } +static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) +{ +	mutex_lock(&cma->lock); +	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); +	mutex_unlock(&cma->lock); +} +  /**   * dma_alloc_from_contiguous() - allocate pages from contiguous area   * @dev:   Pointer to device for which the allocation is performed. @@ -269,7 +310,7 @@ err:   *   * This function allocates a memory buffer for the specified device. It uses   * device specific contiguous memory area if available or the default - * global one. Requires architecture specific get_dev_cma_area() helper + * global one. Requires architecture specific dev_get_cma_area() helper   * function.   */  struct page *dma_alloc_from_contiguous(struct device *dev, int count, @@ -294,30 +335,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,  	mask = (1 << align) - 1; -	mutex_lock(&cma_mutex);  	for (;;) { +		mutex_lock(&cma->lock);  		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,  						    start, count, mask); -		if (pageno >= cma->count) +		if (pageno >= cma->count) { +			mutex_unlock(&cma->lock);  			break; +		} +		bitmap_set(cma->bitmap, pageno, count); +		/* +		 * It's safe to drop the lock here. We've marked this region for +		 * our exclusive use. If the migration fails we will take the +		 * lock again and unmark it.
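For reference, the extended cma= syntax parsed by early_cma() further up takes a size plus an optional placement; the values below are illustrative only:

	cma=64M                          64 MiB anywhere below the default limit
	cma=64M@0x40000000               fixed reservation at exactly 0x40000000
	cma=64M@0x40000000-0xc0000000    64 MiB placed somewhere in that range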
+		 */ +		mutex_unlock(&cma->lock);  		pfn = cma->base_pfn + pageno; +		mutex_lock(&cma_mutex);  		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); +		mutex_unlock(&cma_mutex);  		if (ret == 0) { -			bitmap_set(cma->bitmap, pageno, count);  			page = pfn_to_page(pfn);  			break;  		} else if (ret != -EBUSY) { +			clear_cma_bitmap(cma, pfn, count);  			break;  		} +		clear_cma_bitmap(cma, pfn, count);  		pr_debug("%s(): memory range at %p is busy, retrying\n",  			 __func__, pfn_to_page(pfn));  		/* try again with a bit different memory target */  		start = pageno + mask + 1;  	} -	mutex_unlock(&cma_mutex);  	pr_debug("%s(): returned %p\n", __func__, page);  	return page;  } @@ -350,10 +402,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,  	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); -	mutex_lock(&cma_mutex); -	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);  	free_contig_range(pfn, count); -	mutex_unlock(&cma_mutex); +	clear_cma_bitmap(cma, pfn, count);  	return true;  } diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c..6cd08e145bf 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)  /**   * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()   * @dev: Device to declare coherent memory for - * @bus_addr: Bus address of coherent memory to be declared + * @phys_addr: Physical address of coherent memory to be declared   * @device_addr: Device address of coherent memory to be declared   * @size: Size of coherent memory to be declared   * @flags: Flags @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)   * RETURNS:   * 0 on success, -errno on failure.   */ -int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,  				 dma_addr_t device_addr, size_t size, int flags)  {  	void *res; @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,  	if (!res)  		return -ENOMEM; -	rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, +	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,  					 flags);  	if (rc == 0)  		devres_add(dev, res); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 10a4467c63f..d276e33880b 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -96,6 +96,15 @@ static inline long firmware_loading_timeout(void)  	return loading_timeout > 0 ? 
loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;  } +/* firmware behavior options */ +#define FW_OPT_UEVENT	(1U << 0) +#define FW_OPT_NOWAIT	(1U << 1) +#ifdef CONFIG_FW_LOADER_USER_HELPER +#define FW_OPT_FALLBACK	(1U << 2) +#else +#define FW_OPT_FALLBACK	0 +#endif +  struct firmware_cache {  	/* firmware_buf instance will be added into the below list */  	spinlock_t lock; @@ -219,6 +228,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,  }  static void __fw_free_buf(struct kref *ref) +	__releases(&fwc->lock)  {  	struct firmware_buf *buf = to_fwbuf(ref);  	struct firmware_cache *fwc = buf->fwc; @@ -270,43 +280,47 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);  MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");  /* Don't inline this: 'struct kstat' is biggish */ -static noinline_for_stack long fw_file_size(struct file *file) +static noinline_for_stack int fw_file_size(struct file *file)  {  	struct kstat st;  	if (vfs_getattr(&file->f_path, &st))  		return -1;  	if (!S_ISREG(st.mode))  		return -1; -	if (st.size != (long)st.size) +	if (st.size != (int)st.size)  		return -1;  	return st.size;  } -static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) +static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)  { -	long size; +	int size;  	char *buf; +	int rc;  	size = fw_file_size(file);  	if (size <= 0) -		return false; +		return -EINVAL;  	buf = vmalloc(size);  	if (!buf) -		return false; -	if (kernel_read(file, 0, buf, size) != size) { +		return -ENOMEM; +	rc = kernel_read(file, 0, buf, size); +	if (rc != size) { +		if (rc > 0) +			rc = -EIO;  		vfree(buf); -		return false; +		return rc;  	}  	fw_buf->data = buf;  	fw_buf->size = size; -	return true; +	return 0;  } -static bool fw_get_filesystem_firmware(struct device *device, +static int fw_get_filesystem_firmware(struct device *device,  				       struct firmware_buf *buf)  {  	int i; -	bool success = false; +	int rc = -ENOENT;  	char *path = __getname();  	for (i = 0; i < ARRAY_SIZE(fw_path); i++) { @@ -321,14 +335,17 @@ static bool fw_get_filesystem_firmware(struct device *device,  		file = filp_open(path, O_RDONLY, 0);  		if (IS_ERR(file))  			continue; -		success = fw_read_file_contents(file, buf); +		rc = fw_read_file_contents(file, buf);  		fput(file); -		if (success) +		if (rc) +			dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n", +				path, rc); +		else  			break;  	}  	__putname(path); -	if (success) { +	if (!rc) {  		dev_dbg(device, "firmware: direct-loading firmware %s\n",  			buf->fw_id);  		mutex_lock(&fw_lock); @@ -337,7 +354,7 @@ static bool fw_get_filesystem_firmware(struct device *device,  		mutex_unlock(&fw_lock);  	} -	return success; +	return rc;  }  /* firmware holds the ownership of pages */ @@ -632,7 +649,9 @@ static ssize_t firmware_loading_store(struct device *dev,  			 * see the mapped 'buf->data' once the loading  			 * is completed.  			 
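The FW_OPT_* mask defined at the top of this hunk replaces the bool uevent/nowait pair that used to be threaded through the loader. A short sketch of how the later hunks compose it (the 'uevent' variable is the caller's flag); note that with CONFIG_FW_LOADER_USER_HELPER=n, FW_OPT_FALLBACK is 0 and every fallback test compiles away:

	unsigned int opt_flags;

	/* request_firmware(): synchronous, uevent plus user-helper fallback */
	opt_flags = FW_OPT_UEVENT | FW_OPT_FALLBACK;

	/* request_firmware_nowait(): async, uevent only if the caller asked */
	opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
		    (uevent ? FW_OPT_UEVENT : 0);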
* */ -			fw_map_pages_buf(fw_buf); +			if (fw_map_pages_buf(fw_buf)) +				dev_err(dev, "%s: map pages failed\n", +					__func__);  			list_del_init(&fw_buf->pending_list);  			complete_all(&fw_buf->completion);  			break; @@ -813,7 +832,7 @@ static void firmware_class_timeout_work(struct work_struct *work)  static struct firmware_priv *  fw_create_instance(struct firmware *firmware, const char *fw_name, -		   struct device *device, bool uevent, bool nowait) +		   struct device *device, unsigned int opt_flags)  {  	struct firmware_priv *fw_priv;  	struct device *f_dev; @@ -825,7 +844,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,  		goto exit;  	} -	fw_priv->nowait = nowait; +	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);  	fw_priv->fw = firmware;  	INIT_DELAYED_WORK(&fw_priv->timeout_work,  		firmware_class_timeout_work); @@ -841,8 +860,8 @@ exit:  }  /* load a firmware via user helper */ -static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, -				  long timeout) +static int _request_firmware_load(struct firmware_priv *fw_priv, +				  unsigned int opt_flags, long timeout)  {  	int retval = 0;  	struct device *f_dev = &fw_priv->dev; @@ -878,12 +897,13 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,  		goto err_del_bin_attr;  	} -	if (uevent) { +	if (opt_flags & FW_OPT_UEVENT) {  		buf->need_uevent = true;  		dev_set_uevent_suppress(f_dev, false);  		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);  		if (timeout != MAX_SCHEDULE_TIMEOUT) -			schedule_delayed_work(&fw_priv->timeout_work, timeout); +			queue_delayed_work(system_power_efficient_wq, +					   &fw_priv->timeout_work, timeout);  		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);  	} @@ -891,6 +911,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,  	wait_for_completion(&buf->completion);  	cancel_delayed_work_sync(&fw_priv->timeout_work); +	if (!buf->data) +		retval = -ENOMEM;  	device_remove_file(f_dev, &dev_attr_loading);  err_del_bin_attr: @@ -904,16 +926,16 @@ err_put_dev:  static int fw_load_from_user_helper(struct firmware *firmware,  				    const char *name, struct device *device, -				    bool uevent, bool nowait, long timeout) +				    unsigned int opt_flags, long timeout)  {  	struct firmware_priv *fw_priv; -	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); +	fw_priv = fw_create_instance(firmware, name, device, opt_flags);  	if (IS_ERR(fw_priv))  		return PTR_ERR(fw_priv);  	fw_priv->buf = firmware->priv; -	return _request_firmware_load(fw_priv, uevent, timeout); +	return _request_firmware_load(fw_priv, opt_flags, timeout);  }  #ifdef CONFIG_PM_SLEEP @@ -935,7 +957,7 @@ static void kill_requests_without_uevent(void)  #else /* CONFIG_FW_LOADER_USER_HELPER */  static inline int  fw_load_from_user_helper(struct firmware *firmware, const char *name, -			 struct device *device, bool uevent, bool nowait, +			 struct device *device, unsigned int opt_flags,  			 long timeout)  {  	return -ENOENT; @@ -1016,7 +1038,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,  }  static int assign_firmware_buf(struct firmware *fw, struct device *device, -				bool skip_cache) +			       unsigned int opt_flags)  {  	struct firmware_buf *buf = fw->priv; @@ -1033,7 +1055,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,  	 * device may has been deleted already, but the problem  	 * should be fixed in devres or driver core.  	 
*/ -	if (device && !skip_cache) +	/* don't cache firmware handled without uevent */ +	if (device && (opt_flags & FW_OPT_UEVENT))  		fw_add_devm_name(device, buf->fw_id);  	/* @@ -1054,7 +1077,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,  /* called from request_firmware() and request_firmware_work_func() */  static int  _request_firmware(const struct firmware **firmware_p, const char *name, -		  struct device *device, bool uevent, bool nowait) +		  struct device *device, unsigned int opt_flags)  {  	struct firmware *fw;  	long timeout; @@ -1069,7 +1092,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,  	ret = 0;  	timeout = firmware_loading_timeout(); -	if (nowait) { +	if (opt_flags & FW_OPT_NOWAIT) {  		timeout = usermodehelper_read_lock_wait(timeout);  		if (!timeout) {  			dev_dbg(device, "firmware: %s loading timed out\n", @@ -1086,13 +1109,20 @@ _request_firmware(const struct firmware **firmware_p, const char *name,  		}  	} -	if (!fw_get_filesystem_firmware(device, fw->priv)) -		ret = fw_load_from_user_helper(fw, name, device, -					       uevent, nowait, timeout); +	ret = fw_get_filesystem_firmware(device, fw->priv); +	if (ret) { +		if (opt_flags & FW_OPT_FALLBACK) { +			dev_warn(device, +				 "Direct firmware load failed with error %d\n", +				 ret); +			dev_warn(device, "Falling back to user helper\n"); +			ret = fw_load_from_user_helper(fw, name, device, +						       opt_flags, timeout); +		} +	} -	/* don't cache firmware handled without uevent */  	if (!ret) -		ret = assign_firmware_buf(fw, device, !uevent); +		ret = assign_firmware_buf(fw, device, opt_flags);  	usermodehelper_read_unlock(); @@ -1134,12 +1164,37 @@ request_firmware(const struct firmware **firmware_p, const char *name,  	/* Need to pin this module until return */  	__module_get(THIS_MODULE); -	ret = _request_firmware(firmware_p, name, device, true, false); +	ret = _request_firmware(firmware_p, name, device, +				FW_OPT_UEVENT | FW_OPT_FALLBACK);  	module_put(THIS_MODULE);  	return ret;  }  EXPORT_SYMBOL(request_firmware); +#ifdef CONFIG_FW_LOADER_USER_HELPER +/** + * request_firmware_direct: - load firmware directly without usermode helper + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function works pretty much like request_firmware(), but it doesn't + * fall back to the usermode helper even if the firmware couldn't be loaded + * directly from fs.  Hence it's useful for loading optional firmware images, + * which aren't always present, without udev's extra long timeouts.
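A hypothetical caller, to show the intended use; the firmware name and the apply_fixup() helper are invented:

	const struct firmware *fw;
	int err;

	/* Optional file: try the filesystem, never wait on the udev fallback. */
	err = request_firmware_direct(&fw, "vendor/board-fixup.bin", &pdev->dev);
	if (!err) {
		apply_fixup(fw->data, fw->size);	/* hypothetical helper */
		release_firmware(fw);
	}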
+ **/ +int request_firmware_direct(const struct firmware **firmware_p, +			    const char *name, struct device *device) +{ +	int ret; +	__module_get(THIS_MODULE); +	ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT); +	module_put(THIS_MODULE); +	return ret; +} +EXPORT_SYMBOL_GPL(request_firmware_direct); +#endif +  /**   * release_firmware: - release the resource associated with a firmware image   * @fw: firmware resource to release @@ -1162,7 +1217,7 @@ struct firmware_work {  	struct device *device;  	void *context;  	void (*cont)(const struct firmware *fw, void *context); -	bool uevent; +	unsigned int opt_flags;  };  static void request_firmware_work_func(struct work_struct *work) @@ -1173,7 +1228,7 @@ static void request_firmware_work_func(struct work_struct *work)  	fw_work = container_of(work, struct firmware_work, work);  	_request_firmware(&fw, fw_work->name, fw_work->device, -			  fw_work->uevent, true); +			  fw_work->opt_flags);  	fw_work->cont(fw, fw_work->context);  	put_device(fw_work->device); /* taken in request_firmware_nowait() */ @@ -1221,7 +1276,8 @@ request_firmware_nowait(  	fw_work->device = device;  	fw_work->context = context;  	fw_work->cont = cont; -	fw_work->uevent = uevent; +	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK | +		(uevent ? FW_OPT_UEVENT : 0);  	if (!try_module_get(module)) {  		kfree(fw_work); @@ -1519,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work)   */  static void device_uncache_fw_images_delay(unsigned long delay)  { -	schedule_delayed_work(&fw_cache.work, -			msecs_to_jiffies(delay)); +	queue_delayed_work(system_power_efficient_wq, &fw_cache.work, +			   msecs_to_jiffies(delay));  }  static int fw_pm_notify(struct notifier_block *notify_block, @@ -1529,6 +1585,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,  	switch (mode) {  	case PM_HIBERNATION_PREPARE:  	case PM_SUSPEND_PREPARE: +	case PM_RESTORE_PREPARE:  		kill_requests_without_uevent();  		device_cache_fw_images();  		break; diff --git a/drivers/base/init.c b/drivers/base/init.c index c16f0b808a1..da033d3bab3 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -33,4 +33,5 @@ void __init driver_init(void)  	platform_bus_init();  	cpu_dev_init();  	memory_dev_init(); +	container_dev_init();  } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 9e59f6535c4..89f752dd846 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -118,16 +118,6 @@ static ssize_t show_mem_start_phys_index(struct device *dev,  	return sprintf(buf, "%08lx\n", phys_index);  } -static ssize_t show_mem_end_phys_index(struct device *dev, -			struct device_attribute *attr, char *buf) -{ -	struct memory_block *mem = to_memory_block(dev); -	unsigned long phys_index; - -	phys_index = mem->end_section_nr / sections_per_block; -	return sprintf(buf, "%08lx\n", phys_index); -} -  /*   * Show whether the section of memory is likely to be hot-removable   */ @@ -333,8 +323,10 @@ store_mem_state(struct device *dev,  		online_type = ONLINE_KEEP;  	else if (!strncmp(buf, "offline", min_t(int, count, 7)))  		online_type = -1; -	else -		return -EINVAL; +	else { +		ret = -EINVAL; +		goto err; +	}  	switch (online_type) {  	case ONLINE_KERNEL: @@ -357,6 +349,7 @@ store_mem_state(struct device *dev,  		ret = -EINVAL; /* should never happen */  	} +err:  	unlock_device_hotplug();  	if (ret) @@ -381,7 +374,6 @@ static ssize_t show_phys_device(struct device *dev,  }  static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); 
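Back in firmware_class.c, note that request_firmware_nowait() keeps its bool uevent parameter and merely folds it into opt_flags, so existing callers are unaffected. An illustrative call (the context pointer and completion callback are invented):

	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      "foo-fw.bin", &pdev->dev, GFP_KERNEL,
				      ctx, foo_firmware_cont);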
-static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);  static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);  static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);  static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); @@ -526,7 +518,6 @@ struct memory_block *find_memory_block(struct mem_section *section)  static struct attribute *memory_memblk_attrs[] = {  	&dev_attr_phys_index.attr, -	&dev_attr_end_phys_index.attr,  	&dev_attr_state.attr,  	&dev_attr_phys_device.attr,  	&dev_attr_removable.attr, diff --git a/drivers/base/node.c b/drivers/base/node.c index bc9f43bf7e2..8f7ed9933a7 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -599,7 +599,11 @@ int register_one_node(int nid)  void unregister_one_node(int nid)  { +	if (!node_devices[nid]) +		return; +  	unregister_node(node_devices[nid]); +	kfree(node_devices[nid]);  	node_devices[nid] = NULL;  } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4f8bef3eb5a..eee48c49f5d 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -13,6 +13,7 @@  #include <linux/string.h>  #include <linux/platform_device.h>  #include <linux/of_device.h> +#include <linux/of_irq.h>  #include <linux/module.h>  #include <linux/init.h>  #include <linux/dma-mapping.h> @@ -87,7 +88,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)  		return -ENXIO;  	return dev->archdata.irqs[num];  #else -	struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); +	struct resource *r; +	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { +		int ret; + +		ret = of_irq_get(dev->dev.of_node, num); +		if (ret >= 0 || ret == -EPROBE_DEFER) +			return ret; +	} + +	r = platform_get_resource(dev, IORESOURCE_IRQ, num);  	return r ? r->start : -ENXIO;  #endif @@ -126,9 +136,17 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);   */  int platform_get_irq_byname(struct platform_device *dev, const char *name)  { -	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, -							  name); +	struct resource *r; + +	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { +		int ret; + +		ret = of_irq_get_byname(dev->dev.of_node, name); +		if (ret >= 0 || ret == -EPROBE_DEFER) +			return ret; +	} +	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);  	return r ? 
r->start : -ENXIO;  }  EXPORT_SYMBOL_GPL(platform_get_irq_byname); @@ -432,7 +450,7 @@ struct platform_device *platform_device_register_full(  		goto err_alloc;  	pdev->dev.parent = pdevinfo->parent; -	ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); +	ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);  	if (pdevinfo->dma_mask) {  		/* @@ -463,7 +481,7 @@ struct platform_device *platform_device_register_full(  	ret = platform_device_add(pdev);  	if (ret) {  err: -		ACPI_HANDLE_SET(&pdev->dev, NULL); +		ACPI_COMPANION_SET(&pdev->dev, NULL);  		kfree(pdev->dev.dma_mask);  err_alloc: @@ -481,13 +499,17 @@ static int platform_drv_probe(struct device *_dev)  	struct platform_device *dev = to_platform_device(_dev);  	int ret; -	if (ACPI_HANDLE(_dev)) -		acpi_dev_pm_attach(_dev, true); +	acpi_dev_pm_attach(_dev, true);  	ret = drv->probe(dev); -	if (ret && ACPI_HANDLE(_dev)) +	if (ret)  		acpi_dev_pm_detach(_dev, true); +	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { +		dev_warn(_dev, "probe deferral not supported\n"); +		ret = -ENXIO; +	} +  	return ret;  } @@ -503,8 +525,7 @@ static int platform_drv_remove(struct device *_dev)  	int ret;  	ret = drv->remove(dev); -	if (ACPI_HANDLE(_dev)) -		acpi_dev_pm_detach(_dev, true); +	acpi_dev_pm_detach(_dev, true);  	return ret;  } @@ -515,8 +536,7 @@ static void platform_drv_shutdown(struct device *_dev)  	struct platform_device *dev = to_platform_device(_dev);  	drv->shutdown(dev); -	if (ACPI_HANDLE(_dev)) -		acpi_dev_pm_detach(_dev, true); +	acpi_dev_pm_detach(_dev, true);  }  /** @@ -553,8 +573,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);  /**   * platform_driver_probe - register driver for non-hotpluggable device   * @drv: platform driver structure - * @probe: the driver probe routine, probably from an __init section, - *         must not return -EPROBE_DEFER. + * @probe: the driver probe routine, probably from an __init section   *   * Use this instead of platform_driver_register() when you know the device   * is not hotpluggable and has already been registered, and you want to @@ -565,8 +584,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);   * into system-on-chip processors, where the controller devices have been   * configured as part of board setup.   * - * This is incompatible with deferred probing so probe() must not - * return -EPROBE_DEFER. + * Note that this is incompatible with deferred probing.   *   * Returns zero if the driver registered and bound to a device, else returns   * a negative error code and with the driver not registered. @@ -576,6 +594,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,  {  	int retval, code; +	/* +	 * Prevent driver from requesting probe deferral to avoid further +	 * futile probe attempts. +	 */ +	drv->prevent_deferred_probe = true; +  	/* make sure driver won't have bind/unbind attributes */  	drv->driver.suppress_bind_attrs = true; @@ -668,7 +692,17 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,  			     char *buf)  {  	struct platform_device	*pdev = to_platform_device(dev); -	int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name); +	int len; + +	len = of_device_get_modalias(dev, buf, PAGE_SIZE -1); +	if (len != -ENODEV) +		return len; + +	len = acpi_device_modalias(dev, buf, PAGE_SIZE -1); +	if (len != -ENODEV) +		return len; + +	len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);  	return (len >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : len;  } @@ -690,6 +724,10 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)  	if (rc != -ENODEV)  		return rc; +	rc = acpi_device_uevent_modalias(dev, env); +	if (rc != -ENODEV) +		return rc; +  	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,  			pdev->name);  	return 0; diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 2e58ebb1f6c..1cb8544598d 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,6 +1,5 @@ -obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o +obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o runtime.o  obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o -obj-$(CONFIG_PM_RUNTIME)	+= runtime.o  obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o  obj-$(CONFIG_PM_OPP)	+= opp.o  obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o domain_governor.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 9d8fde70939..b99e6c06ee6 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/device.h>  #include <linux/io.h> @@ -33,6 +32,21 @@ struct pm_clock_entry {  };  /** + * pm_clk_enable - Enable a clock, reporting any errors + * @dev: The device for the given clock + * @clk: The clock being enabled. + */ +static inline int __pm_clk_enable(struct device *dev, struct clk *clk) +{ +	int ret = clk_enable(clk); +	if (ret) +		dev_err(dev, "%s: failed to enable clk %p, error %d\n", +			__func__, clk, ret); + +	return ret; +} + +/**   * pm_clk_acquire - Acquire a device clock.   * @dev: Device whose clock is to be acquired.   * @ce: PM clock entry corresponding to the clock. @@ -43,6 +57,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)  	if (IS_ERR(ce->clk)) {  		ce->status = PCE_STATUS_ERROR;  	} else { +		clk_prepare(ce->clk);  		ce->status = PCE_STATUS_ACQUIRED;  		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);  	} @@ -99,10 +114,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)  	if (ce->status < PCE_STATUS_ERROR) {  		if (ce->status == PCE_STATUS_ENABLED) -			clk_disable_unprepare(ce->clk); +			clk_disable(ce->clk); -		if (ce->status >= PCE_STATUS_ACQUIRED) +		if (ce->status >= PCE_STATUS_ACQUIRED) { +			clk_unprepare(ce->clk);  			clk_put(ce->clk); +		}  	}  	kfree(ce->con_id); @@ -249,6 +266,7 @@ int pm_clk_resume(struct device *dev)  	struct pm_subsys_data *psd = dev_to_psd(dev);  	struct pm_clock_entry *ce;  	unsigned long flags; +	int ret;  	dev_dbg(dev, "%s()\n", __func__); @@ -259,8 +277,9 @@ int pm_clk_resume(struct device *dev)  	list_for_each_entry(ce, &psd->clock_list, node) {  		if (ce->status < PCE_STATUS_ERROR) { -			clk_enable(ce->clk); -			ce->status = PCE_STATUS_ENABLED; +			ret = __pm_clk_enable(dev, ce->clk); +			if (!ret) +				ce->status = PCE_STATUS_ENABLED;  		}  	} @@ -376,7 +395,7 @@ int pm_clk_resume(struct device *dev)  	spin_lock_irqsave(&psd->lock, flags);  	list_for_each_entry(ce, &psd->clock_list, node) -		clk_enable(ce->clk); +		__pm_clk_enable(dev, ce->clk);  	spin_unlock_irqrestore(&psd->lock, flags); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 5da91404130..df2e5eeaeb0 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   
*/ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/device.h>  #include <linux/export.h> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index bfb8955c406..eee55c1e5fd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/io.h>  #include <linux/pm_runtime.h> @@ -42,7 +41,7 @@  	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\  	if (!__retval && __elapsed > __td->field) {				\  		__td->field = __elapsed;					\ -		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\ +		dev_dbg(dev, name " latency exceeded, new value %lld ns\n",	\  			__elapsed);						\  		genpd->max_off_time_changed = true;				\  		__td->constraint_changed = true;				\ @@ -106,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)  static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)  {  	atomic_inc(&genpd->sd_count); -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  }  static void genpd_acquire_lock(struct generic_pm_domain *genpd) @@ -706,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev)  	return 0;  } +static bool pd_ignore_unused; +static int __init pd_ignore_unused_setup(char *__unused) +{ +	pd_ignore_unused = true; +	return 1; +} +__setup("pd_ignore_unused", pd_ignore_unused_setup); +  /**   * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.   */ @@ -713,6 +720,11 @@ void pm_genpd_poweroff_unused(void)  {  	struct generic_pm_domain *genpd; +	if (pd_ignore_unused) { +		pr_warn("genpd: Not disabling unused power domains\n"); +		return; +	} +  	mutex_lock(&gpd_list_lock);  	list_for_each_entry(genpd, &gpd_list, gpd_list_node) diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 28dee3053f1..a089e3bcdfb 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -6,7 +6,6 @@   * This file is released under the GPLv2.   */ -#include <linux/init.h>  #include <linux/kernel.h>  #include <linux/pm_domain.h>  #include <linux/pm_qos.h> diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 5ee030a864f..96a92db83ca 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -10,7 +10,7 @@  #include <linux/pm_runtime.h>  #include <linux/export.h> -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM  /**   * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.   * @dev: Device to suspend. @@ -48,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev)  	return ret;  }  EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */  #ifdef CONFIG_PM_SLEEP  /** @@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)  EXPORT_SYMBOL_GPL(pm_generic_restore);  /** - * pm_generic_complete - Generic routine competing a device power transition. + * pm_generic_complete - Generic routine completing a device power transition.   * @dev: Device to handle.   *   * Complete a device power transition during a system-wide power transition. 
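One detail of the clock_ops.c hunk above is worth spelling out: clk_prepare_enable() is split into its halves because clk_prepare() may sleep while clk_enable() must work in atomic context, and pm_clk_resume() runs under a spinlock. A sketch of the resulting pattern (the names follow the file):

	/* At acquire time, in process context (may sleep): */
	clk_prepare(ce->clk);

	/* On every runtime resume, under the subsys-data spinlock: */
	spin_lock_irqsave(&psd->lock, flags);
	ret = clk_enable(ce->clk);	/* must be atomic-safe */
	spin_unlock_irqrestore(&psd->lock, flags);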
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9f098a82cf0..bf412961a93 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -29,7 +29,10 @@  #include <linux/async.h>  #include <linux/suspend.h>  #include <trace/events/power.h> +#include <linux/cpufreq.h>  #include <linux/cpuidle.h> +#include <linux/timer.h> +  #include "../base.h"  #include "power.h" @@ -89,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)  {  	dev->power.is_prepared = false;  	dev->power.is_suspended = false; +	dev->power.is_noirq_suspended = false; +	dev->power.is_late_suspended = false;  	init_completion(&dev->power.completion);  	complete_all(&dev->power.completion);  	dev->power.wakeup = NULL; @@ -209,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,  		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),  			error, (unsigned long long)nsecs >> 10);  	} - -	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event), -				    error);  }  /** @@ -382,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,  	calltime = initcall_debug_start(dev);  	pm_dev_dbg(dev, state, info); +	trace_device_pm_callback_start(dev, info, state.event);  	error = cb(dev); +	trace_device_pm_callback_end(dev, error);  	suspend_report_result(cb, error);  	initcall_debug_report(dev, calltime, error, state, info); @@ -390,6 +394,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,  	return error;  } +#ifdef CONFIG_DPM_WATCHDOG +struct dpm_watchdog { +	struct device		*dev; +	struct task_struct	*tsk; +	struct timer_list	timer; +}; + +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ +	struct dpm_watchdog wd + +/** + * dpm_watchdog_handler - Driver suspend / resume watchdog handler. + * @data: Watchdog object address. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so panic() to + * capture a crash-dump in pstore. + */ +static void dpm_watchdog_handler(unsigned long data) +{ +	struct dpm_watchdog *wd = (void *)data; + +	dev_emerg(wd->dev, "**** DPM device timeout ****\n"); +	show_stack(wd->tsk, NULL); +	panic("%s %s: unrecoverable failure\n", +		dev_driver_string(wd->dev), dev_name(wd->dev)); +} + +/** + * dpm_watchdog_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) +{ +	struct timer_list *timer = &wd->timer; + +	wd->dev = dev; +	wd->tsk = current; + +	init_timer_on_stack(timer); +	/* use same timeout value for both suspend and resume */ +	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; +	timer->function = dpm_watchdog_handler; +	timer->data = (unsigned long)wd; +	add_timer(timer); +} + +/** + * dpm_watchdog_clear - Disable suspend/resume watchdog. + * @wd: Watchdog to disable. + */ +static void dpm_watchdog_clear(struct dpm_watchdog *wd) +{ +	struct timer_list *timer = &wd->timer; + +	del_timer_sync(timer); +	destroy_timer_on_stack(timer); +} +#else +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) +#define dpm_watchdog_set(x, y) +#define dpm_watchdog_clear(x) +#endif +  /*------------------------- Resume routines -------------------------*/  /** @@ -400,7 +469,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,   * The driver of @dev will not receive interrupts while this function is being   * executed.   
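The async noirq/early phases added below only cover devices that have opted in via dev->power.async_suspend; a driver typically does that with one call at probe time (sketch, device name illustrative):

	/* Let this device suspend/resume in parallel with unrelated devices. */
	device_enable_async_suspend(&pdev->dev);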
*/ -static int device_resume_noirq(struct device *dev, pm_message_t state) +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; @@ -409,9 +478,14 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); -	if (dev->power.syscore) +	if (dev->power.syscore || dev->power.direct_complete)  		goto Out; +	if (!dev->power.is_noirq_suspended) +		goto Out; + +	dpm_wait(dev->parent, async); +  	if (dev->pm_domain) {  		info = "noirq power domain ";  		callback = pm_noirq_op(&dev->pm_domain->ops, state); @@ -432,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)  	}  	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_noirq_suspended = false;   Out: +	complete_all(&dev->power.completion);  	TRACE_RESUME(error);  	return error;  } +static bool is_async(struct device *dev) +{ +	return dev->power.async_suspend && pm_async_enabled +		&& !pm_trace_is_enabled(); +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_noirq(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} +  /**   * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.   * @state: PM transition of the system being carried out. @@ -447,32 +541,53 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)   */  static void dpm_resume_noirq(pm_message_t state)  { +	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);  	mutex_lock(&dpm_list_mtx); -	while (!list_empty(&dpm_noirq_list)) { -		struct device *dev = to_device(dpm_noirq_list.next); -		int error; +	pm_transition = state; + +	/* +	 * Advance the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_noirq_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_noirq, dev); +		} +	} +	while (!list_empty(&dpm_noirq_list)) { +		dev = to_device(dpm_noirq_list.next);  		get_device(dev);  		list_move_tail(&dev->power.entry, &dpm_late_early_list);  		mutex_unlock(&dpm_list_mtx); -		error = device_resume_noirq(dev, state); -		if (error) { -			suspend_stats.failed_resume_noirq++; -			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); -			dpm_save_failed_dev(dev_name(dev)); -			pm_dev_err(dev, state, " noirq", error); +		if (!is_async(dev)) { +			int error; + +			error = device_resume_noirq(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_noirq++; +				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " noirq", error); +			}  		}  		mutex_lock(&dpm_list_mtx);  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full();  	dpm_show_time(starttime, state, "noirq");  	resume_device_irqs();  	cpuidle_resume(); +	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);  }  /** @@ -482,7 +597,7 @@ static void dpm_resume_noirq(pm_message_t state)   *   * Runtime PM is disabled for @dev while this function is being executed.
*/ -static int device_resume_early(struct device *dev, pm_message_t state) +static int device_resume_early(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; @@ -491,9 +606,14 @@ static int device_resume_early(struct device *dev, pm_message_t state)  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); -	if (dev->power.syscore) +	if (dev->power.syscore || dev->power.direct_complete) +		goto Out; + +	if (!dev->power.is_late_suspended)  		goto Out; +	dpm_wait(dev->parent, async); +  	if (dev->pm_domain) {  		info = "early power domain ";  		callback = pm_late_early_op(&dev->pm_domain->ops, state); @@ -514,44 +634,78 @@ static int device_resume_early(struct device *dev, pm_message_t state)  	}  	error = dpm_run_callback(callback, dev, state, info); +	dev->power.is_late_suspended = false;   Out:  	TRACE_RESUME(error);  	pm_runtime_enable(dev); +	complete_all(&dev->power.completion);  	return error;  } +static void async_resume_early(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = device_resume_early(dev, pm_transition, true); +	if (error) +		pm_dev_err(dev, pm_transition, " async", error); + +	put_device(dev); +} +  /**   * dpm_resume_early - Execute "early resume" callbacks for all devices.   * @state: PM transition of the system being carried out.   */  static void dpm_resume_early(pm_message_t state)  { +	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);  	mutex_lock(&dpm_list_mtx); -	while (!list_empty(&dpm_late_early_list)) { -		struct device *dev = to_device(dpm_late_early_list.next); -		int error; +	pm_transition = state; +	/* +	 * Advance the async threads upfront, +	 * in case the starting of async threads is +	 * delayed by non-async resuming devices. +	 */ +	list_for_each_entry(dev, &dpm_late_early_list, power.entry) { +		reinit_completion(&dev->power.completion); +		if (is_async(dev)) { +			get_device(dev); +			async_schedule(async_resume_early, dev); +		} +	} + +	while (!list_empty(&dpm_late_early_list)) { +		dev = to_device(dpm_late_early_list.next);  		get_device(dev);  		list_move_tail(&dev->power.entry, &dpm_suspended_list);  		mutex_unlock(&dpm_list_mtx); -		error = device_resume_early(dev, state); -		if (error) { -			suspend_stats.failed_resume_early++; -			dpm_save_failed_step(SUSPEND_RESUME_EARLY); -			dpm_save_failed_dev(dev_name(dev)); -			pm_dev_err(dev, state, " early", error); -		} +		if (!is_async(dev)) { +			int error; +			error = device_resume_early(dev, state, false); +			if (error) { +				suspend_stats.failed_resume_early++; +				dpm_save_failed_step(SUSPEND_RESUME_EARLY); +				dpm_save_failed_dev(dev_name(dev)); +				pm_dev_err(dev, state, " early", error); +			} +		}  		mutex_lock(&dpm_list_mtx);  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	async_synchronize_full();  	dpm_show_time(starttime, state, "early"); +	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);  }  /** @@ -576,6 +730,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)  	pm_callback_t callback = NULL;  	char *info = NULL;  	int error = 0; +	DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	TRACE_DEVICE(dev);  	TRACE_RESUME(0); @@ -583,7 +738,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)  	if (dev->power.syscore)  		goto Complete; +	if (dev->power.direct_complete) { +		/* Match the pm_runtime_disable() in __device_suspend().
*/ +		pm_runtime_enable(dev); +		goto Complete; +	} +  	dpm_wait(dev->parent, async); +	dpm_watchdog_set(&wd, dev);  	device_lock(dev);  	/* @@ -642,6 +804,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)   Unlock:  	device_unlock(dev); +	dpm_watchdog_clear(&wd);   Complete:  	complete_all(&dev->power.completion); @@ -662,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie)  	put_device(dev);  } -static bool is_async(struct device *dev) -{ -	return dev->power.async_suspend && pm_async_enabled -		&& !pm_trace_is_enabled(); -} -  /**   * dpm_resume - Execute "resume" callbacks for non-sysdev devices.   * @state: PM transition of the system being carried out. @@ -680,6 +837,7 @@ void dpm_resume(pm_message_t state)  	struct device *dev;  	ktime_t starttime = ktime_get(); +	trace_suspend_resume(TPS("dpm_resume"), state.event, true);  	might_sleep();  	mutex_lock(&dpm_list_mtx); @@ -687,7 +845,7 @@ void dpm_resume(pm_message_t state)  	async_error = 0;  	list_for_each_entry(dev, &dpm_suspended_list, power.entry) { -		INIT_COMPLETION(dev->power.completion); +		reinit_completion(&dev->power.completion);  		if (is_async(dev)) {  			get_device(dev);  			async_schedule(async_resume, dev); @@ -719,6 +877,9 @@ void dpm_resume(pm_message_t state)  	mutex_unlock(&dpm_list_mtx);  	async_synchronize_full();  	dpm_show_time(starttime, state, NULL); + +	cpufreq_resume(); +	trace_suspend_resume(TPS("dpm_resume"), state.event, false);  }  /** @@ -757,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)  	if (callback) {  		pm_dev_dbg(dev, state, info); +		trace_device_pm_callback_start(dev, info, state.event);  		callback(dev); +		trace_device_pm_callback_end(dev, 0);  	}  	device_unlock(dev); @@ -776,6 +939,7 @@ void dpm_complete(pm_message_t state)  {  	struct list_head list; +	trace_suspend_resume(TPS("dpm_complete"), state.event, true);  	might_sleep();  	INIT_LIST_HEAD(&list); @@ -795,6 +959,7 @@ void dpm_complete(pm_message_t state)  	}  	list_splice(&list, &dpm_list);  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_complete"), state.event, false);  }  /** @@ -843,13 +1008,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)   * The driver of @dev will not receive interrupts while this function is being   * executed.   
*/ -static int device_suspend_noirq(struct device *dev, pm_message_t state) +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; +	int error = 0; -	if (dev->power.syscore) -		return 0; +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async);  	if (dev->pm_domain) {  		info = "noirq power domain "; @@ -870,7 +1046,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)  		callback = pm_noirq_op(dev->driver->pm, state);  	} -	return dpm_run_callback(callback, dev, state, info); +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_noirq_suspended = true; +	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion); +	return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_noirq(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} + +	put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_noirq, dev); +		return 0; +	} +	return __device_suspend_noirq(dev, pm_transition, false);  }  /** @@ -885,22 +1095,24 @@ static int dpm_suspend_noirq(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);  	cpuidle_pause();  	suspend_device_irqs();  	mutex_lock(&dpm_list_mtx); +	pm_transition = state; +	async_error = 0; +  	while (!list_empty(&dpm_late_early_list)) {  		struct device *dev = to_device(dpm_late_early_list.prev);  		get_device(dev);  		mutex_unlock(&dpm_list_mtx); -		error = device_suspend_noirq(dev, state); +		error = device_suspend_noirq(dev);  		mutex_lock(&dpm_list_mtx);  		if (error) {  			pm_dev_err(dev, state, " noirq", error); -			suspend_stats.failed_suspend_noirq++; -			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);  			dpm_save_failed_dev(dev_name(dev));  			put_device(dev);  			break; @@ -909,16 +1121,22 @@ static int dpm_suspend_noirq(pm_message_t state)  			list_move(&dev->power.entry, &dpm_noirq_list);  		put_device(dev); -		if (pm_wakeup_pending()) { -			error = -EBUSY; +		if (async_error)  			break; -		}  	}  	mutex_unlock(&dpm_list_mtx); -	if (error) +	async_synchronize_full(); +	if (!error) +		error = async_error; + +	if (error) { +		suspend_stats.failed_suspend_noirq++; +		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);  		dpm_resume_noirq(resume_event(state)); -	else +	} else {  		dpm_show_time(starttime, state, "noirq"); +	} +	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);  	return error;  } @@ -929,15 +1147,26 @@ static int dpm_suspend_noirq(pm_message_t state)   *   * Runtime PM is disabled for @dev while this function is being executed.   
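Both new suspend-phase helpers bail out early once pm_wakeup_pending() is set; that flag goes up when a wakeup-capable driver reports an event while the transition is in flight, for example (illustrative):

	/* From an IRQ handler during suspend: flags a pending wakeup, which
	 * the loops above translate into async_error = -EBUSY. */
	pm_wakeup_event(dev, 0);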
*/ -static int device_suspend_late(struct device *dev, pm_message_t state) +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)  {  	pm_callback_t callback = NULL;  	char *info = NULL; +	int error = 0;  	__pm_runtime_disable(dev, false); -	if (dev->power.syscore) -		return 0; +	if (async_error) +		goto Complete; + +	if (pm_wakeup_pending()) { +		async_error = -EBUSY; +		goto Complete; +	} + +	if (dev->power.syscore || dev->power.direct_complete) +		goto Complete; + +	dpm_wait_for_children(dev, async);  	if (dev->pm_domain) {  		info = "late power domain "; @@ -958,7 +1187,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)  		callback = pm_late_early_op(dev->driver->pm, state);  	} -	return dpm_run_callback(callback, dev, state, info); +	error = dpm_run_callback(callback, dev, state, info); +	if (!error) +		dev->power.is_late_suspended = true; +	else +		async_error = error; + +Complete: +	complete_all(&dev->power.completion); +	return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ +	struct device *dev = (struct device *)data; +	int error; + +	error = __device_suspend_late(dev, pm_transition, true); +	if (error) { +		dpm_save_failed_dev(dev_name(dev)); +		pm_dev_err(dev, pm_transition, " async", error); +	} +	put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ +	reinit_completion(&dev->power.completion); + +	if (pm_async_enabled && dev->power.async_suspend) { +		get_device(dev); +		async_schedule(async_suspend_late, dev); +		return 0; +	} + +	return __device_suspend_late(dev, pm_transition, false);  }  /** @@ -970,20 +1233,22 @@ static int dpm_suspend_late(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);  	mutex_lock(&dpm_list_mtx); +	pm_transition = state; +	async_error = 0; +  	while (!list_empty(&dpm_suspended_list)) {  		struct device *dev = to_device(dpm_suspended_list.prev);  		get_device(dev);  		mutex_unlock(&dpm_list_mtx); -		error = device_suspend_late(dev, state); +		error = device_suspend_late(dev);  		mutex_lock(&dpm_list_mtx);  		if (error) {  			pm_dev_err(dev, state, " late", error); -			suspend_stats.failed_suspend_late++; -			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);  			dpm_save_failed_dev(dev_name(dev));  			put_device(dev);  			break; @@ -992,17 +1257,19 @@ static int dpm_suspend_late(pm_message_t state)  			list_move(&dev->power.entry, &dpm_late_early_list);  		put_device(dev); -		if (pm_wakeup_pending()) { -			error = -EBUSY; +		if (async_error)  			break; -		}  	}  	mutex_unlock(&dpm_list_mtx); -	if (error) +	async_synchronize_full(); +	if (error) { +		suspend_stats.failed_suspend_late++; +		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);  		dpm_resume_early(resume_event(state)); -	else +	} else {  		dpm_show_time(starttime, state, "late"); - +	} +	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);  	return error;  } @@ -1041,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state,  	calltime = initcall_debug_start(dev); +	trace_device_pm_callback_start(dev, info, state.event);  	error = cb(dev, state); +	trace_device_pm_callback_end(dev, error);  	suspend_report_result(cb, error);  	initcall_debug_report(dev, calltime, error, state, info); @@ -1060,6 +1329,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)  	pm_callback_t callback = NULL;  	char *info = NULL;  	int error = 0; +	
DECLARE_DPM_WATCHDOG_ON_STACK(wd);  	dpm_wait_for_children(dev, async); @@ -1083,6 +1353,18 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)  	if (dev->power.syscore)  		goto Complete; +	if (dev->power.direct_complete) { +		if (pm_runtime_status_suspended(dev)) { +			pm_runtime_disable(dev); +			if (pm_runtime_suspended_if_enabled(dev)) +				goto Complete; + +			pm_runtime_enable(dev); +		} +		dev->power.direct_complete = false; +	} + +	dpm_watchdog_set(&wd, dev);  	device_lock(dev);  	if (dev->pm_domain) { @@ -1132,13 +1414,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)   End:  	if (!error) { +		struct device *parent = dev->parent; +  		dev->power.is_suspended = true; -		if (dev->power.wakeup_path -		    && dev->parent && !dev->parent->power.ignore_children) -			dev->parent->power.wakeup_path = true; +		if (parent) { +			spin_lock_irq(&parent->power.lock); + +			dev->parent->power.direct_complete = false; +			if (dev->power.wakeup_path +			    && !dev->parent->power.ignore_children) +				dev->parent->power.wakeup_path = true; + +			spin_unlock_irq(&parent->power.lock); +		}  	}  	device_unlock(dev); +	dpm_watchdog_clear(&wd);   Complete:  	complete_all(&dev->power.completion); @@ -1164,7 +1456,7 @@ static void async_suspend(void *data, async_cookie_t cookie)  static int device_suspend(struct device *dev)  { -	INIT_COMPLETION(dev->power.completion); +	reinit_completion(&dev->power.completion);  	if (pm_async_enabled && dev->power.async_suspend) {  		get_device(dev); @@ -1184,8 +1476,11 @@ int dpm_suspend(pm_message_t state)  	ktime_t starttime = ktime_get();  	int error = 0; +	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);  	might_sleep(); +	cpufreq_suspend(); +  	mutex_lock(&dpm_list_mtx);  	pm_transition = state;  	async_error = 0; @@ -1219,6 +1514,7 @@ int dpm_suspend(pm_message_t state)  		dpm_save_failed_step(SUSPEND_SUSPEND);  	} else  		dpm_show_time(starttime, state, NULL); +	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);  	return error;  } @@ -1234,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state)  {  	int (*callback)(struct device *) = NULL;  	char *info = NULL; -	int error = 0; +	int ret = 0;  	if (dev->power.syscore)  		return 0; @@ -1271,13 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state)  	}  	if (callback) { -		error = callback(dev); -		suspend_report_result(callback, error); +		trace_device_pm_callback_start(dev, info, state.event); +		ret = callback(dev); +		trace_device_pm_callback_end(dev, ret);  	}  	device_unlock(dev); -	return error; +	if (ret < 0) { +		suspend_report_result(callback, ret); +		pm_runtime_put(dev); +		return ret; +	} +	/* +	 * A positive return value from ->prepare() means "this device appears +	 * to be runtime-suspended and its state is fine, so if it really is +	 * runtime-suspended, you can leave it in that state provided that you +	 * will do the same thing with all of its descendants".  This only +	 * applies to suspend transitions, however. 
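A minimal sketch of a ->prepare() callback using this convention (the foo_ name is invented):

	static int foo_prepare(struct device *dev)
	{
		/* Positive return: if the device really is runtime-suspended,
		 * it may stay that way across the system transition. */
		return pm_runtime_status_suspended(dev) ? 1 : 0;
	}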
+	 */ +	spin_lock_irq(&dev->power.lock); +	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; +	spin_unlock_irq(&dev->power.lock); +	return 0;  }  /** @@ -1290,6 +1602,7 @@ int dpm_prepare(pm_message_t state)  {  	int error = 0; +	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);  	might_sleep();  	mutex_lock(&dpm_list_mtx); @@ -1320,6 +1633,7 @@ int dpm_prepare(pm_message_t state)  		put_device(dev);  	}  	mutex_unlock(&dpm_list_mtx); +	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);  	return error;  } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index ef89897c604..89ced955faf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -14,14 +14,12 @@  #include <linux/kernel.h>  #include <linux/errno.h>  #include <linux/err.h> -#include <linux/init.h>  #include <linux/slab.h> -#include <linux/cpufreq.h>  #include <linux/device.h>  #include <linux/list.h>  #include <linux/rculist.h>  #include <linux/rcupdate.h> -#include <linux/opp.h> +#include <linux/pm_opp.h>  #include <linux/of.h>  #include <linux/export.h> @@ -42,7 +40,7 @@   */  /** - * struct opp - Generic OPP description structure + * struct dev_pm_opp - Generic OPP description structure   * @node:	opp list node. The nodes are maintained throughout the lifetime   *		of boot. It is expected only an optimal set of OPPs are   *		added to the library by the SoC framework. @@ -59,7 +57,7 @@   *   * This structure stores the OPP information for a given device.   */ -struct opp { +struct dev_pm_opp {  	struct list_head node;  	bool available; @@ -136,7 +134,7 @@ static struct device_opp *find_device_opp(struct device *dev)  }  /** - * opp_get_voltage() - Gets the voltage corresponding to an available opp + * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp   * @opp:	opp for which voltage has to be returned for   *   * Return voltage in micro volt corresponding to the opp, else @@ -150,9 +148,9 @@ static struct device_opp *find_device_opp(struct device *dev)   * prior to unlocking with rcu_read_unlock() to maintain the integrity of the   * pointer.   */ -unsigned long opp_get_voltage(struct opp *opp) +unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)  { -	struct opp *tmp_opp; +	struct dev_pm_opp *tmp_opp;  	unsigned long v = 0;  	tmp_opp = rcu_dereference(opp); @@ -163,10 +161,10 @@ unsigned long opp_get_voltage(struct opp *opp)  	return v;  } -EXPORT_SYMBOL_GPL(opp_get_voltage); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);  /** - * opp_get_freq() - Gets the frequency corresponding to an available opp + * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp   * @opp:	opp for which frequency has to be returned for   *   * Return frequency in hertz corresponding to the opp, else @@ -180,9 +178,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);   * prior to unlocking with rcu_read_unlock() to maintain the integrity of the   * pointer.   
*/ -unsigned long opp_get_freq(struct opp *opp) +unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)  { -	struct opp *tmp_opp; +	struct dev_pm_opp *tmp_opp;  	unsigned long f = 0;  	tmp_opp = rcu_dereference(opp); @@ -193,10 +191,10 @@ unsigned long opp_get_freq(struct opp *opp)  	return f;  } -EXPORT_SYMBOL_GPL(opp_get_freq); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);  /** - * opp_get_opp_count() - Get number of opps available in the opp list + * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list   * @dev:	device for which we do this operation   *   * This function returns the number of available opps if there are any, @@ -206,10 +204,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);   * internally references two RCU protected structures: device_opp and opp which   * are safe as long as we are under a common RCU locked section.   */ -int opp_get_opp_count(struct device *dev) +int dev_pm_opp_get_opp_count(struct device *dev)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp; +	struct dev_pm_opp *temp_opp;  	int count = 0;  	dev_opp = find_device_opp(dev); @@ -226,10 +224,10 @@ int opp_get_opp_count(struct device *dev)  	return count;  } -EXPORT_SYMBOL_GPL(opp_get_opp_count); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);  /** - * opp_find_freq_exact() - search for an exact frequency + * dev_pm_opp_find_freq_exact() - search for an exact frequency   * @dev:		device for which we do this operation   * @freq:		frequency to search for   * @available:		true/false - match for available opp @@ -254,11 +252,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   */ -struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, -				bool available) +struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, +					      unsigned long freq, +					      bool available)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	dev_opp = find_device_opp(dev);  	if (IS_ERR(dev_opp)) { @@ -277,10 +276,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_exact); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);  /** - * opp_find_freq_ceil() - Search for an rounded ceil freq + * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq   * @dev:	device for which we do this operation   * @freq:	Start frequency   * @@ -300,10 +299,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   
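The canonical consumer-side pattern for the renamed helpers, following the RCU rules spelled out in these comments (target_hz, reg and clk are illustrative):

	struct dev_pm_opp *opp;
	unsigned long freq = target_hz;
	unsigned long volt;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();	/* opp must not be dereferenced after this */

	regulator_set_voltage(reg, volt, volt);
	clk_set_rate(clk, freq);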
*/ -struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, +					     unsigned long *freq)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	if (!dev || !freq) {  		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -324,10 +324,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_ceil); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);  /** - * opp_find_freq_floor() - Search for a rounded floor freq + * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq   * @dev:	device for which we do this operation   * @freq:	Start frequency   * @@ -347,10 +347,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);   * under the locked area. The pointer returned must be used prior to unlocking   * with rcu_read_unlock() to maintain the integrity of the pointer.   */ -struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) +struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, +					      unsigned long *freq)  {  	struct device_opp *dev_opp; -	struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); +	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);  	if (!dev || !freq) {  		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -375,32 +376,39 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)  	return opp;  } -EXPORT_SYMBOL_GPL(opp_find_freq_floor); +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);  /** - * opp_add()  - Add an OPP table from a table definitions + * dev_pm_opp_add()  - Add an OPP table from a table definitions   * @dev:	device for which we do this operation   * @freq:	Frequency in Hz for this OPP   * @u_volt:	Voltage in uVolts for this OPP   *   * This function adds an opp definition to the opp list and returns status.   * The opp is made available by default and it can be controlled using - * opp_enable/disable functions. + * dev_pm_opp_enable/disable functions.   *   * Locking: The internal device_opp and opp structures are RCU protected.   * Hence this function internally uses RCU updater strategy with mutex locks   * to keep the integrity of the internal data structures. Callers should ensure   * that this function is *NOT* called under RCU protection or in contexts where   * mutex cannot be locked. 
+ * + * Return: + * 0:		On success OR + *		Duplicate OPPs (both freq and volt are same) and opp->available + * -EEXIST:	Freq is the same and volt is different OR + *		Duplicate OPPs (both freq and volt are same) and !opp->available + * -ENOMEM:	Memory allocation failure   */ -int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  {  	struct device_opp *dev_opp = NULL; -	struct opp *opp, *new_opp; +	struct dev_pm_opp *opp, *new_opp;  	struct list_head *head;  	/* allocate new OPP node */ -	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); +	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);  	if (!new_opp) {  		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);  		return -ENOMEM; @@ -441,15 +449,31 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  	new_opp->u_volt = u_volt;  	new_opp->available = true; -	/* Insert new OPP in order of increasing frequency */ +	/* +	 * Insert new OPP in order of increasing frequency +	 * and discard if already present +	 */  	head = &dev_opp->opp_list;  	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { -		if (new_opp->rate < opp->rate) +		if (new_opp->rate <= opp->rate)  			break;  		else  			head = &opp->node;  	} +	/* Duplicate OPPs ? */ +	if (new_opp->rate == opp->rate) { +		int ret = opp->available && new_opp->u_volt == opp->u_volt ? +			0 : -EEXIST; + +		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", +			 __func__, opp->rate, opp->u_volt, opp->available, +			 new_opp->rate, new_opp->u_volt, new_opp->available); +		mutex_unlock(&dev_opp_list_lock); +		kfree(new_opp); +		return ret; +	} +  	list_add_rcu(&new_opp->node, head);  	mutex_unlock(&dev_opp_list_lock); @@ -460,7 +484,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)  	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);  	return 0;  } -EXPORT_SYMBOL_GPL(opp_add); +EXPORT_SYMBOL_GPL(dev_pm_opp_add);  /**   * opp_set_availability() - helper to set the availability of an opp @@ -485,11 +509,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,  		bool availability_req)  {  	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); -	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); +	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);  	int r = 0;  	/* keep the node allocated */ -	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); +	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);  	if (!new_opp) {  		dev_warn(dev, "%s: Unable to create OPP\n", __func__);  		return -ENOMEM; @@ -552,13 +576,13 @@ unlock:  }  /** - * opp_enable() - Enable a specific OPP + * dev_pm_opp_enable() - Enable a specific OPP   * @dev:	device for which we do this operation   * @freq:	OPP frequency to enable   *   * Enables a provided opp. If the operation is valid, this returns 0, else the   * corresponding error value. It is meant to be used by users to make an OPP available - * after being temporarily made unavailable with opp_disable. + * after being temporarily made unavailable with dev_pm_opp_disable.   *   * Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the @@ -566,21 +590,21 @@ unlock:   * this function is *NOT* called under RCU protection or in contexts where   * mutex locking or synchronize_rcu() blocking calls cannot be used.   */ -int opp_enable(struct device *dev, unsigned long freq) +int dev_pm_opp_enable(struct device *dev, unsigned long freq)  {  	return opp_set_availability(dev, freq, true);  } -EXPORT_SYMBOL_GPL(opp_enable); +EXPORT_SYMBOL_GPL(dev_pm_opp_enable);  /** - * opp_disable() - Disable a specific OPP + * dev_pm_opp_disable() - Disable a specific OPP   * @dev:	device for which we do this operation   * @freq:	OPP frequency to disable   *   * Disables a provided opp. If the operation is valid, this returns   * 0, else the corresponding error value. It is meant to be a temporary   * control by users to make this OPP not available until the circumstances are - * right to make it available again (with a call to opp_enable). + * right to make it available again (with a call to dev_pm_opp_enable).   *   * Locking: The internal device_opp and opp structures are RCU protected.   * Hence this function indirectly uses RCU and mutex locks to keep the @@ -588,107 +612,17 @@ EXPORT_SYMBOL_GPL(opp_enable);   * this function is *NOT* called under RCU protection or in contexts where   * mutex locking or synchronize_rcu() blocking calls cannot be used.   */ -int opp_disable(struct device *dev, unsigned long freq) +int dev_pm_opp_disable(struct device *dev, unsigned long freq)  {  	return opp_set_availability(dev, freq, false);  } -EXPORT_SYMBOL_GPL(opp_disable); - -#ifdef CONFIG_CPU_FREQ -/** - * opp_init_cpufreq_table() - create a cpufreq table for a device - * @dev:	device for which we do this operation - * @table:	Cpufreq table returned back to caller - * - * Generate a cpufreq table for a provided device- this assumes that the - * opp list is already initialized and ready for usage. - * - * This function allocates required memory for the cpufreq table. It is - * expected that the caller does the required maintenance such as freeing - * the table as required. - * - * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM - * if no memory available for the operation (table is not populated), returns 0 - * if successful and table is populated. - * - * WARNING: It is  important for the callers to ensure refreshing their copy of - * the table if any of the mentioned functions have been invoked in the interim. - * - * Locking: The internal device_opp and opp structures are RCU protected. - * To simplify the logic, we pretend we are updater and hold relevant mutex here - * Callers should ensure that this function is *NOT* called under RCU protection - * or in contexts where mutex locking cannot be used. 
- */ -int opp_init_cpufreq_table(struct device *dev, -			    struct cpufreq_frequency_table **table) -{ -	struct device_opp *dev_opp; -	struct opp *opp; -	struct cpufreq_frequency_table *freq_table; -	int i = 0; - -	/* Pretend as if I am an updater */ -	mutex_lock(&dev_opp_list_lock); - -	dev_opp = find_device_opp(dev); -	if (IS_ERR(dev_opp)) { -		int r = PTR_ERR(dev_opp); -		mutex_unlock(&dev_opp_list_lock); -		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); -		return r; -	} - -	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * -			     (opp_get_opp_count(dev) + 1), GFP_KERNEL); -	if (!freq_table) { -		mutex_unlock(&dev_opp_list_lock); -		dev_warn(dev, "%s: Unable to allocate frequency table\n", -			__func__); -		return -ENOMEM; -	} - -	list_for_each_entry(opp, &dev_opp->opp_list, node) { -		if (opp->available) { -			freq_table[i].driver_data = i; -			freq_table[i].frequency = opp->rate / 1000; -			i++; -		} -	} -	mutex_unlock(&dev_opp_list_lock); - -	freq_table[i].driver_data = i; -	freq_table[i].frequency = CPUFREQ_TABLE_END; - -	*table = &freq_table[0]; - -	return 0; -} -EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); - -/** - * opp_free_cpufreq_table() - free the cpufreq table - * @dev:	device for which we do this operation - * @table:	table to free - * - * Free up the table allocated by opp_init_cpufreq_table - */ -void opp_free_cpufreq_table(struct device *dev, -				struct cpufreq_frequency_table **table) -{ -	if (!table) -		return; - -	kfree(*table); -	*table = NULL; -} -EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); -#endif		/* CONFIG_CPU_FREQ */ +EXPORT_SYMBOL_GPL(dev_pm_opp_disable);  /** - * opp_get_notifier() - find notifier_head of the device with opp + * dev_pm_opp_get_notifier() - find notifier_head of the device with opp   * @dev:	device pointer used to lookup device OPPs.   */ -struct srcu_notifier_head *opp_get_notifier(struct device *dev) +struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)  {  	struct device_opp *dev_opp = find_device_opp(dev); @@ -732,11 +666,9 @@ int of_init_opp_table(struct device *dev)  		unsigned long freq = be32_to_cpup(val++) * 1000;  		unsigned long volt = be32_to_cpup(val++); -		if (opp_add(dev, freq, volt)) { +		if (dev_pm_opp_add(dev, freq, volt))  			dev_warn(dev, "%s: Failed to add OPP %ld\n",  				 __func__, freq); -			continue; -		}  		nr -= 2;  	} diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index cfc3226ec49..a21223d9592 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);  extern void rpm_sysfs_remove(struct device *dev);  extern int wakeup_sysfs_add(struct device *dev);  extern void wakeup_sysfs_remove(struct device *dev); -extern int pm_qos_sysfs_add_latency(struct device *dev); -extern void pm_qos_sysfs_remove_latency(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);  extern int pm_qos_sysfs_add_flags(struct device *dev);  extern void pm_qos_sysfs_remove_flags(struct device *dev); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5c1361a9e5d..36b9eb4862c 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);  s32 __dev_pm_qos_read_value(struct device *dev)  {  	return IS_ERR_OR_NULL(dev->power.qos) ? 
-		0 : pm_qos_read_value(&dev->power.qos->latency); +		0 : pm_qos_read_value(&dev->power.qos->resume_latency);  }  /** @@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,  	int ret;  	switch(req->type) { -	case DEV_PM_QOS_LATENCY: -		ret = pm_qos_update_target(&qos->latency, &req->data.pnode, -					   action, value); +	case DEV_PM_QOS_RESUME_LATENCY: +		ret = pm_qos_update_target(&qos->resume_latency, +					   &req->data.pnode, action, value);  		if (ret) { -			value = pm_qos_read_value(&qos->latency); +			value = pm_qos_read_value(&qos->resume_latency);  			blocking_notifier_call_chain(&dev_pm_notifiers,  						     (unsigned long)value,  						     req);  		}  		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		ret = pm_qos_update_target(&qos->latency_tolerance, +					   &req->data.pnode, action, value); +		if (ret) { +			value = pm_qos_read_value(&qos->latency_tolerance); +			req->dev->power.set_latency_tolerance(req->dev, value); +		} +		break;  	case DEV_PM_QOS_FLAGS:  		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,  					  action, value); @@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)  	}  	BLOCKING_INIT_NOTIFIER_HEAD(n); -	c = &qos->latency; +	c = &qos->resume_latency;  	plist_head_init(&c->list); -	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; -	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; +	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; +	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; +	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;  	c->type = PM_QOS_MIN;  	c->notifiers = n; +	c = &qos->latency_tolerance; +	plist_head_init(&c->list); +	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; +	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; +	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; +	c->type = PM_QOS_MIN; +  	INIT_LIST_HEAD(&qos->flags.list);  	spin_lock_irq(&dev->power.lock); @@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  	 * If the device's PM QoS resume latency limit or PM QoS flags have been  	 * exposed to user space, they have to be hidden at this point.  	 */ -	pm_qos_sysfs_remove_latency(dev); +	pm_qos_sysfs_remove_resume_latency(dev);  	pm_qos_sysfs_remove_flags(dev);  	mutex_lock(&dev_pm_qos_mtx); @@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  		goto out;  	/* Flush the constraints lists for the device. 
*/ -	c = &qos->latency; +	c = &qos->resume_latency;  	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {  		/*  		 * Update constraints list and call the notification @@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);  		memset(req, 0, sizeof(*req));  	} +	c = &qos->latency_tolerance; +	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { +		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); +		memset(req, 0, sizeof(*req)); +	}  	f = &qos->flags;  	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {  		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); @@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  	mutex_unlock(&dev_pm_qos_sysfs_mtx);  } +static bool dev_pm_qos_invalid_request(struct device *dev, +				       struct dev_pm_qos_request *req) +{ +	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE +			&& !dev->power.set_latency_tolerance); +} + +static int __dev_pm_qos_add_request(struct device *dev, +				    struct dev_pm_qos_request *req, +				    enum dev_pm_qos_req_type type, s32 value) +{ +	int ret = 0; + +	if (!dev || dev_pm_qos_invalid_request(dev, req)) +		return -EINVAL; + +	if (WARN(dev_pm_qos_request_active(req), +		 "%s() called for already added request\n", __func__)) +		return -EINVAL; + +	if (IS_ERR(dev->power.qos)) +		ret = -ENODEV; +	else if (!dev->power.qos) +		ret = dev_pm_qos_constraints_allocate(dev); + +	trace_dev_pm_qos_add_request(dev_name(dev), type, value); +	if (!ret) { +		req->dev = dev; +		req->type = type; +		ret = apply_constraint(req, PM_QOS_ADD_REQ, value); +	} +	return ret; +} +  /**   * dev_pm_qos_add_request - inserts new qos request into the list   * @dev: target device for the constraint @@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)  int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,  			   enum dev_pm_qos_req_type type, s32 value)  { -	int ret = 0; - -	if (!dev || !req) /*guard against callers passing in null */ -		return -EINVAL; - -	if (WARN(dev_pm_qos_request_active(req), -		 "%s() called for already added request\n", __func__)) -		return -EINVAL; +	int ret;  	mutex_lock(&dev_pm_qos_mtx); - -	if (IS_ERR(dev->power.qos)) -		ret = -ENODEV; -	else if (!dev->power.qos) -		ret = dev_pm_qos_constraints_allocate(dev); - -	trace_dev_pm_qos_add_request(dev_name(dev), type, value); -	if (!ret) { -		req->dev = dev; -		req->type = type; -		ret = apply_constraint(req, PM_QOS_ADD_REQ, value); -	} - +	ret = __dev_pm_qos_add_request(dev, req, type, value);  	mutex_unlock(&dev_pm_qos_mtx); -  	return ret;  }  EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); @@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,  		return -ENODEV;  	switch(req->type) { -	case DEV_PM_QOS_LATENCY: +	case DEV_PM_QOS_RESUME_LATENCY: +	case DEV_PM_QOS_LATENCY_TOLERANCE:  		curr_value = req->data.pnode.prio;  		break;  	case DEV_PM_QOS_FLAGS: @@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)  		ret = dev_pm_qos_constraints_allocate(dev);  	if (!ret) -		ret = blocking_notifier_chain_register( -				dev->power.qos->latency.notifiers, notifier); +		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, +						       notifier);  	mutex_unlock(&dev_pm_qos_mtx);  	return ret; @@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,  	/* 
Silently return if the constraints object is not present. */  	if (!IS_ERR_OR_NULL(dev->power.qos)) -		retval = blocking_notifier_chain_unregister( -				dev->power.qos->latency.notifiers, -				notifier); +		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, +							    notifier);  	mutex_unlock(&dev_pm_qos_mtx);  	return retval; @@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);   * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.   * @dev: Device whose ancestor to add the request for.   * @req: Pointer to the preallocated handle. + * @type: Type of the request.   * @value: Constraint latency value.   */  int dev_pm_qos_add_ancestor_request(struct device *dev, -				    struct dev_pm_qos_request *req, s32 value) +				    struct dev_pm_qos_request *req, +				    enum dev_pm_qos_req_type type, s32 value)  {  	struct device *ancestor = dev->parent;  	int ret = -ENODEV; -	while (ancestor && !ancestor->power.ignore_children) -		ancestor = ancestor->parent; +	switch (type) { +	case DEV_PM_QOS_RESUME_LATENCY: +		while (ancestor && !ancestor->power.ignore_children) +			ancestor = ancestor->parent; +		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		while (ancestor && !ancestor->power.set_latency_tolerance) +			ancestor = ancestor->parent; + +		break; +	default: +		ancestor = NULL; +	}  	if (ancestor) -		ret = dev_pm_qos_add_request(ancestor, req, -					     DEV_PM_QOS_LATENCY, value); +		ret = dev_pm_qos_add_request(ancestor, req, type, value);  	if (ret < 0)  		req->dev = NULL; @@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,  	struct dev_pm_qos_request *req = NULL;  	switch(type) { -	case DEV_PM_QOS_LATENCY: -		req = dev->power.qos->latency_req; -		dev->power.qos->latency_req = NULL; +	case DEV_PM_QOS_RESUME_LATENCY: +		req = dev->power.qos->resume_latency_req; +		dev->power.qos->resume_latency_req = NULL; +		break; +	case DEV_PM_QOS_LATENCY_TOLERANCE: +		req = dev->power.qos->latency_tolerance_req; +		dev->power.qos->latency_tolerance_req = NULL;  		break;  	case DEV_PM_QOS_FLAGS:  		req = dev->power.qos->flags_req; @@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  	if (!req)  		return -ENOMEM; -	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); +	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);  	if (ret < 0) {  		kfree(req);  		return ret; @@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  	if (IS_ERR_OR_NULL(dev->power.qos))  		ret = -ENODEV; -	else if (dev->power.qos->latency_req) +	else if (dev->power.qos->resume_latency_req)  		ret = -EEXIST;  	if (ret < 0) { @@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)  		mutex_unlock(&dev_pm_qos_mtx);  		goto out;  	} -	dev->power.qos->latency_req = req; +	dev->power.qos->resume_latency_req = req;  	mutex_unlock(&dev_pm_qos_mtx); -	ret = pm_qos_sysfs_add_latency(dev); +	ret = pm_qos_sysfs_add_resume_latency(dev);  	if (ret) -		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); +		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);   out:  	mutex_unlock(&dev_pm_qos_sysfs_mtx); @@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);  static void __dev_pm_qos_hide_latency_limit(struct device *dev)  { -	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) -		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); +	if 
(!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) +		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);  }  /** @@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)  {  	mutex_lock(&dev_pm_qos_sysfs_mtx); -	pm_qos_sysfs_remove_latency(dev); +	pm_qos_sysfs_remove_resume_latency(dev);  	mutex_lock(&dev_pm_qos_mtx);  	__dev_pm_qos_hide_latency_limit(dev); @@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)  	pm_runtime_put(dev);  	return ret;  } + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ +	s32 ret; + +	mutex_lock(&dev_pm_qos_mtx); +	ret = IS_ERR_OR_NULL(dev->power.qos) +		|| !dev->power.qos->latency_tolerance_req ? +			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : +			dev->power.qos->latency_tolerance_req->data.pnode.prio; +	mutex_unlock(&dev_pm_qos_mtx); +	return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). + */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ +	int ret; + +	mutex_lock(&dev_pm_qos_mtx); + +	if (IS_ERR_OR_NULL(dev->power.qos) +	    || !dev->power.qos->latency_tolerance_req) { +		struct dev_pm_qos_request *req; + +		if (val < 0) { +			ret = -EINVAL; +			goto out; +		} +		req = kzalloc(sizeof(*req), GFP_KERNEL); +		if (!req) { +			ret = -ENOMEM; +			goto out; +		} +		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); +		if (ret < 0) { +			kfree(req); +			goto out; +		} +		dev->power.qos->latency_tolerance_req = req; +	} else { +		if (val < 0) { +			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); +			ret = 0; +		} else { +			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); +		} +	} + + out: +	mutex_unlock(&dev_pm_qos_mtx); +	return ret; +}  #else /* !CONFIG_PM_RUNTIME */  static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}  static void __dev_pm_qos_hide_flags(struct device *dev) {} diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 268a3509757..67c7938e430 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -13,6 +13,43 @@  #include <trace/events/rpm.h>  #include "power.h" +#define RPM_GET_CALLBACK(dev, cb)				\ +({								\ +	int (*__rpm_cb)(struct device *__d);			\ +								\ +	if (dev->pm_domain)					\ +		__rpm_cb = dev->pm_domain->ops.cb;		\ +	else if (dev->type && dev->type->pm)			\ +		__rpm_cb = dev->type->pm->cb;			\ +	else if (dev->class && dev->class->pm)			\ +		__rpm_cb = dev->class->pm->cb;			\ +	else if (dev->bus && dev->bus->pm)			\ +		__rpm_cb = dev->bus->pm->cb;			\ +	else							\ +		__rpm_cb = NULL;				\ +								\ +	if (!__rpm_cb && dev->driver && dev->driver->pm)	\ +		__rpm_cb = dev->driver->pm->cb;			\ +								\ +	__rpm_cb;						\ +}) + +static int (*rpm_get_suspend_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_suspend); +} + +static int (*rpm_get_resume_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_resume); +} + +#ifdef CONFIG_PM_RUNTIME +static int (*rpm_get_idle_cb(struct device *dev))(struct device *) +{ +	return RPM_GET_CALLBACK(dev, runtime_idle); +} +  
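Tying the latency-tolerance pieces together, a minimal sketch (every foo_* name is hypothetical): a device opts in when a power.set_latency_tolerance() handler is installed, after which user-space updates flow through dev_pm_qos_update_user_latency_tolerance() and apply_constraint() above into that handler.

	/* val < 0 means "no constraint": any latency is acceptable */
	static void foo_set_latency_tolerance(struct device *dev, s32 val)
	{
		if (val < 0)
			foo_allow_deepest_idle_state(dev);	/* hypothetical */
		else
			foo_cap_wakeup_latency_us(dev, val);	/* hypothetical */
	}

	/* installed before device_add(), e.g. in bus enumeration code,
	 * so that dpm_sysfs_add() exposes pm_qos_latency_tolerance_us */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;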
static int rpm_resume(struct device *dev, int rpmflags);  static int rpm_suspend(struct device *dev, int rpmflags); @@ -258,7 +295,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)   * Check if the device's runtime PM status allows it to be suspended.  If   * another idle notification has been started earlier, return immediately.  If   * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise - * run the ->runtime_idle() callback directly. + * run the ->runtime_idle() callback directly. If the ->runtime_idle callback + * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.   *   * This function must be called under dev->power.lock with interrupts disabled.   */ @@ -309,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)  	dev->power.idle_notification = true; -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_idle; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_idle; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_idle; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_idle; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_idle; +	callback = rpm_get_idle_cb(dev);  	if (callback)  		retval = __rpm_callback(callback, dev); @@ -331,7 +357,7 @@ static int rpm_idle(struct device *dev, int rpmflags)   out:  	trace_rpm_return_int(dev, _THIS_IP_, retval); -	return retval ? retval : rpm_suspend(dev, rpmflags); +	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);  }  /** @@ -491,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)  	__update_runtime_status(dev, RPM_SUSPENDING); -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_suspend; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_suspend; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_suspend; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_suspend; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_suspend; +	callback = rpm_get_suspend_cb(dev);  	retval = rpm_callback(callback, dev);  	if (retval) @@ -723,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)  	__update_runtime_status(dev, RPM_RESUMING); -	if (dev->pm_domain) -		callback = dev->pm_domain->ops.runtime_resume; -	else if (dev->type && dev->type->pm) -		callback = dev->type->pm->runtime_resume; -	else if (dev->class && dev->class->pm) -		callback = dev->class->pm->runtime_resume; -	else if (dev->bus && dev->bus->pm) -		callback = dev->bus->pm->runtime_resume; -	else -		callback = NULL; - -	if (!callback && dev->driver && dev->driver->pm) -		callback = dev->driver->pm->runtime_resume; +	callback = rpm_get_resume_cb(dev);  	retval = rpm_callback(callback, dev);  	if (retval) { @@ -1129,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);   * @dev: Device to handle.   * @check_resume: If set, check if there's a resume request for the device.   * - * Increment power.disable_depth for the device and if was zero previously, + * Increment power.disable_depth for the device and if it was zero previously,   * cancel all pending runtime PM requests for the device and wait for all   * operations in progress to complete.  The device can be either active or   * suspended after its runtime PM has been disabled. 
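One visible effect of the rpm_idle() change above, seen from a driver's perspective (foo_still_in_use() is a hypothetical check): a ->runtime_idle() callback that returns 0, or a device without one, now ends up in rpm_suspend(dev, RPM_AUTO), so a configured autosuspend delay is honoured instead of the device being suspended immediately.

	static int foo_runtime_idle(struct device *dev)
	{
		if (foo_still_in_use(dev))
			return -EBUSY;	/* veto: the device stays active */
		return 0;		/* proceed to suspend via RPM_AUTO */
	}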
@@ -1400,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)  	if (dev->power.irq_safe && dev->parent)  		pm_runtime_put(dev->parent);  } +#endif + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we can safely check the device's runtime PM status and, + * if it is active, invoke its .runtime_suspend callback to bring it into + * suspend state. Keep runtime PM disabled to preserve the state unless we + * encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into a low-power state. + */ +int pm_runtime_force_suspend(struct device *dev) +{ +	int (*callback)(struct device *); +	int ret = 0; + +	pm_runtime_disable(dev); + +	/* +	 * Note that pm_runtime_status_suspended() returns false when +	 * CONFIG_PM_RUNTIME is unset, in which case the device will always be +	 * put into the low-power state. +	 */ +	if (pm_runtime_status_suspended(dev)) +		return 0; + +	callback = rpm_get_suspend_cb(dev); + +	if (!callback) { +		ret = -ENOSYS; +		goto err; +	} + +	ret = callback(dev); +	if (ret) +		goto err; + +	pm_runtime_set_suspended(dev); +	return 0; +err: +	pm_runtime_enable(dev); +	return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state. + * @dev: Device to resume. + * + * Prior to invoking this function we expect the user to have brought the + * device into a low-power state by a call to pm_runtime_force_suspend(). Here + * we reverse those actions and bring the device back to full power, updating + * its runtime PM status and re-enabling runtime PM. + * + * Typically this function may be invoked from a system resume callback to make + * sure the device is put into a full-power state. + */ +int pm_runtime_force_resume(struct device *dev) +{ +	int (*callback)(struct device *); +	int ret = 0; + +	callback = rpm_get_resume_cb(dev); + +	if (!callback) { +		ret = -ENOSYS; +		goto out; +	} + +	ret = callback(dev); +	if (ret) +		goto out; + +	pm_runtime_set_active(dev); +	pm_runtime_mark_last_busy(dev); +out: +	pm_runtime_enable(dev); +	return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 03e089ade5c..95b181d1ca6 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,  static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,  		autosuspend_delay_ms_store); -static ssize_t pm_qos_latency_show(struct device *dev, -				   struct device_attribute *attr, char *buf) +static ssize_t pm_qos_resume_latency_show(struct device *dev, +					  struct device_attribute *attr, +					  char *buf)  { -	return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); +	return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));  } -static ssize_t pm_qos_latency_store(struct device *dev, -				    struct device_attribute *attr, -				    const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_store(struct device *dev, +					   struct device_attribute *attr, +					   const char *buf, size_t n)  {  	s32 value;  	int ret; @@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,  	if (value < 0)  		return -EINVAL; -	ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); +	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, +					value);  	return ret < 0 ? 
ret : n;  }  static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, -		   pm_qos_latency_show, pm_qos_latency_store); +		   pm_qos_resume_latency_show, pm_qos_resume_latency_store); + +static ssize_t pm_qos_latency_tolerance_show(struct device *dev, +					     struct device_attribute *attr, +					     char *buf) +{ +	s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + +	if (value < 0) +		return sprintf(buf, "auto\n"); +	else if (value == PM_QOS_LATENCY_ANY) +		return sprintf(buf, "any\n"); + +	return sprintf(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_store(struct device *dev, +					      struct device_attribute *attr, +					      const char *buf, size_t n) +{ +	s32 value; +	int ret; + +	if (kstrtos32(buf, 0, &value)) { +		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) +			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; +		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) +			value = PM_QOS_LATENCY_ANY; +		else +			return -EINVAL;	/* don't pass an uninitialized value on */ +	} +	ret = dev_pm_qos_update_user_latency_tolerance(dev, value); +	return ret < 0 ? ret : n; +} + +static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, +		   pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);  static ssize_t pm_qos_no_power_off_show(struct device *dev,  					struct device_attribute *attr, @@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {  	.attrs	= runtime_attrs,  }; -static struct attribute *pm_qos_latency_attrs[] = { +static struct attribute *pm_qos_resume_latency_attrs[] = {  #ifdef CONFIG_PM_RUNTIME  	&dev_attr_pm_qos_resume_latency_us.attr,  #endif /* CONFIG_PM_RUNTIME */  	NULL,  }; -static struct attribute_group pm_qos_latency_attr_group = { +static struct attribute_group pm_qos_resume_latency_attr_group = { +	.name	= power_group_name, +	.attrs	= pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { +#ifdef CONFIG_PM_RUNTIME +	&dev_attr_pm_qos_latency_tolerance_us.attr, +#endif /* CONFIG_PM_RUNTIME */ +	NULL, +}; +static struct attribute_group pm_qos_latency_tolerance_attr_group = {  	.name	= power_group_name, -	.attrs	= pm_qos_latency_attrs, +	.attrs	= pm_qos_latency_tolerance_attrs,  };  static struct attribute *pm_qos_flags_attrs[] = { @@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)  		if (rc)  			goto err_out;  	} -  	if (device_can_wakeup(dev)) {  		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); -		if (rc) { -			if (pm_runtime_callbacks_present(dev)) -				sysfs_unmerge_group(&dev->kobj, -						    &pm_runtime_attr_group); -			goto err_out; -		} +		if (rc) +			goto err_runtime; +	} +	if (dev->power.set_latency_tolerance) { +		rc = sysfs_merge_group(&dev->kobj, +				       &pm_qos_latency_tolerance_attr_group); +		if (rc) +			goto err_wakeup;  	}  	return 0; + err_wakeup: +	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: +	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);   err_out:  	sysfs_remove_group(&dev->kobj, &pm_attr_group);  	return rc; @@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)  	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);  } -int pm_qos_sysfs_add_latency(struct device *dev) +int pm_qos_sysfs_add_resume_latency(struct device *dev)  { -	return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); +	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);  } -void pm_qos_sysfs_remove_latency(struct device *dev) +void pm_qos_sysfs_remove_resume_latency(struct device *dev)  { -	sysfs_unmerge_group(&dev->kobj, 
&pm_qos_latency_attr_group); +	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);  }  int pm_qos_sysfs_add_flags(struct device *dev) @@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)  void dpm_sysfs_remove(struct device *dev)  { +	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);  	dev_pm_qos_constraints_destroy(dev);  	rpm_sysfs_remove(dev);  	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 2d56f4113ae..eb1bd2ecad8 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)  {  	int ret = 0; +	if (!dev) +		return -EINVAL; +  	if (enable) {  		device_set_wakeup_capable(dev, true);  		ret = device_wakeup_enable(dev);  	} else { +		if (dev->power.can_wakeup) +			device_wakeup_disable(dev); +  		device_set_wakeup_capable(dev, false);  	} diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index f0d30543fcc..4251570610c 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -3,7 +3,7 @@  # subsystems should select the appropriate symbols.  config REGMAP -	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ) +	default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)  	select LZO_COMPRESS  	select LZO_DECOMPRESS  	select IRQ_DOMAIN if REGMAP_IRQ @@ -15,6 +15,9 @@ config REGMAP_I2C  config REGMAP_SPI  	tristate +config REGMAP_SPMI +	tristate +  config REGMAP_MMIO  	tristate diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index cf129980abd..a7c670b4123 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -3,5 +3,6 @@ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o  obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o  obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o  obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o +obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o  obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o  obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 57f777835d9..7d1326985be 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -44,7 +44,6 @@ struct regmap_format {  struct regmap_async {  	struct list_head list; -	struct work_struct cleanup;  	struct regmap *map;  	void *work_buf;  }; @@ -64,9 +63,11 @@ struct regmap {  	void *bus_context;  	const char *name; +	bool async;  	spinlock_t async_lock;  	wait_queue_head_t async_waitq;  	struct list_head async_list; +	struct list_head async_free;  	int async_ret;  #ifdef CONFIG_DEBUG_FS @@ -133,6 +134,8 @@ struct regmap {  	/* if set, converts bulk rw to single rw */  	bool use_single_rw; +	/* if set, the device supports multi write mode */ +	bool can_multi_write;  	struct rb_root range_tree;  	void *selector_work_buf;	/* Scratch buffer used for selector */ @@ -179,6 +182,9 @@ struct regmap_field {  	/* lsb */  	unsigned int shift;  	unsigned int reg; + +	unsigned int id_size; +	unsigned int id_offset;  };  #ifdef CONFIG_DEBUG_FS @@ -218,7 +224,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,  int regcache_lookup_reg(struct regmap *map, unsigned int reg);  int _regmap_raw_write(struct regmap *map, unsigned int reg, -		      const void *val, size_t val_len, bool async); +		      const void *val, size_t val_len);  void regmap_async_complete_cb(struct regmap_async 
*async, int ret); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 930cad4e5df..6a7e4fa1285 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -23,16 +23,16 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,  static int regcache_rbtree_exit(struct regmap *map);  struct regcache_rbtree_node { -	/* the actual rbtree node holding this block */ -	struct rb_node node; -	/* base register handled by this block */ -	unsigned int base_reg;  	/* block of adjacent registers */  	void *block;  	/* Which registers are present */  	long *cache_present; +	/* base register handled by this block */ +	unsigned int base_reg;  	/* number of registers available in the block */  	unsigned int blklen; +	/* the actual rbtree node holding this block */ +	struct rb_node node;  } __attribute__ ((packed));  struct regcache_rbtree_ctx { diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index d6c2d691b6e..29b4128da0b 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,  {  	unsigned int reg; -	for (reg = min; reg <= max; reg++) { +	for (reg = min; reg <= max; reg += map->reg_stride) {  		unsigned int val;  		int ret; -		if (regmap_volatile(map, reg)) +		if (regmap_volatile(map, reg) || +		    !regmap_writeable(map, reg))  			continue;  		ret = regcache_read(map, reg, &val); @@ -307,13 +308,11 @@ int regcache_sync(struct regmap *map)  	if (!map->cache_dirty)  		goto out; +	map->async = true; +  	/* Apply any patch first */  	map->cache_bypass = 1;  	for (i = 0; i < map->patch_regs; i++) { -		if (map->patch[i].reg % map->reg_stride) { -			ret = -EINVAL; -			goto out; -		}  		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);  		if (ret != 0) {  			dev_err(map->dev, "Failed to write %x = %x: %d\n", @@ -332,11 +331,15 @@ int regcache_sync(struct regmap *map)  		map->cache_dirty = false;  out: -	trace_regcache_sync(map->dev, name, "stop");  	/* Restore the bypass state */ +	map->async = false;  	map->cache_bypass = bypass;  	map->unlock(map->lock_arg); +	regmap_async_complete(map); + +	trace_regcache_sync(map->dev, name, "stop"); +  	return ret;  }  EXPORT_SYMBOL_GPL(regcache_sync); @@ -375,17 +378,23 @@ int regcache_sync_region(struct regmap *map, unsigned int min,  	if (!map->cache_dirty)  		goto out; +	map->async = true; +  	if (map->cache_ops->sync)  		ret = map->cache_ops->sync(map, min, max);  	else  		ret = regcache_default_sync(map, min, max);  out: -	trace_regcache_sync(map->dev, name, "stop region");  	/* Restore the bypass state */  	map->cache_bypass = bypass; +	map->async = false;  	map->unlock(map->lock_arg); +	regmap_async_complete(map); + +	trace_regcache_sync(map->dev, name, "stop region"); +  	return ret;  }  EXPORT_SYMBOL_GPL(regcache_sync_region); @@ -624,15 +633,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,  	if (*data == NULL)  		return 0; -	count = cur - base; +	count = (cur - base) / map->reg_stride;  	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", -		count * val_bytes, count, base, cur - 1); +		count * val_bytes, count, base, cur - map->reg_stride);  	map->cache_bypass = 1; -	ret = _regmap_raw_write(map, base, *data, count * val_bytes, -				false); +	ret = _regmap_raw_write(map, base, *data, count * val_bytes);  	map->cache_bypass = 0; diff --git 
a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index de11ecaf383..45d812c0ea7 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -15,10 +15,19 @@  #include <linux/debugfs.h>  #include <linux/uaccess.h>  #include <linux/device.h> +#include <linux/list.h>  #include "internal.h" +struct regmap_debugfs_node { +	struct regmap *map; +	const char *name; +	struct list_head link; +}; +  static struct dentry *regmap_debugfs_root; +static LIST_HEAD(regmap_debugfs_early_list); +static DEFINE_MUTEX(regmap_debugfs_early_lock);  /* Calculate the length of a fixed format  */  static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) @@ -465,6 +474,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)  	struct rb_node *next;  	struct regmap_range_node *range_node; +	/* If we don't have the debugfs root yet, postpone init */ +	if (!regmap_debugfs_root) { +		struct regmap_debugfs_node *node; +		node = kzalloc(sizeof(*node), GFP_KERNEL); +		if (!node) +			return; +		node->map = map; +		node->name = name; +		mutex_lock(&regmap_debugfs_early_lock); +		list_add(&node->link, &regmap_debugfs_early_list); +		mutex_unlock(&regmap_debugfs_early_lock); +		return; +	} +  	INIT_LIST_HEAD(&map->debugfs_off_cache);  	mutex_init(&map->cache_lock); @@ -488,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)  	debugfs_create_file("range", 0400, map->debugfs,  			    map, &regmap_reg_ranges_fops); -	if (map->max_register) { +	if (map->max_register || regmap_readable(map, 0)) {  		debugfs_create_file("registers", 0400, map->debugfs,  				    map, &regmap_map_fops);  		debugfs_create_file("access", 0400, map->debugfs, @@ -519,18 +542,42 @@  void regmap_debugfs_exit(struct regmap *map)  { -	debugfs_remove_recursive(map->debugfs); -	mutex_lock(&map->cache_lock); -	regmap_debugfs_free_dump_cache(map); -	mutex_unlock(&map->cache_lock); -	kfree(map->debugfs_name); +	if (map->debugfs) { +		debugfs_remove_recursive(map->debugfs); +		mutex_lock(&map->cache_lock); +		regmap_debugfs_free_dump_cache(map); +		mutex_unlock(&map->cache_lock); +		kfree(map->debugfs_name); +	} else { +		struct regmap_debugfs_node *node, *tmp; + +		mutex_lock(&regmap_debugfs_early_lock); +		list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, +					 link) { +			if (node->map == map) { +				list_del(&node->link); +				kfree(node); +			} +		} +		mutex_unlock(&regmap_debugfs_early_lock); +	}  }  void regmap_debugfs_initcall(void)  { +	struct regmap_debugfs_node *node, *tmp; +  	regmap_debugfs_root = debugfs_create_dir("regmap", NULL);  	if (!regmap_debugfs_root) {  		pr_warn("regmap: Failed to create debugfs root\n");  		return;  	} + +	mutex_lock(&regmap_debugfs_early_lock); +	list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) { +		regmap_debugfs_init(node->map, node->name); +		list_del(&node->link); +		kfree(node); +	} +	mutex_unlock(&regmap_debugfs_early_lock);  } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index fa6bf5279d2..ca193d1ef47 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -13,7 +13,79 @@  #include <linux/regmap.h>  #include <linux/i2c.h>  #include <linux/module.h> -#include <linux/init.h> + + +static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, +				      unsigned int *val) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); +	int ret; + 
+	if (reg > 0xff) +		return -EINVAL; + +	ret = i2c_smbus_read_byte_data(i2c, reg); +	if (ret < 0) +		return ret; + +	*val = ret; + +	return 0; +} + +static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, +				       unsigned int val) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); + +	if (val > 0xff || reg > 0xff) +		return -EINVAL; + +	return i2c_smbus_write_byte_data(i2c, reg, val); +} + +static struct regmap_bus regmap_smbus_byte = { +	.reg_write = regmap_smbus_byte_reg_write, +	.reg_read = regmap_smbus_byte_reg_read, +}; + +static int regmap_smbus_word_reg_read(void *context, unsigned int reg, +				      unsigned int *val) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); +	int ret; + +	if (reg > 0xff) +		return -EINVAL; + +	ret = i2c_smbus_read_word_data(i2c, reg); +	if (ret < 0) +		return ret; + +	*val = ret; + +	return 0; +} + +static int regmap_smbus_word_reg_write(void *context, unsigned int reg, +				       unsigned int val) +{ +	struct device *dev = context; +	struct i2c_client *i2c = to_i2c_client(dev); + +	if (val > 0xffff || reg > 0xff) +		return -EINVAL; + +	return i2c_smbus_write_word_data(i2c, reg, val); +} + +static struct regmap_bus regmap_smbus_word = { +	.reg_write = regmap_smbus_word_reg_write, +	.reg_read = regmap_smbus_word_reg_read, +};  static int regmap_i2c_write(void *context, const void *data, size_t count)  { @@ -98,6 +170,23 @@ static struct regmap_bus regmap_i2c = {  	.read = regmap_i2c_read,  }; +static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, +					const struct regmap_config *config) +{ +	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) +		return &regmap_i2c; +	else if (config->val_bits == 16 && config->reg_bits == 8 && +		 i2c_check_functionality(i2c->adapter, +					 I2C_FUNC_SMBUS_WORD_DATA)) +		return &regmap_smbus_word; +	else if (config->val_bits == 8 && config->reg_bits == 8 && +		 i2c_check_functionality(i2c->adapter, +					 I2C_FUNC_SMBUS_BYTE_DATA)) +		return &regmap_smbus_byte; + +	return ERR_PTR(-ENOTSUPP); +} +  /**   * regmap_init_i2c(): Initialise register map   * @@ -110,7 +199,12 @@ static struct regmap_bus regmap_i2c = {  struct regmap *regmap_init_i2c(struct i2c_client *i2c,  			       const struct regmap_config *config)  { -	return regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config); +	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + +	if (IS_ERR(bus)) +		return ERR_CAST(bus); + +	return regmap_init(&i2c->dev, bus, &i2c->dev, config);  }  EXPORT_SYMBOL_GPL(regmap_init_i2c); @@ -127,7 +221,12 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c);  struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,  				    const struct regmap_config *config)  { -	return devm_regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config); +	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + +	if (IS_ERR(bus)) +		return ERR_CAST(bus); + +	return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);  }  EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index d10456ffd81..6299a50a596 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -10,13 +10,13 @@   * published by the Free Software Foundation.   
*/ -#include <linux/export.h>  #include <linux/device.h> -#include <linux/regmap.h> -#include <linux/irq.h> +#include <linux/export.h>  #include <linux/interrupt.h> +#include <linux/irq.h>  #include <linux/irqdomain.h>  #include <linux/pm_runtime.h> +#include <linux/regmap.h>  #include <linux/slab.h>  #include "internal.h" @@ -105,6 +105,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)  					"Failed to sync wakes in %x: %d\n",  					reg, ret);  		} + +		if (!d->chip->init_ack_masked) +			continue; +		/* +		 * Ack all the masked interrupts unconditionally; otherwise +		 * a masked interrupt which has not been acked will be ignored +		 * in the interrupt handler and may cause an interrupt storm +		 */ +		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { +			reg = d->chip->ack_base + +				(i * map->reg_stride * d->irq_reg_stride); +			ret = regmap_write(map, reg, d->mask_buf[i]); +			if (ret != 0) +				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", +					reg, ret); +		}  	}  	if (d->chip->runtime_pm) @@ -255,7 +271,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)  	for (i = 0; i < data->chip->num_regs; i++) {  		data->status_buf[i] &= ~data->mask_buf[i]; -		if (data->status_buf[i] && chip->ack_base) { +		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {  			reg = chip->ack_base +  				(i * map->reg_stride * data->irq_reg_stride);  			ret = regmap_write(map, reg, data->status_buf[i]); @@ -331,6 +347,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,  	int ret = -ENOMEM;  	u32 reg; +	if (chip->num_regs <= 0) +		return -EINVAL; +  	for (i = 0; i < chip->num_irqs; i++) {  		if (chip->irqs[i].reg_offset % map->reg_stride)  			return -EINVAL; @@ -352,8 +371,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,  	if (!d)  		return -ENOMEM; -	*data = d; -  	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,  				GFP_KERNEL);  	if (!d->status_buf) @@ -432,7 +449,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,  			goto err_alloc;  		} -		if (d->status_buf[i] && chip->ack_base) { +		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {  			reg = chip->ack_base +  				(i * map->reg_stride * d->irq_reg_stride);  			ret = regmap_write(map, reg, @@ -490,6 +507,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,  		goto err_domain;  	} +	*data = d; +  	return 0;  err_domain: @@ -517,7 +536,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)  		return;  	free_irq(irq, d); -	/* We should unmap the domain but... 
*/ +	irq_domain_remove(d->domain);  	kfree(d->wake_buf);  	kfree(d->mask_buf_def);  	kfree(d->mask_buf); diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 98745dd77e8..04a329a377e 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -18,7 +18,6 @@  #include <linux/clk.h>  #include <linux/err.h> -#include <linux/init.h>  #include <linux/io.h>  #include <linux/module.h>  #include <linux/regmap.h> @@ -26,27 +25,83 @@  struct regmap_mmio_context {  	void __iomem *regs; +	unsigned reg_bytes;  	unsigned val_bytes; +	unsigned pad_bytes;  	struct clk *clk;  }; +static inline void regmap_mmio_regsize_check(size_t reg_size) +{ +	switch (reg_size) { +	case 1: +	case 2: +	case 4: +#ifdef CONFIG_64BIT +	case 8: +#endif +		break; +	default: +		BUG(); +	} +} + +static int regmap_mmio_regbits_check(size_t reg_bits) +{ +	switch (reg_bits) { +	case 8: +	case 16: +	case 32: +#ifdef CONFIG_64BIT +	case 64: +#endif +		return 0; +	default: +		return -EINVAL; +	} +} + +static inline void regmap_mmio_count_check(size_t count, u32 offset) +{ +	BUG_ON(count <= offset); +} + +static inline unsigned int +regmap_mmio_get_offset(const void *reg, size_t reg_size) +{ +	switch (reg_size) { +	case 1: +		return *(u8 *)reg; +	case 2: +		return *(u16 *)reg; +	case 4: +		return *(u32 *)reg; +#ifdef CONFIG_64BIT +	case 8: +		return *(u64 *)reg; +#endif +	default: +		BUG(); +	} +} +  static int regmap_mmio_gather_write(void *context,  				    const void *reg, size_t reg_size,  				    const void *val, size_t val_size)  {  	struct regmap_mmio_context *ctx = context; -	u32 offset; +	unsigned int offset;  	int ret; -	BUG_ON(reg_size != 4); +	regmap_mmio_regsize_check(reg_size); -	if (ctx->clk) { +	if (!IS_ERR(ctx->clk)) {  		ret = clk_enable(ctx->clk);  		if (ret < 0)  			return ret;  	} -	offset = *(u32 *)reg; +	offset = regmap_mmio_get_offset(reg, reg_size);  	while (val_size) {  		switch (ctx->val_bytes) { @@ -73,7 +128,7 @@ static int regmap_mmio_gather_write(void *context,  		offset += ctx->val_bytes;  	} -	if (ctx->clk) +	if (!IS_ERR(ctx->clk))  		clk_disable(ctx->clk);  	return 0; @@ -81,9 +136,13 @@ static int regmap_mmio_gather_write(void *context,  static int regmap_mmio_write(void *context, const void *data, size_t count)  { -	BUG_ON(count < 4); +	struct regmap_mmio_context *ctx = context; +	unsigned int offset = ctx->reg_bytes + ctx->pad_bytes; -	return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4); +	regmap_mmio_count_check(count, offset); + +	return regmap_mmio_gather_write(context, data, ctx->reg_bytes, +					data + offset, count - offset);  }  static int regmap_mmio_read(void *context, @@ -91,18 +150,18 @@ static int regmap_mmio_read(void *context,  			    void *val, size_t val_size)  {  	struct regmap_mmio_context *ctx = context; -	u32 offset; +	unsigned int offset;  	int ret; -	BUG_ON(reg_size != 4); +	regmap_mmio_regsize_check(reg_size); -	if (ctx->clk) { +	if (!IS_ERR(ctx->clk)) {  		ret = clk_enable(ctx->clk);  		if (ret < 0)  			return ret;  	} -	offset = *(u32 *)reg; +	offset = regmap_mmio_get_offset(reg, reg_size);  	while (val_size) {  		switch (ctx->val_bytes) { @@ -129,7 +188,7 @@ static int regmap_mmio_read(void *context,  		offset += ctx->val_bytes;  	} -	if (ctx->clk) +	if (!IS_ERR(ctx->clk))  		clk_disable(ctx->clk);  	return 0; @@ -139,7 +198,7 @@ static void regmap_mmio_free_context(void *context)  {  	struct regmap_mmio_context *ctx = context; -	if (ctx->clk) { +	if (!IS_ERR(ctx->clk)) {  		
clk_unprepare(ctx->clk);  		clk_put(ctx->clk);  	} @@ -165,8 +224,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,  	int min_stride;  	int ret; -	if (config->reg_bits != 32) -		return ERR_PTR(-EINVAL); +	ret = regmap_mmio_regbits_check(config->reg_bits); +	if (ret) +		return ERR_PTR(ret);  	if (config->pad_bits)  		return ERR_PTR(-EINVAL); @@ -209,6 +269,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,  	ctx->regs = regs;  	ctx->val_bytes = config->val_bits / 8; +	ctx->reg_bytes = config->reg_bits / 8; +	ctx->pad_bytes = config->pad_bits / 8; +	ctx->clk = ERR_PTR(-ENODEV);  	if (clk_id == NULL)  		return ctx; diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 4c506bd940f..0eb3097c0d7 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -12,7 +12,6 @@  #include <linux/regmap.h>  #include <linux/spi/spi.h> -#include <linux/init.h>  #include <linux/module.h>  #include "internal.h" @@ -73,7 +72,8 @@ static int regmap_spi_async_write(void *context,  	spi_message_init(&async->m);  	spi_message_add_tail(&async->t[0], &async->m); -	spi_message_add_tail(&async->t[1], &async->m); +	if (val) +		spi_message_add_tail(&async->t[1], &async->m);  	async->m.complete = regmap_spi_complete;  	async->m.context = async; diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c new file mode 100644 index 00000000000..d7026dc3338 --- /dev/null +++ b/drivers/base/regmap/regmap-spmi.c @@ -0,0 +1,256 @@ +/* + * Register map access API - SPMI support + * + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * Based on regmap-i2c.c: + * Copyright 2011 Wolfson Microelectronics plc + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ +#include <linux/regmap.h> +#include <linux/spmi.h> +#include <linux/module.h> +#include <linux/init.h> + +static int regmap_spmi_base_read(void *context, +				 const void *reg, size_t reg_size, +				 void *val, size_t val_size) +{ +	u8 addr = *(u8 *)reg; +	int err = 0; + +	BUG_ON(reg_size != 1); + +	while (val_size-- && !err) +		err = spmi_register_read(context, addr++, val++); + +	return err; +} + +static int regmap_spmi_base_gather_write(void *context, +					 const void *reg, size_t reg_size, +					 const void *val, size_t val_size) +{ +	const u8 *data = val; +	u8 addr = *(u8 *)reg; +	int err = 0; + +	BUG_ON(reg_size != 1); + +	/* +	 * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence, +	 * use it when possible. 
+	 */ +	if (addr == 0 && val_size) { +		err = spmi_register_zero_write(context, *data); +		if (err) +			goto err_out; + +		data++; +		addr++; +		val_size--; +	} + +	while (val_size) { +		err = spmi_register_write(context, addr, *data); +		if (err) +			goto err_out; + +		data++; +		addr++; +		val_size--; +	} + +err_out: +	return err; +} + +static int regmap_spmi_base_write(void *context, const void *data, +				  size_t count) +{ +	BUG_ON(count < 1); +	return regmap_spmi_base_gather_write(context, data, 1, data + 1, +					     count - 1); +} + +static struct regmap_bus regmap_spmi_base = { +	.read				= regmap_spmi_base_read, +	.write				= regmap_spmi_base_write, +	.gather_write			= regmap_spmi_base_gather_write, +	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE, +	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE, +}; + +/** + * regmap_init_spmi_base(): Create regmap for the Base register space + * @sdev:	SPMI device that will be interacted with + * @config:	Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +struct regmap *regmap_init_spmi_base(struct spmi_device *sdev, +				     const struct regmap_config *config) +{ +	return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config); +} +EXPORT_SYMBOL_GPL(regmap_init_spmi_base); + +/** + * devm_regmap_init_spmi_base(): Create managed regmap for Base register space + * @sdev:	SPMI device that will be interacted with + * @config:	Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap.  The regmap will be automatically freed by the + * device management code. + */ +struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev, +					  const struct regmap_config *config) +{ +	return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base); + +static int regmap_spmi_ext_read(void *context, +				const void *reg, size_t reg_size, +				void *val, size_t val_size) +{ +	int err = 0; +	size_t len; +	u16 addr; + +	BUG_ON(reg_size != 2); + +	addr = *(u16 *)reg; + +	/* +	 * Split accesses into two to take advantage of the more +	 * bandwidth-efficient 'Extended Register Read' command when possible +	 */ +	while (addr <= 0xFF && val_size) { +		len = min_t(size_t, val_size, 16); + +		err = spmi_ext_register_read(context, addr, val, len); +		if (err) +			goto err_out; + +		addr += len; +		val += len; +		val_size -= len; +	} + +	while (val_size) { +		len = min_t(size_t, val_size, 8); + +		err = spmi_ext_register_readl(context, addr, val, len); +		if (err) +			goto err_out; + +		addr += len; +		val += len; +		val_size -= len; +	} + +err_out: +	return err; +} + +static int regmap_spmi_ext_gather_write(void *context, +					const void *reg, size_t reg_size, +					const void *val, size_t val_size) +{ +	int err = 0; +	size_t len; +	u16 addr; + +	BUG_ON(reg_size != 2); + +	addr = *(u16 *)reg; + +	while (addr <= 0xFF && val_size) { +		len = min_t(size_t, val_size, 16); + +		err = spmi_ext_register_write(context, addr, val, len); +		if (err) +			goto err_out; + +		addr += len; +		val += len; +		val_size -= len; +	} + +	while (val_size) { +		len = min_t(size_t, val_size, 8); + +		err = spmi_ext_register_writel(context, addr, val, len); +		if (err) +			goto err_out; + +		addr += len; +		val += len; +		val_size -= len; +	} + +err_out: +	return err; +} + +static int regmap_spmi_ext_write(void *context, const void *data, +				 size_t count) +{ +	BUG_ON(count 
< 2); +	return regmap_spmi_ext_gather_write(context, data, 2, data + 2, +					    count - 2); +} + +static struct regmap_bus regmap_spmi_ext = { +	.read				= regmap_spmi_ext_read, +	.write				= regmap_spmi_ext_write, +	.gather_write			= regmap_spmi_ext_gather_write, +	.reg_format_endian_default	= REGMAP_ENDIAN_NATIVE, +	.val_format_endian_default	= REGMAP_ENDIAN_NATIVE, +}; + +/** + * regmap_init_spmi_ext(): Create regmap for Ext register space + * @sdev:	Device that will be interacted with + * @config:	Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer to + * a struct regmap. + */ +struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev, +				    const struct regmap_config *config) +{ +	return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config); +} +EXPORT_SYMBOL_GPL(regmap_init_spmi_ext); + +/** + * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space + * @sdev:	SPMI device that will be interacted with + * @config:	Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap.  The regmap will be automatically freed by the + * device management code. + */ +struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev, +				     const struct regmap_config *config) +{ +	return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config); +} +EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 7d689a15c50..74d8c0672cf 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -35,22 +35,17 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,  			       unsigned int mask, unsigned int val,  			       bool *change); +static int _regmap_bus_reg_read(void *context, unsigned int reg, +				unsigned int *val);  static int _regmap_bus_read(void *context, unsigned int reg,  			    unsigned int *val);  static int _regmap_bus_formatted_write(void *context, unsigned int reg,  				       unsigned int val); +static int _regmap_bus_reg_write(void *context, unsigned int reg, +				 unsigned int val);  static int _regmap_bus_raw_write(void *context, unsigned int reg,  				 unsigned int val); -static void async_cleanup(struct work_struct *work) -{ -	struct regmap_async *async = container_of(work, struct regmap_async, -						  cleanup); - -	kfree(async->work_buf); -	kfree(async); -} -  bool regmap_reg_in_ranges(unsigned int reg,  			  const struct regmap_range *ranges,  			  unsigned int nranges) @@ -201,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)  	b[0] = cpu_to_be16(val << shift);  } +static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) +{ +	__le16 *b = buf; + +	b[0] = cpu_to_le16(val << shift); +} +  static void regmap_format_16_native(void *buf, unsigned int val,  				    unsigned int shift)  { @@ -225,6 +227,13 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)  	b[0] = cpu_to_be32(val << shift);  } +static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) +{ +	__le32 *b = buf; + +	b[0] = cpu_to_le32(val << shift); +} +  static void regmap_format_32_native(void *buf, unsigned int val,  				    unsigned int shift)  { @@ -249,6 +258,13 @@ static unsigned int regmap_parse_16_be(const void *buf)  	return be16_to_cpu(b[0]);  } +static unsigned int regmap_parse_16_le(const void *buf) +{ +	const 
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7d689a15c50..74d8c0672cf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -35,22 +35,17 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
 			       bool *change);
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val);
 static int _regmap_bus_read(void *context, unsigned int reg,
 			    unsigned int *val);
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 				       unsigned int val);
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val);
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val);
 
-static void async_cleanup(struct work_struct *work)
-{
-	struct regmap_async *async = container_of(work, struct regmap_async,
-						  cleanup);
-
-	kfree(async->work_buf);
-	kfree(async);
-}
-
 bool regmap_reg_in_ranges(unsigned int reg,
 			  const struct regmap_range *ranges,
 			  unsigned int nranges)
@@ -201,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 	b[0] = cpu_to_be16(val << shift);
 }
 
+static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le16 *b = buf;
+
+	b[0] = cpu_to_le16(val << shift);
+}
+
 static void regmap_format_16_native(void *buf, unsigned int val,
 				    unsigned int shift)
 {
@@ -225,6 +227,13 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 	b[0] = cpu_to_be32(val << shift);
 }
 
+static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le32 *b = buf;
+
+	b[0] = cpu_to_le32(val << shift);
+}
+
 static void regmap_format_32_native(void *buf, unsigned int val,
 				    unsigned int shift)
 {
@@ -249,6 +258,13 @@ static unsigned int regmap_parse_16_be(const void *buf)
 	return be16_to_cpu(b[0]);
 }
 
+static unsigned int regmap_parse_16_le(const void *buf)
+{
+	const __le16 *b = buf;
+
+	return le16_to_cpu(b[0]);
+}
+
 static void regmap_parse_16_be_inplace(void *buf)
 {
 	__be16 *b = buf;
@@ -256,6 +272,13 @@ static void regmap_parse_16_be_inplace(void *buf)
 	b[0] = be16_to_cpu(b[0]);
 }
 
+static void regmap_parse_16_le_inplace(void *buf)
+{
+	__le16 *b = buf;
+
+	b[0] = le16_to_cpu(b[0]);
+}
+
 static unsigned int regmap_parse_16_native(const void *buf)
 {
 	return *(u16 *)buf;
@@ -278,6 +301,13 @@ static unsigned int regmap_parse_32_be(const void *buf)
 	return be32_to_cpu(b[0]);
 }
 
+static unsigned int regmap_parse_32_le(const void *buf)
+{
+	const __le32 *b = buf;
+
+	return le32_to_cpu(b[0]);
+}
+
 static void regmap_parse_32_be_inplace(void *buf)
 {
 	__be32 *b = buf;
@@ -285,6 +315,13 @@ static void regmap_parse_32_be_inplace(void *buf)
 	b[0] = be32_to_cpu(b[0]);
 }
 
+static void regmap_parse_32_le_inplace(void *buf)
+{
+	__le32 *b = buf;
+
+	b[0] = le32_to_cpu(b[0]);
+}
+
 static unsigned int regmap_parse_32_native(const void *buf)
 {
 	return *(u32 *)buf;
@@ -389,6 +426,28 @@ static void regmap_range_exit(struct regmap *map)
 	kfree(map->selector_work_buf);
 }
 
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+		      const struct regmap_config *config)
+{
+	struct regmap **m;
+
+	map->dev = dev;
+
+	regmap_debugfs_init(map, config->name);
+
+	/* Add a devres resource for dev_get_regmap() */
+	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+	if (!m) {
+		regmap_debugfs_exit(map);
+		return -ENOMEM;
+	}
+	*m = map;
+	devres_add(dev, m);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
 /**
  * regmap_init(): Initialise register map
  *
@@ -406,7 +465,7 @@ struct regmap *regmap_init(struct device *dev,
 			   void *bus_context,
 			   const struct regmap_config *config)
 {
-	struct regmap *map, **m;
+	struct regmap *map;
 	int ret = -EINVAL;
 	enum regmap_endian reg_endian, val_endian;
 	int i, j;
@@ -448,6 +507,7 @@ struct regmap *regmap_init(struct device *dev,
 	else
 		map->reg_stride = 1;
 	map->use_single_rw = config->use_single_rw;
+	map->can_multi_write = config->can_multi_write;
 	map->dev = dev;
 	map->bus = bus;
 	map->bus_context = bus_context;
@@ -465,6 +525,7 @@ struct regmap *regmap_init(struct device *dev,
 
 	spin_lock_init(&map->async_lock);
 	INIT_LIST_HEAD(&map->async_list);
+	INIT_LIST_HEAD(&map->async_free);
 	init_waitqueue_head(&map->async_waitq);
 
 	if (config->read_flag_mask || config->write_flag_mask) {
@@ -480,6 +541,12 @@ struct regmap *regmap_init(struct device *dev,
 
 		map->defer_caching = false;
 		goto skip_format_initialization;
+	} else if (!bus->read || !bus->write) {
+		map->reg_read = _regmap_bus_reg_read;
+		map->reg_write = _regmap_bus_reg_write;
+
+		map->defer_caching = false;
+		goto skip_format_initialization;
 	} else {
 		map->reg_read  = _regmap_bus_read;
 	}
@@ -593,6 +660,11 @@ struct regmap *regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_16_be;
 			map->format.parse_inplace = regmap_parse_16_be_inplace;
 			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_16_le;
+			map->format.parse_val = regmap_parse_16_le;
+			map->format.parse_inplace = regmap_parse_16_le_inplace;
+			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_16_native;
 			map->format.parse_val = regmap_parse_16_native;
@@ -614,6 +686,11 @@ struct regmap *regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_32_be;
 			map->format.parse_inplace = regmap_parse_32_be_inplace;
 			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_32_le;
+			map->format.parse_val = regmap_parse_32_le;
+			map->format.parse_inplace = regmap_parse_32_le_inplace;
+			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_32_native;
 			map->format.parse_val = regmap_parse_32_native;
@@ -726,7 +803,7 @@ skip_format_initialization:
 		new->window_start = range_cfg->window_start;
 		new->window_len = range_cfg->window_len;
 
-		if (_regmap_range_add(map, new) == false) {
+		if (!_regmap_range_add(map, new)) {
 			dev_err(map->dev, "Failed to add range %d\n", i);
 			kfree(new);
 			goto err_range;
@@ -742,25 +819,19 @@ skip_format_initialization:
 		}
 	}
 
-	regmap_debugfs_init(map, config->name);
-
 	ret = regcache_init(map, config);
 	if (ret != 0)
 		goto err_range;
 
-	/* Add a devres resource for dev_get_regmap() */
-	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
-	if (!m) {
-		ret = -ENOMEM;
-		goto err_debugfs;
+	if (dev) {
+		ret = regmap_attach_dev(dev, map, config);
+		if (ret != 0)
+			goto err_regcache;
 	}
-	*m = map;
-	devres_add(dev, m);
 
 	return map;
 
-err_debugfs:
-	regmap_debugfs_exit(map);
+err_regcache:
 	regcache_exit(map);
 err_range:
 	regmap_range_exit(map);
@@ -821,6 +892,8 @@ static void regmap_field_init(struct regmap_field *rm_field,
 	rm_field->reg = reg_field.reg;
 	rm_field->shift = reg_field.lsb;
 	rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+	rm_field->id_size = reg_field.id_size;
+	rm_field->id_offset = reg_field.id_offset;
 }
 
 /**
@@ -942,12 +1015,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+	struct regmap_async *async;
+
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
 	regmap_range_exit(map);
 	if (map->bus && map->bus->free_context)
 		map->bus->free_context(map->bus_context);
 	kfree(map->work_buf);
+	while (!list_empty(&map->async_free)) {
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		list_del(&async->list);
+		kfree(async->work_buf);
+		kfree(async);
+	}
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1122,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 }
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len, bool async)
+		      const void *val, size_t val_len)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1091,7 +1174,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 			dev_dbg(map->dev, "Writing window %d/%zu\n",
 				win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write(map, reg, val, win_residue *
-						map->format.val_bytes, async);
+						map->format.val_bytes);
 			if (ret != 0)
 				return ret;
 
@@ -1114,49 +1197,72 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	u8[0] |= map->write_flag_mask;
 
-	if (async && map->bus->async_write) {
-		struct regmap_async *async = map->bus->async_alloc();
-		if (!async)
-			return -ENOMEM;
+	/*
+	 * Essentially all I/O mechanisms will be faster with a single
+	 * buffer to write.  Since register syncs often generate raw
+	 * writes of single registers, optimise that case.
+	 */
+	if (val != work_val && val_len == map->format.val_bytes) {
+		memcpy(work_val, val, map->format.val_bytes);
+		val = work_val;
+	}
+
+	if (map->async && map->bus->async_write) {
+		struct regmap_async *async;
 
 		trace_regmap_async_write_start(map->dev, reg, val_len);
 
-		async->work_buf = kzalloc(map->format.buf_size,
-					  GFP_KERNEL | GFP_DMA);
-		if (!async->work_buf) {
-			kfree(async);
-			return -ENOMEM;
+		spin_lock_irqsave(&map->async_lock, flags);
+		async = list_first_entry_or_null(&map->async_free,
+						 struct regmap_async,
+						 list);
+		if (async)
+			list_del(&async->list);
+		spin_unlock_irqrestore(&map->async_lock, flags);
+
+		if (!async) {
+			async = map->bus->async_alloc();
+			if (!async)
+				return -ENOMEM;
+
+			async->work_buf = kzalloc(map->format.buf_size,
+						  GFP_KERNEL | GFP_DMA);
+			if (!async->work_buf) {
+				kfree(async);
+				return -ENOMEM;
+			}
 		}
 
-		INIT_WORK(&async->cleanup, async_cleanup);
 		async->map = map;
 
 		/* If the caller supplied the value we can use it safely. */
 		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
 		       map->format.reg_bytes + map->format.val_bytes);
-		if (val == work_val)
-			val = async->work_buf + map->format.pad_bytes +
-				map->format.reg_bytes;
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		list_add_tail(&async->list, &map->async_list);
 		spin_unlock_irqrestore(&map->async_lock, flags);
 
-		ret = map->bus->async_write(map->bus_context, async->work_buf,
-					    map->format.reg_bytes +
-					    map->format.pad_bytes,
-					    val, val_len, async);
+		if (val != work_val)
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes,
+						    val, val_len, async);
+		else
+			ret = map->bus->async_write(map->bus_context,
+						    async->work_buf,
+						    map->format.reg_bytes +
+						    map->format.pad_bytes +
+						    val_len, NULL, 0, async);
 
 		if (ret != 0) {
 			dev_err(map->dev, "Failed to schedule write: %d\n",
 				ret);
 
 			spin_lock_irqsave(&map->async_lock, flags);
-			list_del(&async->list);
+			list_move(&async->list, &map->async_free);
 			spin_unlock_irqrestore(&map->async_lock, flags);
-
-			kfree(async->work_buf);
-			kfree(async);
 		}
 
 		return ret;
@@ -1240,6 +1346,14 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 	return ret;
 }
 
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_write(map->bus_context, reg, val);
+}
+
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val)
 {
@@ -1253,7 +1367,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 map->work_buf +
 				 map->format.reg_bytes +
 				 map->format.pad_bytes,
-				 map->format.val_bytes, false);
+				 map->format.val_bytes);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1318,6 +1432,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write);
 
 /**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+	int ret;
+
+	if (reg % map->reg_stride)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_write(map, reg, val);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+/**
  * regmap_raw_write(): Write raw values to one or more registers
  *
  * @map: Register map to write to
@@ -1345,7 +1490,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len, false);
+	ret = _regmap_raw_write(map, reg, val, val_len);
 
 	map->unlock(map->lock_arg);
 
@@ -1369,6 +1514,74 @@ int regmap_field_write(struct regmap_field *field, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_field_write);
 
+/**
+ * regmap_field_update_bits():	Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits(field->regmap, field->reg,
+				  mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_update_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);
+
+/**
+ * regmap_fields_update_bits():	Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+			      unsigned int mask, unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	mask = (mask << field->shift) & field->mask;
+
+	return regmap_update_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+
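The regmap_fields_*() helpers above address one of @id_size copies of a field, spaced @id_offset registers apart. A hedged sketch of the caller's side follows, with an invented field layout (a per-port enable bit replicated every 0x10 registers); none of these names come from the patch itself:

#include <linux/err.h>
#include <linux/regmap.h>

/* Illustrative only: port n's enable bit lives at 0x20 + 0x10 * n, bit 0 */
static const struct reg_field port_en_field = {
	.reg		= 0x20,
	.lsb		= 0,
	.msb		= 0,
	.id_size	= 4,	/* four ports */
	.id_offset	= 0x10,	/* register stride between ports */
};

static int port_set_enable(struct device *dev, struct regmap *map,
			   unsigned int port, bool enable)
{
	struct regmap_field *f;

	/* Normally allocated once at probe time; shown inline for brevity */
	f = devm_regmap_field_alloc(dev, map, port_en_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* Updates register 0x20 + 0x10 * port, masked to the field */
	return regmap_fields_write(f, port, enable ? 1 : 0);
}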
 /*
  * regmap_bulk_write(): Write multiple registers to the device
  *
@@ -1388,56 +1601,316 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
 	int ret = 0, i;
 	size_t val_bytes = map->format.val_bytes;
-	void *wval;
 
-	if (!map->bus)
-		return -EINVAL;
-	if (!map->format.parse_inplace)
+	if (map->bus && !map->format.parse_inplace)
 		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
 
-	map->lock(map->lock_arg);
+	/*
+	 * Some devices don't support bulk write; for them we have a
+	 * series of single write operations.
+	 */
+	if (!map->bus || map->use_single_rw) {
+		map->lock(map->lock_arg);
+		for (i = 0; i < val_count; i++) {
+			unsigned int ival;
 
-	/* No formatting is require if val_byte is 1 */
-	if (val_bytes == 1) {
-		wval = (void *)val;
+			switch (val_bytes) {
+			case 1:
+				ival = *(u8 *)(val + (i * val_bytes));
+				break;
+			case 2:
+				ival = *(u16 *)(val + (i * val_bytes));
+				break;
+			case 4:
+				ival = *(u32 *)(val + (i * val_bytes));
+				break;
+#ifdef CONFIG_64BIT
+			case 8:
+				ival = *(u64 *)(val + (i * val_bytes));
+				break;
+#endif
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
+
+			ret = _regmap_write(map, reg + (i * map->reg_stride),
+					ival);
+			if (ret != 0)
+				goto out;
+		}
+out:
+		map->unlock(map->lock_arg);
 	} else {
+		void *wval;
+
 		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
 		if (!wval) {
-			ret = -ENOMEM;
 			dev_err(map->dev, "Error in memory allocation\n");
-			goto out;
+			return -ENOMEM;
 		}
 		for (i = 0; i < val_count * val_bytes; i += val_bytes)
 			map->format.parse_inplace(wval + i);
+
+		map->lock(map->lock_arg);
+		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+		map->unlock(map->lock_arg);
+
+		kfree(wval);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
+
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * the (register,newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been changed to being page
+ * relative. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+				       const struct reg_default *regs,
+				       size_t num_regs)
+{
+	int ret;
+	void *buf;
+	int i;
+	u8 *u8;
+	size_t val_bytes = map->format.val_bytes;
+	size_t reg_bytes = map->format.reg_bytes;
+	size_t pad_bytes = map->format.pad_bytes;
+	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+	size_t len = pair_size * num_regs;
+
+	if (!len)
+		return -EINVAL;
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* We have to linearise by hand. */
+
+	u8 = buf;
+
+	for (i = 0; i < num_regs; i++) {
+		int reg = regs[i].reg;
+		int val = regs[i].def;
+		trace_regmap_hw_write_start(map->dev, reg, 1);
+		map->format.format_reg(u8, reg, map->reg_shift);
+		u8 += reg_bytes + pad_bytes;
+		map->format.format_val(u8, val, 0);
+		u8 += val_bytes;
+	}
+	u8 = buf;
+	*u8 |= map->write_flag_mask;
+
+	ret = map->bus->write(map->bus_context, buf, len);
+
+	kfree(buf);
+
+	for (i = 0; i < num_regs; i++) {
+		int reg = regs[i].reg;
+		trace_regmap_hw_write_done(map->dev, reg, 1);
+	}
+	return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+					  unsigned int reg,
+					  struct regmap_range_node *range)
+{
+	unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+	return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+					       struct reg_default *regs,
+					       size_t num_regs)
+{
+	int ret;
+	int i, n;
+	struct reg_default *base;
+	unsigned int this_page = 0;
 	/*
-	 * Some devices does not support bulk write, for
-	 * them we have a series of single write operations.
+	 * the set of registers are not necessarily in order, but
+	 * since the order of write must be preserved this algorithm
+	 * chops the set each time the page changes
 	 */
-	if (map->use_single_rw) {
-		for (i = 0; i < val_count; i++) {
-			ret = regmap_raw_write(map,
-					       reg + (i * map->reg_stride),
-					       val + (i * val_bytes),
-					       val_bytes);
+	base = regs;
+	for (i = 0, n = 0; i < num_regs; i++, n++) {
+		unsigned int reg = regs[i].reg;
+		struct regmap_range_node *range;
+
+		range = _regmap_range_lookup(map, reg);
+		if (range) {
+			unsigned int win_page = _regmap_register_page(map, reg,
+								      range);
+
+			if (i == 0)
+				this_page = win_page;
+			if (win_page != this_page) {
+				this_page = win_page;
+				ret = _regmap_raw_multi_reg_write(map, base, n);
+				if (ret != 0)
+					return ret;
+				base += n;
+				n = 0;
+			}
+			ret = _regmap_select_page(map, &base[n].reg, range, 1);
 			if (ret != 0)
 				return ret;
 		}
-	} else {
-		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
-					false);
 	}
+	if (n > 0)
+		return _regmap_raw_multi_reg_write(map, base, n);
+	return 0;
+}
 
-	if (val_bytes != 1)
-		kfree(wval);
+static int _regmap_multi_reg_write(struct regmap *map,
+				   const struct reg_default *regs,
+				   size_t num_regs)
+{
+	int i;
+	int ret;
+
+	if (!map->can_multi_write) {
+		for (i = 0; i < num_regs; i++) {
+			ret = _regmap_write(map, regs[i].reg, regs[i].def);
+			if (ret != 0)
+				return ret;
+		}
+		return 0;
+	}
+
+	if (!map->format.parse_inplace)
+		return -EINVAL;
+
+	if (map->writeable_reg)
+		for (i = 0; i < num_regs; i++) {
+			int reg = regs[i].reg;
+			if (!map->writeable_reg(map->dev, reg))
+				return -EINVAL;
+			if (reg % map->reg_stride)
+				return -EINVAL;
+		}
+
+	if (!map->cache_bypass) {
+		for (i = 0; i < num_regs; i++) {
+			unsigned int val = regs[i].def;
+			unsigned int reg = regs[i].reg;
+			ret = regcache_write(map, reg, val);
+			if (ret) {
+				dev_err(map->dev,
+				"Error in caching of register: %x ret: %d\n",
+								reg, ret);
+				return ret;
+			}
+		}
+		if (map->cache_only) {
+			map->cache_dirty = true;
+			return 0;
+		}
+	}
+
+	WARN_ON(!map->bus);
+
+	for (i = 0; i < num_regs; i++) {
+		unsigned int reg = regs[i].reg;
+		struct regmap_range_node *range;
+		range = _regmap_range_lookup(map, reg);
+		if (range) {
+			size_t len = sizeof(struct reg_default)*num_regs;
+			struct reg_default *base = kmemdup(regs, len,
+							   GFP_KERNEL);
+			if (!base)
+				return -ENOMEM;
+			ret = _regmap_range_multi_paged_reg_write(map, base,
+								  num_regs);
+			kfree(base);
+
+			return ret;
+		}
+	}
+	return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/*
+ * regmap_multi_reg_write(): Write multiple registers to the device
+ *
+ * where the set of register,value pairs are supplied in any order,
+ * possibly not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+			   int num_regs)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
 
-out:
 	map->unlock(map->lock_arg);
+
 	return ret;
 }
-EXPORT_SYMBOL_GPL(regmap_bulk_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
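From the caller's perspective the new interface takes an ordered array of register/value pairs. A brief sketch with invented addresses follows; note the device's regmap_config would need .can_multi_write set for the single-transfer path rather than the per-register fallback above:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Illustrative init sequence: scattered registers, write order preserved */
static const struct reg_default my_init_seq[] = {
	{ .reg = 0x12, .def = 0x00 },
	{ .reg = 0x40, .def = 0x1f },
	{ .reg = 0x13, .def = 0x80 },
};

static int my_chip_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, my_init_seq,
				      ARRAY_SIZE(my_init_seq));
}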
+
+/*
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ *                                    device but not the cache
+ *
+ * where the set of registers are supplied in any order
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in a single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+				    const struct reg_default *regs,
+				    int num_regs)
+{
+	int ret;
+	bool bypass;
+
+	map->lock(map->lock_arg);
+
+	bypass = map->cache_bypass;
+	map->cache_bypass = true;
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+	map->cache_bypass = bypass;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
 
 /**
  * regmap_raw_write_async(): Write raw values to one or more registers
  *
@@ -1473,7 +1946,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len, true);
+	map->async = true;
+
+	ret = _regmap_raw_write(map, reg, val, val_len);
+
+	map->async = false;
 
 	map->unlock(map->lock_arg);
 
@@ -1521,6 +1998,14 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	return ret;
 }
 
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_read(map->bus_context, reg, val);
+}
+
 static int _regmap_bus_read(void *context, unsigned int reg,
 			    unsigned int *val)
 {
@@ -1554,6 +2039,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 	if (map->cache_only)
 		return -EBUSY;
 
+	if (!regmap_readable(map, reg))
+		return -EIO;
+
 	ret = map->reg_read(context, reg, val);
 	if (ret == 0) {
 #ifdef LOG_DEVICE
@@ -1573,7 +2061,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 /**
  * regmap_read(): Read a value from a single register
  *
- * @map: Register map to write to
+ * @map: Register map to read from
  * @reg: Register to be read from
 * @val: Pointer to store read value
 *
@@ -1600,7 +2088,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
 /**
  * regmap_raw_read(): Read raw data from the device
  *
- * @map: Register map to write to
+ * @map: Register map to read from
  * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
@@ -1677,9 +2165,42 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
 EXPORT_SYMBOL_GPL(regmap_field_read);
 
 /**
+ * regmap_fields_read(): Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+		       unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	ret = regmap_read(field->regmap,
+			  field->reg + (field->id_offset * id),
+			  &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
  * regmap_bulk_read(): Read multiple registers from the device
  *
- * @map: Register map to write to
+ * @map: Register map to read from
  * @reg: First register to be read from
  * @val: Pointer to store read value, in native register size for device
  * @val_count: Number of registers to read
@@ -1694,14 +2215,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 	size_t val_bytes = map->format.val_bytes;
 	bool vol = regmap_volatile_range(map, reg, val_count);
 
-	if (!map->bus)
-		return -EINVAL;
-	if (!map->format.parse_inplace)
-		return -EINVAL;
 	if (reg % map->reg_stride)
 		return -EINVAL;
 
-	if (vol || map->cache_type == REGCACHE_NONE) {
+	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
 		/*
 		 * Some devices does not support bulk read, for
 		 * them we have a series of single read operations.
@@ -1755,9 +2272,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 
 	if (tmp != orig) {
 		ret = _regmap_write(map, reg, tmp);
-		*change = true;
+		if (change)
+			*change = true;
 	} else {
-		*change = false;
+		if (change)
+			*change = false;
 	}
 
 	return ret;
@@ -1776,11 +2295,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val)
 {
	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, &change);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL);
 	map->unlock(map->lock_arg);
 
 	return ret;
@@ -1788,6 +2306,40 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ *                           map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+			     unsigned int mask, unsigned int val)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
+/**
  * regmap_update_bits_check: Perform a read/modify/write cycle on the
  *                           register map and report if updated
  *
@@ -1812,6 +2364,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
 
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ *                                 register map asynchronously and report if
+ *                                 updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+				   unsigned int mask, unsigned int val,
+				   bool *change)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+
+	map->async = true;
+
+	ret = _regmap_update_bits(map, reg, mask, val, change);
+
+	map->async = false;
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
 void regmap_async_complete_cb(struct regmap_async *async, int ret)
 {
 	struct regmap *map = async->map;
@@ -1820,8 +2409,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	trace_regmap_async_io_complete(map->dev);
 
 	spin_lock(&map->async_lock);
-
-	list_del(&async->list);
+	list_move(&async->list, &map->async_free);
 
 	wake = list_empty(&map->async_list);
 
 	if (ret != 0)
@@ -1829,8 +2417,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 
 	spin_unlock(&map->async_lock);
 
-	schedule_work(&async->cleanup);
-
 	if (wake)
 		wake_up(&map->async_waitq);
 }
@@ -1893,29 +2479,20 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
  * apply them immediately.  Typically this is used to apply
  * corrections to be applied to the device defaults on startup, such
  * as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
  */
 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 			  int num_regs)
 {
 	struct reg_default *p;
-	int i, ret;
+	int ret;
 	bool bypass;
 
-	map->lock(map->lock_arg);
-
-	bypass = map->cache_bypass;
-
-	map->cache_bypass = true;
-
-	/* Write out first; it's useful to apply even if we fail later. */
-	for (i = 0; i < num_regs; i++) {
-		ret = _regmap_write(map, regs[i].reg, regs[i].def);
-		if (ret != 0) {
-			dev_err(map->dev, "Failed to write %x = %x: %d\n",
-				regs[i].reg, regs[i].def, ret);
-			goto out;
-		}
-	}
+	if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
+	    num_regs))
+		return 0;
 
 	p = krealloc(map->patch,
 		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
@@ -1925,14 +2502,28 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 		map->patch = p;
 		map->patch_regs += num_regs;
 	} else {
-		ret = -ENOMEM;
+		return -ENOMEM;
 	}
 
+	map->lock(map->lock_arg);
+
+	bypass = map->cache_bypass;
+
+	map->cache_bypass = true;
+	map->async = true;
+
+	ret = _regmap_multi_reg_write(map, regs, num_regs);
+	if (ret != 0)
+		goto out;
+
 out:
+	map->async = false;
 	map->cache_bypass = bypass;
 
 	map->unlock(map->lock_arg);
 
+	regmap_async_complete(map);
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_register_patch);
@@ -1952,6 +2543,18 @@ int regmap_get_val_bytes(struct regmap *map)
 }
 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
 
+int regmap_parse_val(struct regmap *map, const void *buf,
+			unsigned int *val)
+{
+	if (!map->format.parse_val)
+		return -EINVAL;
+
+	*val = map->format.parse_val(buf);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
 static int __init regmap_initcall(void)
 {
 	regmap_debugfs_initcall();
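With regmap_register_patch() now routed through _regmap_multi_reg_write() and the asynchronous I/O path, a caller registers a patch the same way as before. A sketch with invented errata values, not taken from the patch:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Illustrative vendor errata: written once immediately (bypassing the
 * cache) and re-applied by regcache_sync() after each sync.
 */
static const struct reg_default my_errata[] = {
	{ .reg = 0x7a, .def = 0x01 },
	{ .reg = 0x7b, .def = 0xc3 },
};

static int my_chip_apply_errata(struct regmap *map)
{
	return regmap_register_patch(map, my_errata, ARRAY_SIZE(my_errata));
}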
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6630e..dbb8350ea8d 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <trace/events/power.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -49,6 +50,7 @@ int syscore_suspend(void)
 	struct syscore_ops *ops;
 	int ret = 0;
 
+	trace_suspend_resume(TPS("syscore_suspend"), 0, true);
 	pr_debug("Checking wakeup interrupts\n");
 
 	/* Return error code if there are any wakeup interrupts pending. */
@@ -70,6 +72,7 @@ int syscore_suspend(void)
 				"Interrupts enabled after %pF\n", ops->suspend);
 		}
 
+	trace_suspend_resume(TPS("syscore_suspend"), 0, false);
 	return 0;
 
  err_out:
@@ -92,6 +95,7 @@ void syscore_resume(void)
 {
 	struct syscore_ops *ops;
 
+	trace_suspend_resume(TPS("syscore_resume"), 0, true);
 	WARN_ONCE(!irqs_disabled(),
 		"Interrupts enabled before system core resume.\n");
 
@@ -103,6 +107,7 @@ void syscore_resume(void)
 			WARN_ONCE(!irqs_disabled(),
 				"Interrupts enabled after %pF\n", ops->resume);
 		}
+	trace_suspend_resume(TPS("syscore_resume"), 0, false);
 }
 EXPORT_SYMBOL_GPL(syscore_resume);
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 94ffee378f1..be7c1fb7c0c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  */
-#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
@@ -40,8 +39,7 @@
 static ssize_t show_##name(struct device *dev,			\
 		struct device_attribute *attr, char *buf)	\
 {								\
-	unsigned int cpu = dev->id;				\
-	return sprintf(buf, "%d\n", topology_##name(cpu));	\
+	return sprintf(buf, "%d\n", topology_##name(dev->id));	\
 }
 
 #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
@@ -161,16 +159,20 @@ static int topology_cpu_callback(struct notifier_block *nfb,
 static int topology_sysfs_init(void)
 {
 	int cpu;
-	int rc;
+	int rc = 0;
+
+	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
 		rc = topology_add_dev(cpu);
 		if (rc)
-			return rc;
+			goto out;
 	}
 
-	hotcpu_notifier(topology_cpu_callback, 0);
+	__hotcpu_notifier(topology_cpu_callback, 0);
 
-	return 0;
+out:
+	cpu_notifier_register_done();
+	return rc;
 }
 
 device_initcall(topology_sysfs_init);
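Taken together, the regmap changes in this series enable an asynchronous write flow along the following lines; a hedged sketch with invented register addresses, not part of the patch:

#include <linux/regmap.h>

static int my_chip_fast_config(struct regmap *map)
{
	int ret;

	/* Queued on the bus's async path when the bus supports it */
	ret = regmap_write_async(map, 0x10, 0xff);
	if (ret)
		return ret;

	ret = regmap_update_bits_async(map, 0x11, 0x0f, 0x03);
	if (ret)
		return ret;

	/* Block until all outstanding async writes finish; returns the
	 * first error seen, if any.
	 */
	return regmap_async_complete(map);
}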
