Diffstat (limited to 'drivers/base')
 drivers/base/Kconfig                  |  46
 drivers/base/Makefile                 |   8
 drivers/base/attribute_container.c    |   3
 drivers/base/base.h                   |  14
 drivers/base/bus.c                    | 125
 drivers/base/class.c                  |  33
 drivers/base/component.c              | 390
 drivers/base/container.c              |  44
 drivers/base/core.c                   | 488
 drivers/base/cpu.c                    | 209
 drivers/base/dd.c                     |  49
 drivers/base/devres.c                 | 154
 drivers/base/devtmpfs.c               |   8
 drivers/base/dma-buf.c                |  82
 drivers/base/dma-coherent.c           |  10
 drivers/base/dma-contiguous.c         | 199
 drivers/base/dma-mapping.c            |   6
 drivers/base/driver.c                 |  31
 drivers/base/firmware_class.c         | 302
 drivers/base/init.c                   |   1
 drivers/base/memory.c                 | 408
 drivers/base/node.c                   |  10
 drivers/base/pinctrl.c                |  19
 drivers/base/platform.c               |  99
 drivers/base/power/Makefile           |   3
 drivers/base/power/clock_ops.c        |  31
 drivers/base/power/common.c           |   1
 drivers/base/power/domain.c           |  19
 drivers/base/power/domain_governor.c  |   1
 drivers/base/power/generic_ops.c      |  29
 drivers/base/power/main.c             | 517
 drivers/base/power/opp.c              | 217
 drivers/base/power/power.h            |   4
 drivers/base/power/qos.c              | 224
 drivers/base/power/runtime.c          | 179
 drivers/base/power/sysfs.c            |  99
 drivers/base/power/wakeup.c           |  15
 drivers/base/regmap/Kconfig           |   5
 drivers/base/regmap/Makefile          |   1
 drivers/base/regmap/internal.h        |  34
 drivers/base/regmap/regcache-rbtree.c | 239
 drivers/base/regmap/regcache.c        | 173
 drivers/base/regmap/regmap-debugfs.c  |  69
 drivers/base/regmap/regmap-i2c.c      | 105
 drivers/base/regmap/regmap-irq.c      |  58
 drivers/base/regmap/regmap-mmio.c     |  95
 drivers/base/regmap/regmap-spi.c      |   4
 drivers/base/regmap/regmap-spmi.c     | 256
 drivers/base/regmap/regmap.c          | 991
 drivers/base/reservation.c            |  39
 drivers/base/syscore.c                |   5
 drivers/base/topology.c               |  46
 52 files changed, 4514 insertions(+), 1683 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 07abd9d76f7..23b8726962a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -1,11 +1,10 @@
menu "Generic Driver Options"
-config UEVENT_HELPER_PATH
- string "path to uevent helper"
- depends on HOTPLUG
- default ""
+config UEVENT_HELPER
+ bool "Support for uevent helper"
+ default y
help
- Path to uevent helper program forked by the kernel for
+ The uevent helper program is forked by the kernel for
every uevent.
Before the switch to the netlink-based uevent source, this was
used to hook hotplug scripts into kernel device events. It
@@ -16,14 +15,18 @@ config UEVENT_HELPER_PATH
that it creates a high system load, or on smaller systems
it is known to create out-of-memory situations during bootup.
- To disable user space helper program execution at early boot
- time specify an empty string here. This setting can be altered
+config UEVENT_HELPER_PATH
+ string "path to uevent helper"
+ depends on UEVENT_HELPER
+ default ""
+ help
+ To disable user space helper program execution by default,
+ specify an empty string here. This setting can still be altered
via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
later at runtime.
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
- depends on HOTPLUG
help
This creates a tmpfs/ramfs filesystem instance early at bootup.
In this filesystem, the kernel driver core maintains device
@@ -51,7 +54,7 @@ config DEVTMPFS_MOUNT
with the commandline parameter: devtmpfs.mount=0|1.
This option does not affect initramfs based booting, here
the devtmpfs filesystem always needs to be mounted manually
- after the roots is mounted.
+ after the rootfs is mounted.
With this option enabled, you can bring up a system in
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
@@ -187,6 +190,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config GENERIC_CPU_AUTOPROBE
+ bool
+
config SOC_BUS
bool
@@ -202,11 +208,9 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
drivers.
-config CMA
- bool "Contiguous Memory Allocator"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
- select MIGRATION
- select MEMORY_ISOLATION
+config DMA_CMA
+ bool "DMA Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && CMA
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
@@ -215,17 +219,7 @@ config CMA
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
-if CMA
-
-config CMA_DEBUG
- bool "CMA debug messages (DEVELOPMENT)"
- depends on DEBUG_KERNEL
- help
- Turns on debug messages in CMA. This produces KERN_DEBUG
- messages for every CMA call as well as various messages while
- processing calls such as dma_alloc_from_contiguous().
- This option does not affect warning and error messages.
-
+if DMA_CMA
comment "Default contiguous memory area size:"
config CMA_SIZE_MBYTES
@@ -264,7 +258,7 @@ endchoice
config CMA_ALIGNMENT
int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
- range 4 9
+ range 4 12
default 8
help
DMA mapping framework by default aligns all buffers to the smallest
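Taken together, the Kconfig changes above split the old single options in two: UEVENT_HELPER now gates UEVENT_HELPER_PATH, and the allocator core (CMA) is separated from its DMA front end (DMA_CMA). A sketch of a resulting .config fragment under the new names (values illustrative, not taken from this diff):

    CONFIG_UEVENT_HELPER=y
    CONFIG_UEVENT_HELPER_PATH=""
    CONFIG_CMA=y
    CONFIG_DMA_CMA=y
    CONFIG_CMA_SIZE_MBYTES=16
    CONFIG_CMA_ALIGNMENT=8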
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4e22ce3ed73..04b314e0fa5 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,16 +1,16 @@
# Makefile for the Linux device tree
-obj-y := core.o bus.o dd.o syscore.o \
+obj-y := component.o core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
- topology.o
+ topology.o container.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o reservation.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index d78b204e65c..b84ca8f13f9 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -12,7 +12,6 @@
*/
#include <linux/attribute_container.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -167,7 +166,7 @@ attribute_container_add_device(struct device *dev,
ic->classdev.parent = get_device(dev);
ic->classdev.class = cont->class;
cont->class->dev_release = attribute_container_release;
- dev_set_name(&ic->classdev, dev_name(dev));
+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
if (fn)
fn(cont, dev, &ic->classdev);
else
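The one-line dev_set_name() change above is a format-string hardening: the copied device name used to be passed as the format itself, so a name containing '%' would be parsed for conversion specifiers against nonexistent varargs. A minimal sketch of the safe pattern (helper name hypothetical):

    #include <linux/device.h>

    /*
     * Hypothetical helper: clone another device's name verbatim.
     * dev_set_name() is printf-like, so a derived string such as
     * "eth%d" must never be used as the format argument itself.
     */
    static int example_clone_name(struct device *dst, struct device *src)
    {
            return dev_set_name(dst, "%s", dev_name(src));
    }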
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b8bdfe61daa..251c5d30f96 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,8 +63,6 @@ struct driver_private {
* binding of drivers which were unable to get all the resources needed by
* the device; typically because it depends on another driver getting
* probed first.
- * @driver_data - private pointer for driver specific info. Will turn into a
- * list soon.
* @device - pointer back to the struct class that this structure is
* associated with.
*
@@ -76,7 +74,6 @@ struct device_private {
struct klist_node knode_driver;
struct klist_node knode_bus;
struct list_head deferred_probe;
- void *driver_data;
struct device *device;
};
#define to_device_private_parent(obj) \
@@ -100,6 +97,7 @@ static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
+extern void container_dev_init(void);
struct kobject *virtual_device_parent(struct device *dev);
@@ -119,6 +117,16 @@ static inline int driver_match_device(struct device_driver *drv,
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
+extern int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+extern void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+
+extern int device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index d414331b480..83e910a5756 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mutex.h>
+#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
@@ -145,8 +146,19 @@ void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
}
EXPORT_SYMBOL_GPL(bus_remove_file);
+static void bus_release(struct kobject *kobj)
+{
+ struct subsys_private *priv =
+ container_of(kobj, typeof(*priv), subsys.kobj);
+ struct bus_type *bus = priv->bus;
+
+ kfree(priv);
+ bus->p = NULL;
+}
+
static struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
+ .release = bus_release,
};
static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -165,8 +177,8 @@ static const struct kset_uevent_ops bus_uevent_ops = {
static struct kset *bus_kset;
/* Manually detach a device from its associated driver. */
-static ssize_t driver_unbind(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t unbind_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
@@ -185,15 +197,15 @@ static ssize_t driver_unbind(struct device_driver *drv,
bus_put(bus);
return err;
}
-static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
+static DRIVER_ATTR_WO(unbind);
/*
* Manually attach a device to a driver.
* Note: the driver must want to bind to the device,
* it is not possible to override the driver's id table.
*/
-static ssize_t driver_bind(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
@@ -221,7 +233,7 @@ static ssize_t driver_bind(struct device_driver *drv,
bus_put(bus);
return err;
}
-static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
+static DRIVER_ATTR_WO(bind);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{
@@ -460,7 +472,7 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
if (!bus->dev_attrs)
return 0;
- for (i = 0; attr_name(bus->dev_attrs[i]); i++) {
+ for (i = 0; bus->dev_attrs[i].attr.name; i++) {
error = device_create_file(dev, &bus->dev_attrs[i]);
if (error) {
while (--i >= 0)
@@ -476,7 +488,7 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev)
int i;
if (bus->dev_attrs) {
- for (i = 0; attr_name(bus->dev_attrs[i]); i++)
+ for (i = 0; bus->dev_attrs[i].attr.name; i++)
device_remove_file(dev, &bus->dev_attrs[i]);
}
}
@@ -499,6 +511,9 @@ int bus_add_device(struct device *dev)
error = device_add_attrs(bus, dev);
if (error)
goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+ goto out_groups;
error = sysfs_create_link(&bus->p->devices_kset->kobj,
&dev->kobj, dev_name(dev));
if (error)
@@ -513,6 +528,8 @@ int bus_add_device(struct device *dev)
out_subsys:
sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+out_groups:
+ device_remove_groups(dev, bus->dev_groups);
out_id:
device_remove_attrs(bus, dev);
out_put:
@@ -575,6 +592,7 @@ void bus_remove_device(struct device *dev)
sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
dev_name(dev));
device_remove_attrs(dev->bus, dev);
+ device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
@@ -584,37 +602,6 @@ void bus_remove_device(struct device *dev)
bus_put(dev->bus);
}
-static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
-{
- int error = 0;
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++) {
- error = driver_create_file(drv, &bus->drv_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- goto done;
-}
-
-static void driver_remove_attrs(struct bus_type *bus,
- struct device_driver *drv)
-{
- int i;
-
- if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++)
- driver_remove_file(drv, &bus->drv_attrs[i]);
- }
-}
-
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
@@ -659,8 +646,8 @@ static void remove_probe_files(struct bus_type *bus)
bus_remove_file(bus, &bus_attr_drivers_probe);
}
-static ssize_t driver_uevent_store(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t uevent_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
enum kobject_action action;
@@ -668,7 +655,7 @@ static ssize_t driver_uevent_store(struct device_driver *drv,
kobject_uevent(&drv->p->kobj, action);
return count;
}
-static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
+static DRIVER_ATTR_WO(uevent);
/**
* bus_add_driver - Add a driver to the bus.
@@ -713,10 +700,10 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
- error = driver_add_attrs(bus, drv);
+ error = driver_add_groups(drv, bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
- printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
+ printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
__func__, drv->name);
}
@@ -755,7 +742,7 @@ void bus_remove_driver(struct device_driver *drv)
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
- driver_remove_attrs(drv->bus, drv);
+ driver_remove_groups(drv, drv->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
@@ -834,40 +821,16 @@ struct bus_type *find_bus(char *name)
}
#endif /* 0 */
-
-/**
- * bus_add_attrs - Add default attributes for this bus.
- * @bus: Bus that has just been registered.
- */
-
-static int bus_add_attrs(struct bus_type *bus)
+static int bus_add_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
{
- int error = 0;
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
- error = bus_create_file(bus, &bus->bus_attrs[i]);
- if (error)
- goto err;
- }
- }
-done:
- return error;
-err:
- while (--i >= 0)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- goto done;
+ return sysfs_create_groups(&bus->p->subsys.kobj, groups);
}
-static void bus_remove_attrs(struct bus_type *bus)
+static void bus_remove_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
{
- int i;
-
- if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++)
- bus_remove_file(bus, &bus->bus_attrs[i]);
- }
+ sysfs_remove_groups(&bus->p->subsys.kobj, groups);
}
static void klist_devices_get(struct klist_node *n)
@@ -959,14 +922,14 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_probe_files_fail;
- retval = bus_add_attrs(bus);
+ retval = bus_add_groups(bus, bus->bus_groups);
if (retval)
- goto bus_attrs_fail;
+ goto bus_groups_fail;
pr_debug("bus: '%s': registered\n", bus->name);
return 0;
-bus_attrs_fail:
+bus_groups_fail:
remove_probe_files(bus);
bus_probe_files_fail:
kset_unregister(bus->p->drivers_kset);
@@ -995,14 +958,12 @@ void bus_unregister(struct bus_type *bus)
pr_debug("bus: '%s': unregistering\n", bus->name);
if (bus->dev_root)
device_unregister(bus->dev_root);
- bus_remove_attrs(bus);
+ bus_remove_groups(bus, bus->bus_groups);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
kset_unregister(bus->p->devices_kset);
bus_remove_file(bus, &bus_attr_uevent);
kset_unregister(&bus->p->subsys);
- kfree(bus->p);
- bus->p = NULL;
}
EXPORT_SYMBOL_GPL(bus_unregister);
@@ -1257,7 +1218,7 @@ err_dev:
* with the name of the subsystem. The root device can carry subsystem-
* wide attributes. All registered devices are below this single root
* device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
+ * number appended. The registered devices are not explicitly named;
* only 'id' in the device needs to be set.
*
* Do not use this interface for anything new, it exists for compatibility
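The bus.c conversions above retire the open-coded bus_attrs/dev_attrs/drv_attrs walkers in favour of attribute groups, and replace the explicit DRIVER_ATTR() declarations with DRIVER_ATTR_WO(), which derives the callback name and the 0200 mode from the attribute name. A sketch of a bus adopting the new fields (bus and attribute names hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* DRIVER_ATTR_WO(reset) expects reset_store() and emits
     * driver_attr_reset with mode S_IWUSR. */
    static ssize_t reset_store(struct device_driver *drv, const char *buf,
                               size_t count)
    {
            /* trigger a driver-wide reset here (hypothetical) */
            return count;
    }
    static DRIVER_ATTR_WO(reset);

    static struct attribute *foo_drv_attrs[] = {
            &driver_attr_reset.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(foo_drv);

    static struct bus_type foo_bus_type = {
            .name           = "foo",
            .drv_groups     = foo_drv_groups,   /* replaces ->drv_attrs */
    };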
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 3ce84547132..f96f70419a7 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
return ret;
}
-static const void *class_attr_namespace(struct kobject *kobj,
- const struct attribute *attr)
-{
- struct class_attribute *class_attr = to_class_attr(attr);
- struct subsys_private *cp = to_subsys_private(kobj);
- const void *ns = NULL;
-
- if (class_attr->namespace)
- ns = class_attr->namespace(cp->class, class_attr);
- return ns;
-}
-
static void class_release(struct kobject *kobj)
{
struct subsys_private *cp = to_subsys_private(kobj);
@@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
static const struct sysfs_ops class_sysfs_ops = {
.show = class_attr_show,
.store = class_attr_store,
- .namespace = class_attr_namespace,
};
static struct kobj_type class_ktype = {
@@ -99,21 +86,23 @@ static struct kobj_type class_ktype = {
static struct kset *class_kset;
-int class_create_file(struct class *cls, const struct class_attribute *attr)
+int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
int error;
if (cls)
- error = sysfs_create_file(&cls->p->subsys.kobj,
- &attr->attr);
+ error = sysfs_create_file_ns(&cls->p->subsys.kobj,
+ &attr->attr, ns);
else
error = -EINVAL;
return error;
}
-void class_remove_file(struct class *cls, const struct class_attribute *attr)
+void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
{
if (cls)
- sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
+ sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
}
static struct class *class_get(struct class *cls)
@@ -135,7 +124,7 @@ static int add_class_attrs(struct class *cls)
int error = 0;
if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++) {
+ for (i = 0; cls->class_attrs[i].attr.name; i++) {
error = class_create_file(cls, &cls->class_attrs[i]);
if (error)
goto error;
@@ -154,7 +143,7 @@ static void remove_class_attrs(struct class *cls)
int i;
if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++)
+ for (i = 0; cls->class_attrs[i].attr.name; i++)
class_remove_file(cls, &cls->class_attrs[i]);
}
}
@@ -600,8 +589,8 @@ int __init classes_init(void)
return 0;
}
-EXPORT_SYMBOL_GPL(class_create_file);
-EXPORT_SYMBOL_GPL(class_remove_file);
+EXPORT_SYMBOL_GPL(class_create_file_ns);
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
EXPORT_SYMBOL_GPL(class_unregister);
EXPORT_SYMBOL_GPL(class_destroy);
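With only the namespace-aware variants exported now, the old entry points survive as inline wrappers in include/linux/device.h; that header change is outside this diff, but the expected shape is:

    static inline int class_create_file(struct class *class,
                                        const struct class_attribute *attr)
    {
            return class_create_file_ns(class, attr, NULL);
    }

    static inline void class_remove_file(struct class *class,
                                         const struct class_attribute *attr)
    {
            class_remove_file_ns(class, attr, NULL);
    }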
diff --git a/drivers/base/component.c b/drivers/base/component.c
new file mode 100644
index 00000000000..c4778995cd7
--- /dev/null
+++ b/drivers/base/component.c
@@ -0,0 +1,390 @@
+/*
+ * Componentized device handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is work in progress. We gather up the component devices into a list,
+ * and bind them when instructed. At the moment, we're specific to the DRM
+ * subsystem, and only handle one master device, but this doesn't have to be
+ * the case.
+ */
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+struct master {
+ struct list_head node;
+ struct list_head components;
+ bool bound;
+
+ const struct component_master_ops *ops;
+ struct device *dev;
+};
+
+struct component {
+ struct list_head node;
+ struct list_head master_node;
+ struct master *master;
+ bool bound;
+
+ const struct component_ops *ops;
+ struct device *dev;
+};
+
+static DEFINE_MUTEX(component_mutex);
+static LIST_HEAD(component_list);
+static LIST_HEAD(masters);
+
+static struct master *__master_find(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *m;
+
+ list_for_each_entry(m, &masters, node)
+ if (m->dev == dev && (!ops || m->ops == ops))
+ return m;
+
+ return NULL;
+}
+
+/* Attach an unattached component to a master. */
+static void component_attach_master(struct master *master, struct component *c)
+{
+ c->master = master;
+
+ list_add_tail(&c->master_node, &master->components);
+}
+
+/* Detach a component from a master. */
+static void component_detach_master(struct master *master, struct component *c)
+{
+ list_del(&c->master_node);
+
+ c->master = NULL;
+}
+
+int component_master_add_child(struct master *master,
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ struct component *c;
+ int ret = -ENXIO;
+
+ list_for_each_entry(c, &component_list, node) {
+ if (c->master)
+ continue;
+
+ if (compare(c->dev, compare_data)) {
+ component_attach_master(master, c);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_master_add_child);
+
+/* Detach all attached components from this master */
+static void master_remove_components(struct master *master)
+{
+ while (!list_empty(&master->components)) {
+ struct component *c = list_first_entry(&master->components,
+ struct component, master_node);
+
+ WARN_ON(c->master != master);
+
+ component_detach_master(master, c);
+ }
+}
+
+/*
+ * Try to bring up a master. If component is NULL, we're interested in
+ * this master, otherwise it's a component which must be present to try
+ * and bring up the master.
+ *
+ * Returns 1 for successful bringup, 0 if not ready, or -ve errno.
+ */
+static int try_to_bring_up_master(struct master *master,
+ struct component *component)
+{
+ int ret = 0;
+
+ if (!master->bound) {
+ /*
+ * Search the list of components, looking for components that
+ * belong to this master, and attach them to the master.
+ */
+ if (master->ops->add_components(master->dev, master)) {
+ /* Failed to find all components */
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ if (component && component->master != master) {
+ master_remove_components(master);
+ ret = 0;
+ goto out;
+ }
+
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Found all components */
+ ret = master->ops->bind(master->dev);
+ if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
+ master_remove_components(master);
+ goto out;
+ }
+
+ master->bound = true;
+ ret = 1;
+ }
+out:
+
+ return ret;
+}
+
+static int try_to_bring_up_masters(struct component *component)
+{
+ struct master *m;
+ int ret = 0;
+
+ list_for_each_entry(m, &masters, node) {
+ ret = try_to_bring_up_master(m, component);
+ if (ret != 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void take_down_master(struct master *master)
+{
+ if (master->bound) {
+ master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
+ master->bound = false;
+ }
+
+ master_remove_components(master);
+}
+
+int component_master_add(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+ int ret;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->dev = dev;
+ master->ops = ops;
+ INIT_LIST_HEAD(&master->components);
+
+ /* Add to the list of available masters. */
+ mutex_lock(&component_mutex);
+ list_add(&master->node, &masters);
+
+ ret = try_to_bring_up_master(master, NULL);
+
+ if (ret < 0) {
+ /* Delete off the list if we weren't successful */
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_master_add);
+
+void component_master_del(struct device *dev,
+ const struct component_master_ops *ops)
+{
+ struct master *master;
+
+ mutex_lock(&component_mutex);
+ master = __master_find(dev, ops);
+ if (master) {
+ take_down_master(master);
+
+ list_del(&master->node);
+ kfree(master);
+ }
+ mutex_unlock(&component_mutex);
+}
+EXPORT_SYMBOL_GPL(component_master_del);
+
+static void component_unbind(struct component *component,
+ struct master *master, void *data)
+{
+ WARN_ON(!component->bound);
+
+ component->ops->unbind(component->dev, master->dev, data);
+ component->bound = false;
+
+ /* Release all resources claimed in the binding of this component */
+ devres_release_group(component->dev, component);
+}
+
+void component_unbind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return;
+
+ list_for_each_entry_reverse(c, &master->components, master_node)
+ component_unbind(c, master, data);
+}
+EXPORT_SYMBOL_GPL(component_unbind_all);
+
+static int component_bind(struct component *component, struct master *master,
+ void *data)
+{
+ int ret;
+
+ /*
+ * Each component initialises inside its own devres group.
+ * This allows us to roll-back a failed component without
+ * affecting anything else.
+ */
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * Also open a group for the device itself: this allows us
+ * to release the resources claimed against the sub-device
+ * at the appropriate moment.
+ */
+ if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
+ devres_release_group(master->dev, NULL);
+ return -ENOMEM;
+ }
+
+ dev_dbg(master->dev, "binding %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+
+ ret = component->ops->bind(component->dev, master->dev, data);
+ if (!ret) {
+ component->bound = true;
+
+ /*
+ * Close the component device's group so that resources
+ * allocated in the binding are encapsulated for removal
+ * at unbind. Remove the group on the DRM device as we
+ * can clean those resources up independently.
+ */
+ devres_close_group(component->dev, NULL);
+ devres_remove_group(master->dev, NULL);
+
+ dev_info(master->dev, "bound %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+ } else {
+ devres_release_group(component->dev, NULL);
+ devres_release_group(master->dev, NULL);
+
+ dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
+ }
+
+ return ret;
+}
+
+int component_bind_all(struct device *master_dev, void *data)
+{
+ struct master *master;
+ struct component *c;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ master = __master_find(master_dev, NULL);
+ if (!master)
+ return -EINVAL;
+
+ list_for_each_entry(c, &master->components, master_node) {
+ ret = component_bind(c, master, data);
+ if (ret)
+ break;
+ }
+
+ if (ret != 0) {
+ list_for_each_entry_continue_reverse(c, &master->components,
+ master_node)
+ component_unbind(c, master, data);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_bind_all);
+
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ struct component *component;
+ int ret;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->ops = ops;
+ component->dev = dev;
+
+ dev_dbg(dev, "adding component (ops %ps)\n", ops);
+
+ mutex_lock(&component_mutex);
+ list_add_tail(&component->node, &component_list);
+
+ ret = try_to_bring_up_masters(component);
+ if (ret < 0) {
+ list_del(&component->node);
+
+ kfree(component);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_add);
+
+void component_del(struct device *dev, const struct component_ops *ops)
+{
+ struct component *c, *component = NULL;
+
+ mutex_lock(&component_mutex);
+ list_for_each_entry(c, &component_list, node)
+ if (c->dev == dev && c->ops == ops) {
+ list_del(&c->node);
+ component = c;
+ break;
+ }
+
+ if (component && component->master)
+ take_down_master(component->master);
+
+ mutex_unlock(&component_mutex);
+
+ WARN_ON(!component);
+ kfree(component);
+}
+EXPORT_SYMBOL_GPL(component_del);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/container.c b/drivers/base/container.c
new file mode 100644
index 00000000000..ecbfbe2e908
--- /dev/null
+++ b/drivers/base/container.c
@@ -0,0 +1,44 @@
+/*
+ * System bus type for containers.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/container.h>
+
+#include "base.h"
+
+#define CONTAINER_BUS_NAME "container"
+
+static int trivial_online(struct device *dev)
+{
+ return 0;
+}
+
+static int container_offline(struct device *dev)
+{
+ struct container_dev *cdev = to_container_dev(dev);
+
+ return cdev->offline ? cdev->offline(cdev) : 0;
+}
+
+struct bus_type container_subsys = {
+ .name = CONTAINER_BUS_NAME,
+ .dev_name = CONTAINER_BUS_NAME,
+ .online = trivial_online,
+ .offline = container_offline,
+};
+
+void __init container_dev_init(void)
+{
+ int ret;
+
+ ret = subsys_system_register(&container_subsys, NULL);
+ if (ret)
+ pr_err("%s() failed: %d\n", __func__, ret);
+}
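A registration sketch against the new bus (struct container_dev and to_container_dev() come from the new include/linux/container.h; the offline policy shown is hypothetical):

    #include <linux/container.h>
    #include <linux/device.h>

    static int foo_container_offline(struct container_dev *cdev)
    {
            /* veto hot-removal while children are busy (hypothetical) */
            return 0;
    }

    static struct container_dev foo_container = {
            .offline = foo_container_offline,
    };

    static int __init foo_container_init(void)
    {
            /* named "container<id>" via the bus's dev_name */
            foo_container.dev.bus = &container_subsys;
            foo_container.dev.id = 0;
            return device_register(&foo_container.dev);
    }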
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2499cefdcdf..20da3ad1696 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -23,9 +23,9 @@
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
+#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
@@ -36,9 +36,9 @@ long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
-static __init int sysfs_deprecated_setup(char *arg)
+static int __init sysfs_deprecated_setup(char *arg)
{
- return strict_strtol(arg, 10, &sysfs_deprecated);
+ return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
@@ -49,6 +49,28 @@ static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+ mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+ mutex_unlock(&device_hotplug_lock);
+}
+
+int lock_device_hotplug_sysfs(void)
+{
+ if (mutex_trylock(&device_hotplug_lock))
+ return 0;
+
+ /* Avoid busy looping (5 ms of sleep should do). */
+ msleep(5);
+ return restart_syscall();
+}
+
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
@@ -193,12 +215,12 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(device_show_bool);
/**
- * device_release - free device structure.
- * @kobj: device's kobject.
+ * device_release - free device structure.
+ * @kobj: device's kobject.
*
- * This is called once the reference count for the object
- * reaches 0. We forward the call to the device's release
- * method, which should handle actually freeing the structure.
+ * This is called once the reference count for the object
+ * reaches 0. We forward the call to the device's release
+ * method, which should handle actually freeing the structure.
*/
static void device_release(struct kobject *kobj)
{
@@ -345,7 +367,7 @@ static const struct kset_uevent_ops device_uevent_ops = {
.uevent = dev_uevent,
};
-static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
+static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct kobject *top_kobj;
@@ -388,7 +410,7 @@ out:
return count;
}
-static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
+static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
enum kobject_action action;
@@ -399,96 +421,48 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
dev_err(dev, "uevent: unknown action-string\n");
return count;
}
+static DEVICE_ATTR_RW(uevent);
-static struct device_attribute uevent_attr =
- __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
-
-static int device_add_attributes(struct device *dev,
- struct device_attribute *attrs)
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- int error = 0;
- int i;
+ bool val;
- if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
- error = device_create_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_file(dev, &attrs[i]);
- }
- return error;
+ device_lock(dev);
+ val = !dev->offline;
+ device_unlock(dev);
+ return sprintf(buf, "%u\n", val);
}
-static void device_remove_attributes(struct device *dev,
- struct device_attribute *attrs)
+static ssize_t online_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int i;
+ bool val;
+ int ret;
- if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
- device_remove_file(dev, &attrs[i]);
-}
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
-static int device_add_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int error = 0;
- int i;
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
- if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
- error = device_create_bin_file(dev, &attrs[i]);
- if (error)
- break;
- }
- if (error)
- while (--i >= 0)
- device_remove_bin_file(dev, &attrs[i]);
- }
- return error;
-}
-
-static void device_remove_bin_attributes(struct device *dev,
- struct bin_attribute *attrs)
-{
- int i;
-
- if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
- device_remove_bin_file(dev, &attrs[i]);
+ ret = val ? device_online(dev) : device_offline(dev);
+ unlock_device_hotplug();
+ return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_RW(online);
-static int device_add_groups(struct device *dev,
- const struct attribute_group **groups)
+int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&dev->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&dev->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
+ return sysfs_create_groups(&dev->kobj, groups);
}
-static void device_remove_groups(struct device *dev,
- const struct attribute_group **groups)
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&dev->kobj, groups[i]);
+ sysfs_remove_groups(&dev->kobj, groups);
}
static int device_add_attrs(struct device *dev)
@@ -498,35 +472,37 @@ static int device_add_attrs(struct device *dev)
int error;
if (class) {
- error = device_add_attributes(dev, class->dev_attrs);
+ error = device_add_groups(dev, class->dev_groups);
if (error)
return error;
- error = device_add_bin_attributes(dev, class->dev_bin_attrs);
- if (error)
- goto err_remove_class_attrs;
}
if (type) {
error = device_add_groups(dev, type->groups);
if (error)
- goto err_remove_class_bin_attrs;
+ goto err_remove_class_groups;
}
error = device_add_groups(dev, dev->groups);
if (error)
goto err_remove_type_groups;
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ error = device_create_file(dev, &dev_attr_online);
+ if (error)
+ goto err_remove_dev_groups;
+ }
+
return 0;
+ err_remove_dev_groups:
+ device_remove_groups(dev, dev->groups);
err_remove_type_groups:
if (type)
device_remove_groups(dev, type->groups);
- err_remove_class_bin_attrs:
+ err_remove_class_groups:
if (class)
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- err_remove_class_attrs:
- if (class)
- device_remove_attributes(dev, class->dev_attrs);
+ device_remove_groups(dev, class->dev_groups);
return error;
}
@@ -536,26 +512,22 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
if (type)
device_remove_groups(dev, type->groups);
- if (class) {
- device_remove_attributes(dev, class->dev_attrs);
- device_remove_bin_attributes(dev, class->dev_bin_attrs);
- }
+ if (class)
+ device_remove_groups(dev, class->dev_groups);
}
-
-static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
+static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return print_dev_t(buf, dev->devt);
}
-
-static struct device_attribute devt_attr =
- __ATTR(dev, S_IRUGO, show_dev, NULL);
+static DEVICE_ATTR_RO(dev);
/* /sys/devices/ */
struct kset *devices_kset;
@@ -582,6 +554,7 @@ int device_create_file(struct device *dev,
return error;
}
+EXPORT_SYMBOL_GPL(device_create_file);
/**
* device_remove_file - remove sysfs attribute file.
@@ -594,6 +567,24 @@ void device_remove_file(struct device *dev,
if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
}
+EXPORT_SYMBOL_GPL(device_remove_file);
+
+/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+ else
+ return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
/**
* device_create_bin_file - create sysfs binary attribute file for device.
@@ -623,39 +614,6 @@ void device_remove_bin_file(struct device *dev,
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);
-/**
- * device_schedule_callback_owner - helper to schedule a callback for a device
- * @dev: device.
- * @func: callback function to invoke later.
- * @owner: module owning the callback routine
- *
- * Attribute methods must not unregister themselves or their parent device
- * (which would amount to the same thing). Attempts to do so will deadlock,
- * since unregistration is mutually exclusive with driver callbacks.
- *
- * Instead methods can call this routine, which will attempt to allocate
- * and schedule a workqueue request to call back @func with @dev as its
- * argument in the workqueue's process context. @dev will be pinned until
- * @func returns.
- *
- * This routine is usually called via the inline device_schedule_callback(),
- * which automatically sets @owner to THIS_MODULE.
- *
- * Returns 0 if the request was submitted, -ENOMEM if storage could not
- * be allocated, -ENODEV if a reference to @owner isn't available.
- *
- * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an
- * underlying sysfs routine (since it is intended for use by attribute
- * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
- */
-int device_schedule_callback_owner(struct device *dev,
- void (*func)(struct device *), struct module *owner)
-{
- return sysfs_schedule_callback(&dev->kobj,
- (void (*)(void *)) func, dev, owner);
-}
-EXPORT_SYMBOL_GPL(device_schedule_callback_owner);
-
static void klist_children_get(struct klist_node *n)
{
struct device_private *p = to_device_private_parent(n);
@@ -704,6 +662,7 @@ void device_initialize(struct device *dev)
device_pm_init(dev);
set_dev_node(dev, -1);
}
+EXPORT_SYMBOL_GPL(device_initialize);
struct kobject *virtual_device_parent(struct device *dev)
{
@@ -1056,12 +1015,12 @@ int device_add(struct device *dev)
if (platform_notify)
platform_notify(dev);
- error = device_create_file(dev, &uevent_attr);
+ error = device_create_file(dev, &dev_attr_uevent);
if (error)
goto attrError;
if (MAJOR(dev->devt)) {
- error = device_create_file(dev, &devt_attr);
+ error = device_create_file(dev, &dev_attr_dev);
if (error)
goto ueventattrError;
@@ -1128,9 +1087,9 @@ done:
device_remove_sys_dev_entry(dev);
devtattrError:
if (MAJOR(dev->devt))
- device_remove_file(dev, &devt_attr);
+ device_remove_file(dev, &dev_attr_dev);
ueventattrError:
- device_remove_file(dev, &uevent_attr);
+ device_remove_file(dev, &dev_attr_uevent);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
kobject_del(&dev->kobj);
@@ -1143,6 +1102,7 @@ name_error:
dev->p = NULL;
goto done;
}
+EXPORT_SYMBOL_GPL(device_add);
/**
* device_register - register a device with the system.
@@ -1167,6 +1127,7 @@ int device_register(struct device *dev)
device_initialize(dev);
return device_add(dev);
}
+EXPORT_SYMBOL_GPL(device_register);
/**
* get_device - increment reference count for device.
@@ -1180,6 +1141,7 @@ struct device *get_device(struct device *dev)
{
return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
+EXPORT_SYMBOL_GPL(get_device);
/**
* put_device - decrement reference count.
@@ -1191,6 +1153,7 @@ void put_device(struct device *dev)
if (dev)
kobject_put(&dev->kobj);
}
+EXPORT_SYMBOL_GPL(put_device);
/**
* device_del - delete device from system.
@@ -1222,7 +1185,7 @@ void device_del(struct device *dev)
if (MAJOR(dev->devt)) {
devtmpfs_delete_node(dev);
device_remove_sys_dev_entry(dev);
- device_remove_file(dev, &devt_attr);
+ device_remove_file(dev, &dev_attr_dev);
}
if (dev->class) {
device_remove_class_symlinks(dev);
@@ -1237,7 +1200,7 @@ void device_del(struct device *dev)
klist_del(&dev->knode_class);
mutex_unlock(&dev->class->p->mutex);
}
- device_remove_file(dev, &uevent_attr);
+ device_remove_file(dev, &dev_attr_uevent);
device_remove_attrs(dev);
bus_remove_device(dev);
device_pm_remove(dev);
@@ -1253,6 +1216,7 @@ void device_del(struct device *dev)
kobject_del(&dev->kobj);
put_device(parent);
}
+EXPORT_SYMBOL_GPL(device_del);
/**
* device_unregister - unregister device from system.
@@ -1271,6 +1235,7 @@ void device_unregister(struct device *dev)
device_del(dev);
put_device(dev);
}
+EXPORT_SYMBOL_GPL(device_unregister);
static struct device *next_device(struct klist_iter *i)
{
@@ -1334,8 +1299,8 @@ const char *device_get_devnode(struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @data: data for the callback.
* @fn: function to be called for each device.
+ * @data: data for the callback.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -1359,12 +1324,13 @@ int device_for_each_child(struct device *parent, void *data,
klist_iter_exit(&i);
return error;
}
+EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @data: Data to pass to match function
* @match: Callback function to check device
+ * @data: Data to pass to match function
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@@ -1374,6 +1340,8 @@ int device_for_each_child(struct device *parent, void *data,
* if it does. If the callback returns non-zero and a reference to the
* current device can be obtained, this function will return to the caller
* and not iterate over any more devices.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
*/
struct device *device_find_child(struct device *parent, void *data,
int (*match)(struct device *dev, void *data))
@@ -1391,6 +1359,7 @@ struct device *device_find_child(struct device *parent, void *data,
klist_iter_exit(&i);
return child;
}
+EXPORT_SYMBOL_GPL(device_find_child);
int __init devices_init(void)
{
@@ -1418,20 +1387,86 @@ int __init devices_init(void)
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(device_for_each_child);
-EXPORT_SYMBOL_GPL(device_find_child);
+static int device_check_offline(struct device *dev, void *not_used)
+{
+ int ret;
-EXPORT_SYMBOL_GPL(device_initialize);
-EXPORT_SYMBOL_GPL(device_add);
-EXPORT_SYMBOL_GPL(device_register);
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
-EXPORT_SYMBOL_GPL(device_del);
-EXPORT_SYMBOL_GPL(device_unregister);
-EXPORT_SYMBOL_GPL(get_device);
-EXPORT_SYMBOL_GPL(put_device);
+ return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
+}
-EXPORT_SYMBOL_GPL(device_create_file);
-EXPORT_SYMBOL_GPL(device_remove_file);
+/**
+ * device_offline - Prepare the device for hot-removal.
+ * @dev: Device to be put offline.
+ *
+ * Execute the device bus type's .offline() callback, if present, to prepare
+ * the device for a subsequent hot-removal. If that succeeds, the device must
+ * not be used until either it is removed or its bus type's .online() callback
+ * is executed.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_offline(struct device *dev)
+{
+ int ret;
+
+ if (dev->offline_disabled)
+ return -EPERM;
+
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = 1;
+ } else {
+ ret = dev->bus->offline(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ dev->offline = true;
+ }
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
+
+/**
+ * device_online - Put the device back online after successful device_offline().
+ * @dev: Device to be put back online.
+ *
+ * If device_offline() has been successfully executed for @dev, but the device
+ * has not been removed subsequently, execute its bus type's .online() callback
+ * to indicate that the device can be used again.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_online(struct device *dev)
+{
+ int ret = 0;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = dev->bus->online(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ dev->offline = false;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
struct root_device {
struct device dev;
@@ -1535,34 +1570,11 @@ static void device_create_release(struct device *dev)
kfree(dev);
}
-/**
- * device_create_vargs - creates a device and registers it with sysfs
- * @class: pointer to the struct class that this device should be registered to
- * @parent: pointer to the parent struct device of this new device, if any
- * @devt: the dev_t for the char device to be added
- * @drvdata: the data to be added to the device for callbacks
- * @fmt: string for the device's name
- * @args: va_list for the device's name
- *
- * This function can be used by char device classes. A struct device
- * will be created in sysfs, registered to the specified class.
- *
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct device is passed in, the newly created
- * struct device will be a child of that device in sysfs.
- * The pointer to the struct device will be returned from the call.
- * Any further sysfs files that might be required can be created using this
- * pointer.
- *
- * Returns &struct device pointer on success, or ERR_PTR() on error.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct device *device_create_vargs(struct class *class, struct device *parent,
- dev_t devt, void *drvdata, const char *fmt,
- va_list args)
+static struct device *
+device_create_groups_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, va_list args)
{
struct device *dev = NULL;
int retval = -ENODEV;
@@ -1576,9 +1588,11 @@ struct device *device_create_vargs(struct class *class, struct device *parent,
goto error;
}
+ device_initialize(dev);
dev->devt = devt;
dev->class = class;
dev->parent = parent;
+ dev->groups = groups;
dev->release = device_create_release;
dev_set_drvdata(dev, drvdata);
@@ -1586,7 +1600,7 @@ struct device *device_create_vargs(struct class *class, struct device *parent,
if (retval)
goto error;
- retval = device_register(dev);
+ retval = device_add(dev);
if (retval)
goto error;
@@ -1596,6 +1610,39 @@ error:
put_device(dev);
return ERR_PTR(retval);
}
+
+/**
+ * device_create_vargs - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ * @args: va_list for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const char *fmt,
+ va_list args)
+{
+ return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
+ fmt, args);
+}
EXPORT_SYMBOL_GPL(device_create_vargs);
/**
@@ -1635,6 +1682,50 @@ struct device *device_create(struct class *class, struct device *parent,
}
EXPORT_SYMBOL_GPL(device_create);
+/**
+ * device_create_with_groups - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @groups: NULL-terminated list of attribute groups to be created
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ * Additional attributes specified in the groups parameter will also
+ * be created automatically.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_with_groups(struct class *class,
+ struct device *parent, dev_t devt,
+ void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
+ fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(device_create_with_groups);
+
static int __match_devt(struct device *dev, const void *data)
{
const dev_t *devt = data;
@@ -1703,6 +1794,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
+ struct kobject *kobj = &dev->kobj;
char *old_device_name = NULL;
int error;
@@ -1710,8 +1802,7 @@ int device_rename(struct device *dev, const char *new_name)
if (!dev)
return -EINVAL;
- pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
- __func__, new_name);
+ dev_dbg(dev, "renaming to %s\n", new_name);
old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!old_device_name) {
@@ -1720,13 +1811,14 @@ int device_rename(struct device *dev, const char *new_name)
}
if (dev->class) {
- error = sysfs_rename_link(&dev->class->p->subsys.kobj,
- &dev->kobj, old_device_name, new_name);
+ error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
+ kobj, old_device_name,
+ new_name, kobject_namespace(kobj));
if (error)
goto out;
}
- error = kobject_rename(&dev->kobj, new_name);
+ error = kobject_rename(kobj, new_name);
if (error)
goto out;
@@ -1839,7 +1931,7 @@ EXPORT_SYMBOL_GPL(device_move);
*/
void device_shutdown(void)
{
- struct device *dev;
+ struct device *dev, *parent;
spin_lock(&devices_kset->list_lock);
/*
@@ -1856,7 +1948,7 @@ void device_shutdown(void)
* prevent it from being freed because parent's
* lock is to be held
*/
- get_device(dev->parent);
+ parent = get_device(dev->parent);
get_device(dev);
/*
* Make sure the device is off the kset list, in the
@@ -1866,8 +1958,8 @@ void device_shutdown(void)
spin_unlock(&devices_kset->list_lock);
/* hold lock to avoid race with probe/release */
- if (dev->parent)
- device_lock(dev->parent);
+ if (parent)
+ device_lock(parent);
device_lock(dev);
/* Don't allow any more runtime suspends */
@@ -1885,16 +1977,15 @@ void device_shutdown(void)
}
device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ if (parent)
+ device_unlock(parent);
put_device(dev);
- put_device(dev->parent);
+ put_device(parent);
spin_lock(&devices_kset->list_lock);
}
spin_unlock(&devices_kset->list_lock);
- async_synchronize_full();
}
/*
@@ -1949,7 +2040,6 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
return pos;
}
-EXPORT_SYMBOL(create_syslog_header);
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args)
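The core.c refactoring funnels device_create_vargs() through the new device_create_groups_vargs(), so attribute groups are attached before device_add() emits the KOBJ_ADD uevent; this closes the race where udev could see the device before its files existed. A sketch of the new entry point in use (names and the attribute's value hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t ratio_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42);        /* placeholder value */
    }
    static DEVICE_ATTR_RO(ratio);

    static struct attribute *foo_dev_attrs[] = {
            &dev_attr_ratio.attr,
            NULL
    };
    ATTRIBUTE_GROUPS(foo_dev);

    static struct device *foo_create(struct class *cls, dev_t devt, void *drv)
    {
            /* the "ratio" file exists before the KOBJ_ADD uevent fires */
            return device_create_with_groups(cls, NULL, devt, drv,
                                             foo_dev_groups, "foo%d",
                                             MINOR(devt));
    }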
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3d48fc887ef..006b1bc5297 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -13,17 +13,23 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/cpufeature.h>
#include "base.h"
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
+static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+{
+ /* ACPI style match is the only one that may succeed. */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
unsigned int from_nid, unsigned int to_nid)
@@ -34,69 +40,40 @@ static void change_cpu_under_node(struct cpu *cpu,
cpu->node_id = to_nid;
}
-static ssize_t show_online(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int __ref cpu_subsys_online(struct device *dev)
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = dev->id;
+ int from_nid, to_nid;
+ int ret;
- return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
-}
+ from_nid = cpu_to_node(cpuid);
+ if (from_nid == NUMA_NO_NODE)
+ return -ENODEV;
-static ssize_t __ref store_online(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int cpuid = cpu->dev.id;
- int from_nid, to_nid;
- ssize_t ret;
-
- cpu_hotplug_driver_lock();
- switch (buf[0]) {
- case '0':
- ret = cpu_down(cpuid);
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
- break;
- case '1':
- from_nid = cpu_to_node(cpuid);
- ret = cpu_up(cpuid);
-
- /*
- * When hot adding memory to memoryless node and enabling a cpu
- * on the node, node number of the cpu may internally change.
- */
- to_nid = cpu_to_node(cpuid);
- if (from_nid != to_nid)
- change_cpu_under_node(cpu, from_nid, to_nid);
-
- if (!ret)
- kobject_uevent(&dev->kobj, KOBJ_ONLINE);
- break;
- default:
- ret = -EINVAL;
- }
- cpu_hotplug_driver_unlock();
+ ret = cpu_up(cpuid);
+ /*
+ * When hot adding memory to memoryless node and enabling a cpu
+ * on the node, node number of the cpu may internally change.
+ */
+ to_nid = cpu_to_node(cpuid);
+ if (from_nid != to_nid)
+ change_cpu_under_node(cpu, from_nid, to_nid);
- if (ret >= 0)
- ret = count;
return ret;
}
-static DEVICE_ATTR(online, 0644, show_online, store_online);
-static void __cpuinit register_cpu_control(struct cpu *cpu)
+static int cpu_subsys_offline(struct device *dev)
{
- device_create_file(&cpu->dev, &dev_attr_online);
+ return cpu_down(dev->id);
}
+
void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
- device_remove_file(&cpu->dev, &dev_attr_online);
-
device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
@@ -108,7 +85,17 @@ static ssize_t cpu_probe_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_probe(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_probe(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static ssize_t cpu_release_store(struct device *dev,
@@ -116,19 +103,35 @@ static ssize_t cpu_release_store(struct device *dev,
const char *buf,
size_t count)
{
- return arch_cpu_release(buf, count);
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_release(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-static inline void register_cpu_control(struct cpu *cpu)
-{
-}
#endif /* CONFIG_HOTPLUG_CPU */
+struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
@@ -164,7 +167,31 @@ static ssize_t show_crash_notes_size(struct device *dev,
return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
+
+static struct attribute *crash_note_cpu_attrs[] = {
+ &dev_attr_crash_notes.attr,
+ &dev_attr_crash_notes_size.attr,
+ NULL
+};
+
+static struct attribute_group crash_note_cpu_attr_group = {
+ .attrs = crash_note_cpu_attrs,
+};
+#endif
+
+static const struct attribute_group *common_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
+#endif
+ NULL
+};
+
+static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
#endif
+ NULL
+};
/*
* Print cpu online, possible, present, and system maps
@@ -260,6 +287,41 @@ static void cpu_device_release(struct device *dev)
*/
}
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t n;
+ u32 i;
+
+ n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ n += sprintf(&buf[n], ",%04X", i);
+ }
+ buf[n++] = '\n';
+ return n;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -268,7 +330,7 @@ static void cpu_device_release(struct device *dev)
*
* Initialize and register the CPU device.
*/
-int __cpuinit register_cpu(struct cpu *cpu, int num)
+int register_cpu(struct cpu *cpu, int num)
{
int error;
@@ -277,24 +339,21 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
- cpu->dev.bus->uevent = arch_cpu_uevent;
+ cpu->dev.offline_disabled = !cpu->hotpluggable;
+ cpu->dev.offline = !cpu_online(num);
+ cpu->dev.of_node = of_get_cpu_node(num, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
#endif
+ cpu->dev.groups = common_cpu_attr_groups;
+ if (cpu->hotpluggable)
+ cpu->dev.groups = hotplugable_cpu_attr_groups;
error = device_register(&cpu->dev);
- if (!error && cpu->hotpluggable)
- register_cpu_control(cpu);
if (!error)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
-#ifdef CONFIG_KEXEC
- if (!error)
- error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
- if (!error)
- error = device_create_file(&cpu->dev,
- &dev_attr_crash_notes_size);
-#endif
return error;
}
@@ -307,8 +366,8 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
-static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif
static struct attribute *cpu_root_attrs[] = {
@@ -321,7 +380,7 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
-#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
NULL
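
With the cpu.c changes above, the hand-rolled per-CPU "online" attribute is gone: the driver core provides the sysfs "online" file for any bus that implements the .online/.offline callbacks, and writes to /sys/devices/system/cpu/cpuN/online are routed through them under the device hotplug lock. A minimal sketch of a bus wiring these hooks (hypothetical bus name and callbacks, assuming the generic online/offline support in struct bus_type):

static int mybus_online(struct device *dev)
{
	/* invoked by the core for "echo 1 > .../online" */
	return 0;
}

static int mybus_offline(struct device *dev)
{
	/* invoked by the core for "echo 0 > .../online" */
	return 0;
}

static struct bus_type mybus = {
	.name	  = "mybus",
	.dev_name = "mydev",
	.online	  = mybus_online,
	.offline  = mybus_offline,
};
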
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa3689891..e4ffbcf2f51 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
* This functions moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
+ *
+ * Note that there is a race condition in multi-threaded probe. When more
+ * than one device is probing at the same time, it is possible for one probe
+ * to complete successfully while another is about to defer. If the second
+ * depends on the first, it will be put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
*/
static void driver_deferred_probe_trigger(void)
{
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
* into the active list so they can be retried by the workqueue
*/
mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
@@ -187,8 +200,8 @@ static void driver_bound(struct device *dev)
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
- __func__, dev->driver->name);
+ pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
+ __func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
+ int local_trigger_count = atomic_read(&deferred_trigger_count);
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
/* Driver requested deferred probing */
dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
} else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
@@ -499,7 +516,7 @@ static void __device_release_driver(struct device *dev)
BUS_NOTIFY_UNBIND_DRIVER,
dev);
- pm_runtime_put(dev);
+ pm_runtime_put_sync(dev);
if (dev->bus && dev->bus->remove)
dev->bus->remove(dev);
@@ -570,29 +587,3 @@ void driver_detach(struct device_driver *drv)
put_device(dev);
}
}
-
-/*
- * These exports can't be _GPL due to .h files using this within them, and it
- * might break something that was previously working...
- */
-void *dev_get_drvdata(const struct device *dev)
-{
- if (dev && dev->p)
- return dev->p->driver_data;
- return NULL;
-}
-EXPORT_SYMBOL(dev_get_drvdata);
-
-int dev_set_drvdata(struct device *dev, void *data)
-{
- int error;
-
- if (!dev->p) {
- error = device_private_init(dev);
- if (error)
- return error;
- }
- dev->p->driver_data = data;
- return 0;
-}
-EXPORT_SYMBOL(dev_set_drvdata);
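
The deferred-probe fix above samples an atomic trigger count before probing and re-triggers deferred processing if the count changed, so a device deferred just after its dependency became ready is not stranded on the pending list. The pattern in isolation (hypothetical do_probe()/add_to_pending() helpers):

static atomic_t trigger_count = ATOMIC_INIT(0);

static void trigger(void)		/* runs whenever any probe succeeds */
{
	atomic_inc(&trigger_count);
	/* ... splice the pending list onto the active list ... */
}

static int probe_one(struct device *dev)
{
	int seen = atomic_read(&trigger_count);
	int ret = do_probe(dev);

	if (ret == -EPROBE_DEFER) {
		add_to_pending(dev);
		/* a trigger fired while we were probing: run it again */
		if (seen != atomic_read(&trigger_count))
			trigger();
	}
	return ret;
}
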
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b76..52302946770 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, tot_size);
+ memset(dr, 0, offsetof(struct devres, data));
+
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
return dr;
@@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
set_node_dbginfo(&dr->node, name, size);
@@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
struct devres *dr;
- dr = alloc_dr(release, size, gfp);
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO);
if (unlikely(!dr))
return NULL;
return dr->data;
@@ -745,58 +746,185 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
EXPORT_SYMBOL_GPL(devm_remove_action);
/*
- * Managed kzalloc/kfree
+ * Managed kmalloc/kfree
*/
-static void devm_kzalloc_release(struct device *dev, void *res)
+static void devm_kmalloc_release(struct device *dev, void *res)
{
/* noop */
}
-static int devm_kzalloc_match(struct device *dev, void *res, void *data)
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
return res == data;
}
/**
- * devm_kzalloc - Resource-managed kzalloc
+ * devm_kmalloc - Resource-managed kmalloc
* @dev: Device to allocate memory for
* @size: Allocation size
* @gfp: Allocation gfp flags
*
- * Managed kzalloc. Memory allocated with this function is
+ * Managed kmalloc. Memory allocated with this function is
* automatically freed on driver detach. Like all other devres
* resources, guaranteed alignment is unsigned long long.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
struct devres *dr;
/* use raw alloc_dr for kmalloc caller tracing */
- dr = alloc_dr(devm_kzalloc_release, size, gfp);
+ dr = alloc_dr(devm_kmalloc_release, size, gfp);
if (unlikely(!dr))
return NULL;
+ /*
+ * This is named devm_kzalloc_release for historical reasons: the
+ * initial implementation supported only kzalloc, not kmalloc.
+ */
set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
devres_add(dev, dr->data);
return dr->data;
}
-EXPORT_SYMBOL_GPL(devm_kzalloc);
+EXPORT_SYMBOL_GPL(devm_kmalloc);
+
+/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into that.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = devm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
/**
* devm_kfree - Resource-managed kfree
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with devm_kzalloc().
+ * Free memory allocated with devm_kmalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
+ rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
+
+/**
+ * devm_kmemdup - Resource-managed kmemdup
+ * @dev: Device this memory belongs to
+ * @src: Memory region to duplicate
+ * @len: Memory region length
+ * @gfp: GFP mask to use
+ *
+ * Duplicate a region of memory using resource-managed kmalloc.
+ */
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ void *p;
+
+ p = devm_kmalloc(dev, len, gfp);
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup);
+
+struct pages_devres {
+ unsigned long addr;
+ unsigned int order;
+};
+
+static int devm_pages_match(struct device *dev, void *res, void *p)
+{
+ struct pages_devres *devres = res;
+ struct pages_devres *target = p;
+
+ return devres->addr == target->addr;
+}
+
+static void devm_pages_release(struct device *dev, void *res)
+{
+ struct pages_devres *devres = res;
+
+ free_pages(devres->addr, devres->order);
+}
+
+/**
+ * devm_get_free_pages - Resource-managed __get_free_pages
+ * @dev: Device to allocate memory for
+ * @gfp_mask: Allocation gfp flags
+ * @order: Allocation size is (1 << order) pages
+ *
+ * Managed get_free_pages. Memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Address of allocated memory on success, 0 on failure.
+ */
+
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order)
+{
+ struct pages_devres *devres;
+ unsigned long addr;
+
+ addr = __get_free_pages(gfp_mask, order);
+
+ if (unlikely(!addr))
+ return 0;
+
+ devres = devres_alloc(devm_pages_release,
+ sizeof(struct pages_devres), GFP_KERNEL);
+ if (unlikely(!devres)) {
+ free_pages(addr, order);
+ return 0;
+ }
+
+ devres->addr = addr;
+ devres->order = order;
+
+ devres_add(dev, devres);
+ return addr;
+}
+EXPORT_SYMBOL_GPL(devm_get_free_pages);
+
+/**
+ * devm_free_pages - Resource-managed free_pages
+ * @dev: Device this memory belongs to
+ * @addr: Memory to free
+ *
+ * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
+ * there is no need to supply the @order.
+ */
+void devm_free_pages(struct device *dev, unsigned long addr)
+{
+ struct pages_devres devres = { .addr = addr };
+
+ WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
+ &devres));
+}
+EXPORT_SYMBOL_GPL(devm_free_pages);
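
Taken together, the devres changes above give drivers a family of managed allocators; devm_kzalloc() survives as a zeroing wrapper that passes __GFP_ZERO to devm_kmalloc(). A hedged usage sketch (demo_priv and the driver are hypothetical) in which every allocation is released automatically on unbind or probe failure:

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct demo_priv *priv;
	unsigned long scratch;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->label = devm_kstrdup(dev, "demo-instance", GFP_KERNEL);
	if (!priv->label)
		return -ENOMEM;

	/* two pages (order 1), freed for us as devm_free_pages() would */
	scratch = devm_get_free_pages(dev, GFP_KERNEL, 1);
	if (!scratch)
		return -ENOMEM;

	priv->scratch = (void *)scratch;
	platform_set_drvdata(pdev, priv);
	return 0;	/* no manual cleanup needed on later unbind */
}
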
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 7413d065906..25798db1455 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -216,7 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
+ notify_change(dentry, &newattrs, NULL);
mutex_unlock(&dentry->d_inode->i_mutex);
/* mark as kernel-created inode */
@@ -299,7 +299,7 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
- int deleted = 1;
+ int deleted = 0;
int err;
dentry = kern_path_locked(nodename, &parent);
@@ -322,9 +322,9 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
+ notify_change(dentry, &newattrs, NULL);
mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(parent.dentry->d_inode, dentry);
+ err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 08fe897c0b4..840c7fa8098 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -77,9 +77,36 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
return dmabuf->ops->mmap(dmabuf, vma);
}
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct dma_buf *dmabuf;
+ loff_t base;
+
+ if (!is_dma_buf_file(file))
+ return -EBADF;
+
+ dmabuf = file->private_data;
+
+ /* only support discovering the end of the buffer, but
+ also allow SEEK_SET so the idiomatic SEEK_END(0),
+ SEEK_SET(0) pattern still works */
+ if (whence == SEEK_END)
+ base = dmabuf->size;
+ else if (whence == SEEK_SET)
+ base = 0;
+ else
+ return -EINVAL;
+
+ if (offset != 0)
+ return -EINVAL;
+
+ return base + offset;
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
+ .llseek = dma_buf_llseek,
};
/*
@@ -133,7 +160,12 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
dmabuf->exp_name = exp_name;
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+ if (IS_ERR(file)) {
+ kfree(dmabuf);
+ return ERR_CAST(file);
+ }
+ file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
mutex_init(&dmabuf->lock);
@@ -219,9 +251,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
*
- * Returns struct dma_buf_attachment * for this attachment; may return negative
- * error codes.
- *
+ * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
+ * error.
*/
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
@@ -287,9 +318,8 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
* @attach: [in] attachment whose scatterlist is to be returned
* @direction: [in] direction of DMA transfer
*
- * Returns sg_table containing the scatterlist to be returned; may return NULL
- * or ERR_PTR.
- *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -302,6 +332,8 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return ERR_PTR(-EINVAL);
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (!sg_table)
+ sg_table = ERR_PTR(-ENOMEM);
return sg_table;
}
@@ -459,7 +491,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
* dma-buf buffer.
*
* This function adjusts the passed in vma so that it points at the file of the
- * dma_buf operation. It alsog adjusts the starting pgoff and does bounds
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
* checking on the size of the vma. Then it calls the exporters mmap function to
* set up the mapping.
*
@@ -512,6 +544,8 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* These calls are optional in drivers. The intended use for them
* is mapping objects linearly into kernel space for frequently used objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
+ *
+ * Returns NULL on error.
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
@@ -534,7 +568,9 @@ void *dma_buf_vmap(struct dma_buf *dmabuf)
BUG_ON(dmabuf->vmap_ptr);
ptr = dmabuf->ops->vmap(dmabuf);
- if (IS_ERR_OR_NULL(ptr))
+ if (WARN_ON_ONCE(IS_ERR(ptr)))
+ ptr = NULL;
+ if (!ptr)
goto out_unlock;
dmabuf->vmap_ptr = ptr;
@@ -584,36 +620,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
@@ -680,10 +715,7 @@ int dma_buf_debugfs_create_file(const char *name,
d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
write, &dma_buf_debug_fops);
- if (IS_ERR(d))
- return PTR_ERR(d);
-
- return 0;
+ return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
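
Because the export path now sets FMODE_LSEEK and installs the llseek handler above, userspace can discover a dma-buf's size without a dedicated ioctl. A small userspace sketch (the dma-buf fd is assumed to come from an exporting driver):

#include <sys/types.h>
#include <unistd.h>

static off_t dmabuf_size(int fd)
{
	off_t size = lseek(fd, 0, SEEK_END);	/* SEEK_END(0) -> buffer size */

	if (size < 0)
		return -1;
	lseek(fd, 0, SEEK_SET);		/* rewind; only offset 0 is accepted */
	return size;
}
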
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bc256b64102..7d6e84a5142 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,13 +10,13 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
- phys_addr_t pfn_base;
+ unsigned long pfn_base;
int size;
int flags;
unsigned long *bitmap;
};
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void __iomem *mem_base = NULL;
@@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
- mem_base = ioremap(bus_addr, size);
+ mem_base = ioremap(phys_addr, size);
if (!mem_base)
goto out;
@@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
- dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+ dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
*ret = -ENXIO;
if (off < count && user_count <= count - off) {
- unsigned pfn = mem->pfn_base + start + off;
+ unsigned long pfn = mem->pfn_base + start + off;
*ret = remap_pfn_range(vma, vma->vm_start, pfn,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce9..6467c919c50 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
unsigned long base_pfn;
unsigned long count;
unsigned long *bitmap;
+ struct mutex lock;
};
struct cma *dma_contiguous_default_area;
@@ -49,7 +50,7 @@ struct cma *dma_contiguous_default_area;
/*
* Default global CMA area size can be defined in kernel's .config.
- * This is usefull mainly for distro maintainers to create a kernel
+ * This is useful mainly for distro maintainers to create a kernel
* that works correctly for most supported systems.
* The size can be set in bytes or as a percentage of the total memory
* in the system.
@@ -59,11 +60,22 @@ struct cma *dma_contiguous_default_area;
*/
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
static int __init early_cma(char *p)
{
pr_debug("%s(%s)\n", __func__, p);
size_cmdline = memparse(p, &p);
+ if (*p != '@')
+ return 0;
+ base_cmdline = memparse(p + 1, &p);
+ if (*p != '-') {
+ limit_cmdline = base_cmdline + size_cmdline;
+ return 0;
+ }
+ limit_cmdline = memparse(p + 1, &p);
+
return 0;
}
early_param("cma", early_cma);
@@ -96,7 +108,7 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
#endif
/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
*
* This function reserves memory from early allocator. It should be
@@ -107,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
void __init dma_contiguous_reserve(phys_addr_t limit)
{
phys_addr_t selected_size = 0;
+ phys_addr_t selected_base = 0;
+ phys_addr_t selected_limit = limit;
+ bool fixed = false;
pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
if (size_cmdline != -1) {
selected_size = size_cmdline;
+ selected_base = base_cmdline;
+ selected_limit = min_not_zero(limit_cmdline, limit);
+ if (base_cmdline + size_cmdline == limit_cmdline)
+ fixed = true;
} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
selected_size = size_bytes;
@@ -124,22 +143,31 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
#endif
}
- if (selected_size) {
+ if (selected_size && !dma_contiguous_default_area) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- dma_declare_contiguous(NULL, selected_size, 0, limit);
+ dma_contiguous_reserve_area(selected_size, selected_base,
+ selected_limit,
+ &dma_contiguous_default_area,
+ fixed);
}
-};
+}
static DEFINE_MUTEX(cma_mutex);
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
{
- unsigned long pfn = base_pfn;
- unsigned i = count >> pageblock_order;
+ int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
struct zone *zone;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ return -ENOMEM;
+
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
@@ -148,97 +176,74 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
base_pfn = pfn;
for (j = pageblock_nr_pages; j; --j, pfn++) {
WARN_ON_ONCE(!pfn_valid(pfn));
+ /*
+ * alloc_contig_range requires the pfn range
+ * specified to be in the same zone. Make this
+ * simple by forcing the entire CMA reserved
+ * range to be in the same zone.
+ */
if (page_zone(pfn_to_page(pfn)) != zone)
- return -EINVAL;
+ goto err;
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
- return 0;
-}
-
-static __init struct cma *cma_create_area(unsigned long base_pfn,
- unsigned long count)
-{
- int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
- struct cma *cma;
- int ret = -ENOMEM;
-
- pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
- cma = kmalloc(sizeof *cma, GFP_KERNEL);
- if (!cma)
- return ERR_PTR(-ENOMEM);
-
- cma->base_pfn = base_pfn;
- cma->count = count;
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
- if (!cma->bitmap)
- goto no_mem;
-
- ret = cma_activate_area(base_pfn, count);
- if (ret)
- goto error;
- pr_debug("%s: returned %p\n", __func__, (void *)cma);
- return cma;
+ mutex_init(&cma->lock);
+ return 0;
-error:
+err:
kfree(cma->bitmap);
-no_mem:
- kfree(cma);
- return ERR_PTR(ret);
+ return -EINVAL;
}
-static struct cma_reserved {
- phys_addr_t start;
- unsigned long size;
- struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static int __init cma_init_reserved_areas(void)
{
- struct cma_reserved *r = cma_reserved;
- unsigned i = cma_reserved_count;
-
- pr_debug("%s()\n", __func__);
+ int i;
- for (; i; --i, ++r) {
- struct cma *cma;
- cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
- if (!IS_ERR(cma))
- dev_set_cma_area(r->dev, cma);
+ for (i = 0; i < cma_area_count; i++) {
+ int ret = cma_activate_area(&cma_areas[i]);
+ if (ret)
+ return ret;
}
+
return 0;
}
core_initcall(cma_init_reserved_areas);
/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
* @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: if true, place the reserved area exactly at @base
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It allows the creation of custom reserved areas for specific
+ * devices.
*
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
*/
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
{
- struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ struct cma *cma = &cma_areas[cma_area_count];
phys_addr_t alignment;
+ int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
(unsigned long)limit);
/* Sanity checks */
- if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -253,20 +258,17 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
limit &= ~(alignment - 1);
/* Reserve memory */
- if (base) {
+ if (base && fixed) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- base = -EBUSY;
+ ret = -EBUSY;
goto err;
}
} else {
- /*
- * Use __memblock_alloc_base() since
- * memblock_alloc_base() panic()s.
- */
- phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ phys_addr_t addr = memblock_alloc_range(size, alignment, base,
+ limit);
if (!addr) {
- base = -ENOMEM;
+ ret = -ENOMEM;
goto err;
} else {
base = addr;
@@ -277,10 +279,11 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
- r->start = base;
- r->size = size;
- r->dev = dev;
- cma_reserved_count++;
+ cma->base_pfn = PFN_DOWN(base);
+ cma->count = size >> PAGE_SHIFT;
+ *res_cma = cma;
+ cma_area_count++;
+
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
@@ -289,7 +292,14 @@ int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
return 0;
err:
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return base;
+ return ret;
+}
+
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+ mutex_lock(&cma->lock);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ mutex_unlock(&cma->lock);
}
/**
@@ -300,7 +310,7 @@ err:
*
* This function allocates memory buffer for specified device. It uses
* device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
* function.
*/
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -325,30 +335,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
mask = (1 << align) - 1;
- mutex_lock(&cma_mutex);
for (;;) {
+ mutex_lock(&cma->lock);
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
start, count, mask);
- if (pageno >= cma->count)
+ if (pageno >= cma->count) {
+ mutex_unlock(&cma->lock);
break;
+ }
+ bitmap_set(cma->bitmap, pageno, count);
+ /*
+ * It's safe to drop the lock here. We've marked this region for
+ * our exclusive use. If the migration fails we will take the
+ * lock again and unmark it.
+ */
+ mutex_unlock(&cma->lock);
pfn = cma->base_pfn + pageno;
+ mutex_lock(&cma_mutex);
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ mutex_unlock(&cma_mutex);
if (ret == 0) {
- bitmap_set(cma->bitmap, pageno, count);
page = pfn_to_page(pfn);
break;
} else if (ret != -EBUSY) {
+ clear_cma_bitmap(cma, pfn, count);
break;
}
+ clear_cma_bitmap(cma, pfn, count);
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
/* try again with a bit different memory target */
start = pageno + mask + 1;
}
- mutex_unlock(&cma_mutex);
pr_debug("%s(): returned %p\n", __func__, page);
return page;
}
@@ -381,10 +402,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
- mutex_lock(&cma_mutex);
- bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
free_contig_range(pfn, count);
- mutex_unlock(&cma_mutex);
+ clear_cma_bitmap(cma, pfn, count);
return true;
}
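
Two behavioural changes meet in the dma-contiguous.c hunks above: the cma= parameter now accepts cma=<size>[@<base>[-<limit>]], and allocation claims a bitmap range under the new per-area lock, drops that lock for the slow page migration, and clears the range again on failure, so only the migration itself still serializes on cma_mutex. The lock-split pattern, reduced to a sketch (simplified, not the exact kernel code):

static unsigned long claim_range(struct cma *cma, unsigned long start,
				 int count, unsigned long mask)
{
	unsigned long pageno;

	mutex_lock(&cma->lock);
	pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
					    start, count, mask);
	if (pageno < cma->count)
		bitmap_set(cma->bitmap, pageno, count);	/* range is ours */
	mutex_unlock(&cma->lock);	/* safe to drop: nobody else can take it */
	return pageno;			/* >= cma->count means "none free" */
}

On migration failure the caller re-takes cma->lock and bitmap_clear()s the range, exactly as clear_cma_bitmap() does above.
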
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 0ce39a33b3c..6cd08e145bf 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
/**
* dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
* @dev: Device to declare coherent memory for
- * @bus_addr: Bus address of coherent memory to be declared
+ * @phys_addr: Physical address of coherent memory to be declared
* @device_addr: Device address of coherent memory to be declared
* @size: Size of coherent memory to be declared
* @flags: Flags
@@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void *res;
@@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
if (!res)
return -ENOMEM;
- rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
+ rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
flags);
if (rc == 0)
devres_add(dev, res);
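
dmam_declare_coherent_memory() now takes the region's CPU physical address instead of a bus address. A hedged probe-time sketch, following the RETURNS convention documented above (addresses, size, and flags are placeholders):

static int demo_probe(struct platform_device *pdev)
{
	phys_addr_t phys = 0x40000000;	/* CPU physical address */
	dma_addr_t dev_addr = 0x0;	/* address as the device sees it */
	int rc;

	rc = dmam_declare_coherent_memory(&pdev->dev, phys, dev_addr,
					  SZ_1M, DMA_MEMORY_MAP);
	if (rc)
		return rc;
	/* the declaration is released automatically on driver detach */
	return 0;
}
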
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 974e301a1ef..9e29943e56c 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
#include "base.h"
static struct device *next_device(struct klist_iter *i)
@@ -123,34 +124,16 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-static int driver_add_groups(struct device_driver *drv,
- const struct attribute_group **groups)
+int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&drv->p->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&drv->p->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
+ return sysfs_create_groups(&drv->p->kobj, groups);
}
-static void driver_remove_groups(struct device_driver *drv,
- const struct attribute_group **groups)
+void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&drv->p->kobj, groups[i]);
+ sysfs_remove_groups(&drv->p->kobj, groups);
}
/**
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b1f9265887..d276e33880b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -27,6 +27,7 @@
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <linux/reboot.h>
#include <generated/utsrelease.h>
@@ -95,6 +96,15 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
+/* firmware behavior options */
+#define FW_OPT_UEVENT (1U << 0)
+#define FW_OPT_NOWAIT (1U << 1)
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+#define FW_OPT_FALLBACK (1U << 2)
+#else
+#define FW_OPT_FALLBACK 0
+#endif
+
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
spinlock_t lock;
@@ -127,9 +137,11 @@ struct firmware_buf {
size_t size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
bool is_paged_buf;
+ bool need_uevent;
struct page **pages;
int nr_pages;
int page_array_size;
+ struct list_head pending_list;
#endif
char fw_id[];
};
@@ -171,6 +183,9 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ INIT_LIST_HEAD(&buf->pending_list);
+#endif
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -212,19 +227,8 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
return tmp ? 0 : -ENOMEM;
}
-static struct firmware_buf *fw_lookup_buf(const char *fw_name)
-{
- struct firmware_buf *tmp;
- struct firmware_cache *fwc = &fw_cache;
-
- spin_lock(&fwc->lock);
- tmp = __fw_lookup_buf(fw_name);
- spin_unlock(&fwc->lock);
-
- return tmp;
-}
-
static void __fw_free_buf(struct kref *ref)
+ __releases(&fwc->lock)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
@@ -276,43 +280,47 @@ module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
/* Don't inline this: 'struct kstat' is biggish */
-static noinline_for_stack long fw_file_size(struct file *file)
+static noinline_for_stack int fw_file_size(struct file *file)
{
struct kstat st;
if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
- if (st.size != (long)st.size)
+ if (st.size != (int)st.size)
return -1;
return st.size;
}
-static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
- long size;
+ int size;
char *buf;
+ int rc;
size = fw_file_size(file);
if (size <= 0)
- return false;
+ return -EINVAL;
buf = vmalloc(size);
if (!buf)
- return false;
- if (kernel_read(file, 0, buf, size) != size) {
+ return -ENOMEM;
+ rc = kernel_read(file, 0, buf, size);
+ if (rc != size) {
+ if (rc > 0)
+ rc = -EIO;
vfree(buf);
- return false;
+ return rc;
}
fw_buf->data = buf;
fw_buf->size = size;
- return true;
+ return 0;
}
-static bool fw_get_filesystem_firmware(struct device *device,
+static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
int i;
- bool success = false;
+ int rc = -ENOENT;
char *path = __getname();
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
@@ -327,14 +335,17 @@ static bool fw_get_filesystem_firmware(struct device *device,
file = filp_open(path, O_RDONLY, 0);
if (IS_ERR(file))
continue;
- success = fw_read_file_contents(file, buf);
+ rc = fw_read_file_contents(file, buf);
fput(file);
- if (success)
+ if (rc)
+ dev_warn(device, "firmware: attempted to load %s, but failed with error %d\n",
+ path, rc);
+ else
break;
}
__putname(path);
- if (success) {
+ if (!rc) {
dev_dbg(device, "firmware: direct-loading firmware %s\n",
buf->fw_id);
mutex_lock(&fw_lock);
@@ -343,7 +354,7 @@ static bool fw_get_filesystem_firmware(struct device *device,
mutex_unlock(&fw_lock);
}
- return success;
+ return rc;
}
/* firmware holds the ownership of pages */
@@ -446,20 +457,54 @@ static struct firmware_priv *to_firmware_priv(struct device *dev)
return container_of(dev, struct firmware_priv, dev);
}
-static void fw_load_abort(struct firmware_priv *fw_priv)
+static void __fw_load_abort(struct firmware_buf *buf)
{
- struct firmware_buf *buf = fw_priv->buf;
+ /*
+ * There is a small window in which the user can write to 'loading'
+ * between loading completing and the disappearance of 'loading'.
+ */
+ if (test_bit(FW_STATUS_DONE, &buf->status))
+ return;
+ list_del_init(&buf->pending_list);
set_bit(FW_STATUS_ABORT, &buf->status);
complete_all(&buf->completion);
}
+static void fw_load_abort(struct firmware_priv *fw_priv)
+{
+ struct firmware_buf *buf = fw_priv->buf;
+
+ __fw_load_abort(buf);
+
+ /* avoid user action after loading abort */
+ fw_priv->buf = NULL;
+}
+
#define is_fw_load_aborted(buf) \
test_bit(FW_STATUS_ABORT, &(buf)->status)
-static ssize_t firmware_timeout_show(struct class *class,
- struct class_attribute *attr,
- char *buf)
+static LIST_HEAD(pending_fw_head);
+
+/* reboot notifier to avoid a deadlock with usermode_lock */
+static int fw_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+ mutex_lock(&fw_lock);
+ while (!list_empty(&pending_fw_head))
+ __fw_load_abort(list_first_entry(&pending_fw_head,
+ struct firmware_buf,
+ pending_list));
+ mutex_unlock(&fw_lock);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+ .notifier_call = fw_shutdown_notify,
+};
+
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", loading_timeout);
}
@@ -477,9 +522,8 @@ static ssize_t firmware_timeout_show(struct class *class,
*
* Note: zero means 'wait forever'.
**/
-static ssize_t firmware_timeout_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t count)
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
if (loading_timeout < 0)
@@ -489,8 +533,7 @@ static ssize_t firmware_timeout_store(struct class *class,
}
static struct class_attribute firmware_class_attrs[] = {
- __ATTR(timeout, S_IWUSR | S_IRUGO,
- firmware_timeout_show, firmware_timeout_store),
+ __ATTR_RW(timeout),
__ATTR_NULL
};
@@ -499,8 +542,6 @@ static void fw_dev_release(struct device *dev)
struct firmware_priv *fw_priv = to_firmware_priv(dev);
kfree(fw_priv);
-
- module_put(THIS_MODULE);
}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -528,7 +569,12 @@ static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_priv->buf)
+ loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
}
@@ -570,12 +616,12 @@ static ssize_t firmware_loading_store(struct device *dev,
const char *buf, size_t count)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware_buf *fw_buf = fw_priv->buf;
+ struct firmware_buf *fw_buf;
int loading = simple_strtol(buf, NULL, 10);
int i;
mutex_lock(&fw_lock);
-
+ fw_buf = fw_priv->buf;
if (!fw_buf)
goto out;
@@ -603,7 +649,10 @@ static ssize_t firmware_loading_store(struct device *dev,
* see the mapped 'buf->data' once the loading
* is completed.
* */
- fw_map_pages_buf(fw_buf);
+ if (fw_map_pages_buf(fw_buf))
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ list_del_init(&fw_buf->pending_list);
complete_all(&fw_buf->completion);
break;
}
@@ -777,17 +826,13 @@ static void firmware_class_timeout_work(struct work_struct *work)
struct firmware_priv, timeout_work.work);
mutex_lock(&fw_lock);
- if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
- mutex_unlock(&fw_lock);
- return;
- }
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
}
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware_priv *fw_priv;
struct device *f_dev;
@@ -799,7 +844,7 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
goto exit;
}
- fw_priv->nowait = nowait;
+ fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
fw_priv->fw = firmware;
INIT_DELAYED_WORK(&fw_priv->timeout_work,
firmware_class_timeout_work);
@@ -815,8 +860,8 @@ exit:
}
/* load a firmware via user helper */
-static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
- long timeout)
+static int _request_firmware_load(struct firmware_priv *fw_priv,
+ unsigned int opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
@@ -827,9 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
dev_set_uevent_suppress(f_dev, true);
- /* Need to pin this module until class device is destroyed */
- __module_get(THIS_MODULE);
-
retval = device_add(f_dev);
if (retval) {
dev_err(f_dev, "%s: device_register failed\n", __func__);
@@ -842,17 +884,26 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
goto err_del_dev;
}
+ mutex_lock(&fw_lock);
+ list_add(&buf->pending_list, &pending_fw_head);
+ mutex_unlock(&fw_lock);
+
retval = device_create_file(f_dev, &dev_attr_loading);
if (retval) {
+ mutex_lock(&fw_lock);
+ list_del_init(&buf->pending_list);
+ mutex_unlock(&fw_lock);
dev_err(f_dev, "%s: device_create_file failed\n", __func__);
goto err_del_bin_attr;
}
- if (uevent) {
+ if (opt_flags & FW_OPT_UEVENT) {
+ buf->need_uevent = true;
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
- schedule_delayed_work(&fw_priv->timeout_work, timeout);
+ queue_delayed_work(system_power_efficient_wq,
+ &fw_priv->timeout_work, timeout);
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -860,8 +911,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
-
- fw_priv->buf = NULL;
+ if (!buf->data)
+ retval = -ENOMEM;
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
@@ -875,21 +926,38 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- bool uevent, bool nowait, long timeout)
+ unsigned int opt_flags, long timeout)
{
struct firmware_priv *fw_priv;
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ fw_priv = fw_create_instance(firmware, name, device, opt_flags);
if (IS_ERR(fw_priv))
return PTR_ERR(fw_priv);
fw_priv->buf = firmware->priv;
- return _request_firmware_load(fw_priv, uevent, timeout);
+ return _request_firmware_load(fw_priv, opt_flags, timeout);
}
+
+#ifdef CONFIG_PM_SLEEP
+/* kill pending requests without uevent to avoid blocking suspend */
+static void kill_requests_without_uevent(void)
+{
+ struct firmware_buf *buf;
+ struct firmware_buf *next;
+
+ mutex_lock(&fw_lock);
+ list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
+ if (!buf->need_uevent)
+ __fw_load_abort(buf);
+ }
+ mutex_unlock(&fw_lock);
+}
+#endif
+
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
- struct device *device, bool uevent, bool nowait,
+ struct device *device, unsigned int opt_flags,
long timeout)
{
return -ENOENT;
@@ -898,6 +966,10 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
/* No abort during direct loading */
#define is_fw_load_aborted(buf) false
+#ifdef CONFIG_PM_SLEEP
+static inline void kill_requests_without_uevent(void) { }
+#endif
+
#endif /* CONFIG_FW_LOADER_USER_HELPER */
@@ -965,7 +1037,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
-static int assign_firmware_buf(struct firmware *fw, struct device *device)
+static int assign_firmware_buf(struct firmware *fw, struct device *device,
+ unsigned int opt_flags)
{
struct firmware_buf *buf = fw->priv;
@@ -982,7 +1055,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device)
* device may have been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (device)
+ /* don't cache firmware handled without uevent */
+ if (device && (opt_flags & FW_OPT_UEVENT))
fw_add_devm_name(device, buf->fw_id);
/*
@@ -1003,7 +1077,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device)
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
+ struct device *device, unsigned int opt_flags)
{
struct firmware *fw;
long timeout;
@@ -1018,7 +1092,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
ret = 0;
timeout = firmware_loading_timeout();
- if (nowait) {
+ if (opt_flags & FW_OPT_NOWAIT) {
timeout = usermodehelper_read_lock_wait(timeout);
if (!timeout) {
dev_dbg(device, "firmware: %s loading timed out\n",
@@ -1035,11 +1109,20 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
}
}
- if (!fw_get_filesystem_firmware(device, fw->priv))
- ret = fw_load_from_user_helper(fw, name, device,
- uevent, nowait, timeout);
+ ret = fw_get_filesystem_firmware(device, fw->priv);
+ if (ret) {
+ if (opt_flags & FW_OPT_FALLBACK) {
+ dev_warn(device,
+ "Direct firmware load failed with error %d\n",
+ ret);
+ dev_warn(device, "Falling back to user helper\n");
+ ret = fw_load_from_user_helper(fw, name, device,
+ opt_flags, timeout);
+ }
+ }
+
if (!ret)
- ret = assign_firmware_buf(fw, device);
+ ret = assign_firmware_buf(fw, device, opt_flags);
usermodehelper_read_unlock();
@@ -1077,8 +1160,40 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- return _request_firmware(firmware_p, name, device, true, false);
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_firmware);
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * request_firmware_direct: - load firmware directly without usermode helper
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * fall back to the usermode helper even if the firmware couldn't be loaded
+ * directly from the filesystem. Hence it's useful for loading optional
+ * firmware images, which aren't always present, without long udev timeouts.
+ **/
+int request_firmware_direct(const struct firmware **firmware_p,
+ const char *name, struct device *device)
+{
+ int ret;
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, FW_OPT_UEVENT);
+ module_put(THIS_MODULE);
+ return ret;
}
+EXPORT_SYMBOL_GPL(request_firmware_direct);
+#endif
/**
* release_firmware: - release the resource associated with a firmware image
@@ -1092,6 +1207,7 @@ void release_firmware(const struct firmware *fw)
kfree(fw);
}
}
+EXPORT_SYMBOL(release_firmware);
/* Async support */
struct firmware_work {
@@ -1101,7 +1217,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- bool uevent;
+ unsigned int opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
@@ -1112,7 +1228,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
_request_firmware(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
+ fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1160,7 +1276,8 @@ request_firmware_nowait(
fw_work->device = device;
fw_work->context = context;
fw_work->cont = cont;
- fw_work->uevent = uevent;
+ fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
+ (uevent ? FW_OPT_UEVENT : 0);
if (!try_module_get(module)) {
kfree(fw_work);
@@ -1172,6 +1289,10 @@ request_firmware_nowait(
schedule_work(&fw_work->work);
return 0;
}
+EXPORT_SYMBOL(request_firmware_nowait);
+
+#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
* cache_firmware - cache one firmware image in kernel memory space
@@ -1187,7 +1308,7 @@ request_firmware_nowait(
* Return !0 otherwise
*
*/
-int cache_firmware(const char *fw_name)
+static int cache_firmware(const char *fw_name)
{
int ret;
const struct firmware *fw;
@@ -1203,6 +1324,18 @@ int cache_firmware(const char *fw_name)
return ret;
}
+static struct firmware_buf *fw_lookup_buf(const char *fw_name)
+{
+ struct firmware_buf *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ spin_lock(&fwc->lock);
+ tmp = __fw_lookup_buf(fw_name);
+ spin_unlock(&fwc->lock);
+
+ return tmp;
+}
+
/**
* uncache_firmware - remove one cached firmware image
* @fw_name: the firmware image name
@@ -1214,7 +1347,7 @@ int cache_firmware(const char *fw_name)
* Return !0 otherwise
*
*/
-int uncache_firmware(const char *fw_name)
+static int uncache_firmware(const char *fw_name)
{
struct firmware_buf *buf;
struct firmware fw;
@@ -1233,9 +1366,6 @@ int uncache_firmware(const char *fw_name)
return -EINVAL;
}
-#ifdef CONFIG_PM_SLEEP
-static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
-
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
@@ -1445,8 +1575,8 @@ static void device_uncache_fw_images_work(struct work_struct *work)
*/
static void device_uncache_fw_images_delay(unsigned long delay)
{
- schedule_delayed_work(&fw_cache.work,
- msecs_to_jiffies(delay));
+ queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+ msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
@@ -1455,6 +1585,8 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ kill_requests_without_uevent();
device_cache_fw_images();
break;
@@ -1517,6 +1649,7 @@ static int __init firmware_class_init(void)
{
fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
+ register_reboot_notifier(&fw_shutdown_nb);
return class_register(&firmware_class);
#else
return 0;
@@ -1530,15 +1663,10 @@ static void __exit firmware_class_exit(void)
unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
+ unregister_reboot_notifier(&fw_shutdown_nb);
class_unregister(&firmware_class);
#endif
}
fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);
-
-EXPORT_SYMBOL(release_firmware);
-EXPORT_SYMBOL(request_firmware);
-EXPORT_SYMBOL(request_firmware_nowait);
-EXPORT_SYMBOL_GPL(cache_firmware);
-EXPORT_SYMBOL_GPL(uncache_firmware);
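
Since request_firmware_direct() never falls back to the usermode helper, a missing optional image fails fast instead of stalling for the udev timeout. A hypothetical driver-side usage sketch:

static int demo_load_patch(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* optional image: skip the usermode-helper fallback and timeout */
	err = request_firmware_direct(&fw, "demo/patch.bin", dev);
	if (err)
		return 0;	/* image absent: continue without it */

	/* ... consume fw->data, fw->size ... */
	release_firmware(fw);
	return 0;
}
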
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c16f0b808a1..da033d3bab3 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -33,4 +33,5 @@ void __init driver_init(void)
platform_bus_init();
cpu_dev_init();
memory_dev_init();
+ container_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 14f8a6954da..89f752dd846 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -16,7 +16,6 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
-#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -30,6 +29,8 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
static int sections_per_block;
static inline int base_memory_block_id(int section_nr)
@@ -37,9 +38,14 @@ static inline int base_memory_block_id(int section_nr)
return section_nr / sections_per_block;
}
+static int memory_subsys_online(struct device *dev);
+static int memory_subsys_offline(struct device *dev);
+
static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
.dev_name = MEMORY_CLASS_NAME,
+ .online = memory_subsys_online,
+ .offline = memory_subsys_offline,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -72,27 +78,11 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier);
static void memory_block_release(struct device *dev)
{
- struct memory_block *mem = container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
kfree(mem);
}
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
-{
- int error;
-
- memory->dev.bus = &memory_subsys;
- memory->dev.id = memory->start_section_nr / sections_per_block;
- memory->dev.release = memory_block_release;
-
- error = device_register(&memory->dev);
- return error;
-}
-
unsigned long __weak memory_block_size_bytes(void)
{
return MIN_MEMORY_BLOCK_SIZE;
@@ -121,25 +111,13 @@ static unsigned long get_memory_block_size(void)
static ssize_t show_mem_start_phys_index(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
return sprintf(buf, "%08lx\n", phys_index);
}
-static ssize_t show_mem_end_phys_index(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
- unsigned long phys_index;
-
- phys_index = mem->end_section_nr / sections_per_block;
- return sprintf(buf, "%08lx\n", phys_index);
-}
-
/*
* Show whether the section of memory is likely to be hot-removable
*/
@@ -148,10 +126,11 @@ static ssize_t show_mem_removable(struct device *dev,
{
unsigned long i, pfn;
int ret = 1;
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
for (i = 0; i < sections_per_block; i++) {
+ if (!present_section_nr(mem->start_section_nr + i))
+ continue;
pfn = section_nr_to_pfn(mem->start_section_nr + i);
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}
@@ -165,8 +144,7 @@ static ssize_t show_mem_removable(struct device *dev,
static ssize_t show_mem_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
ssize_t len = 0;
/*
@@ -272,76 +250,107 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
return ret;
}
-static int __memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req,
- int online_type)
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
- if (mem->state != from_state_req) {
- ret = -EINVAL;
- goto out;
- }
+ if (mem->state != from_state_req)
+ return -EINVAL;
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
- ret = memory_block_action(mem->start_section_nr, to_state, online_type);
+ ret = memory_block_action(mem->start_section_nr, to_state,
+ mem->online_type);
- if (ret) {
- mem->state = from_state_req;
- goto out;
- }
+ mem->state = ret ? from_state_req : to_state;
- mem->state = to_state;
- switch (mem->state) {
- case MEM_OFFLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
- break;
- case MEM_ONLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
- break;
- default:
- break;
- }
-out:
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req,
- int online_type)
+/* The device lock serializes operations on memory_subsys_[online|offline] */
+static int memory_subsys_online(struct device *dev)
{
+ struct memory_block *mem = to_memory_block(dev);
int ret;
- mutex_lock(&mem->state_mutex);
- ret = __memory_block_change_state(mem, to_state, from_state_req,
- online_type);
- mutex_unlock(&mem->state_mutex);
+ if (mem->state == MEM_ONLINE)
+ return 0;
+
+ /*
+ * If we are called from store_mem_state(), online_type will be
+ * set >= 0. Otherwise we were called from the device online
+ * attribute and need to set the online_type.
+ */
+ if (mem->online_type < 0)
+ mem->online_type = ONLINE_KEEP;
+
+ ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
+
+ /* clear online_type */
+ mem->online_type = -1;
return ret;
}
+
+static int memory_subsys_offline(struct device *dev)
+{
+ struct memory_block *mem = to_memory_block(dev);
+
+ if (mem->state == MEM_OFFLINE)
+ return 0;
+
+ return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+}
+
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct memory_block *mem;
- int ret = -EINVAL;
+ struct memory_block *mem = to_memory_block(dev);
+ int ret, online_type;
- mem = container_of(dev, struct memory_block, dev);
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_KERNEL);
+ online_type = ONLINE_KERNEL;
else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_MOVABLE);
+ online_type = ONLINE_MOVABLE;
else if (!strncmp(buf, "online", min_t(int, count, 6)))
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_KEEP);
- else if(!strncmp(buf, "offline", min_t(int, count, 7)))
- ret = memory_block_change_state(mem, MEM_OFFLINE,
- MEM_ONLINE, -1);
+ online_type = ONLINE_KEEP;
+ else if (!strncmp(buf, "offline", min_t(int, count, 7)))
+ online_type = -1;
+ else {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (online_type) {
+ case ONLINE_KERNEL:
+ case ONLINE_MOVABLE:
+ case ONLINE_KEEP:
+ /*
+ * mem->online_type is not protected so there can be a
+ * race here. However, when two online requests race, the
+ * first will succeed and the second will simply return,
+ * as the block will already be online. The resulting
+ * online type could be either one, but that is expected.
+ */
+ mem->online_type = online_type;
+ ret = device_online(&mem->dev);
+ break;
+ case -1:
+ ret = device_offline(&mem->dev);
+ break;
+ default:
+ ret = -EINVAL; /* should never happen */
+ }
+
+err:
+ unlock_device_hotplug();
if (ret)
return ret;
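
From userspace, the state attribute parsed above is driven through sysfs; each keyword maps to one of the ONLINE_* types, and "offline" ends up in device_offline(). A minimal userspace sketch in C, assuming a hypothetical memory8 block and root privileges:

	#include <stdio.h>

	int main(void)
	{
		/* memory8 is a hypothetical block; pick a real one from sysfs */
		FILE *f = fopen("/sys/devices/system/memory/memory8/state", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* "online", "online_kernel" and "offline" work the same way */
		if (fputs("online_movable", f) == EOF)
			perror("write");
		return fclose(f) ? 1 : 0;
	}
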
@@ -360,22 +369,15 @@ store_mem_state(struct device *dev,
static ssize_t show_phys_device(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
return sprintf(buf, "%d\n", mem->phys_device);
}
static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
-#define mem_create_simple_file(mem, attr_name) \
- device_create_file(&mem->dev, &dev_attr_##attr_name)
-#define mem_remove_simple_file(mem, attr_name) \
- device_remove_file(&mem->dev, &dev_attr_##attr_name)
-
/*
* Block size attribute stuff
*/
@@ -388,12 +390,6 @@ print_block_size(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
-static int block_size_init(void)
-{
- return device_create_file(memory_subsys.dev_root,
- &dev_attr_block_size_bytes);
-}
-
/*
* Some architectures will have custom drivers to do this, and
* will not need to do it from userspace. The fake hot-add code
@@ -429,17 +425,8 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
out:
return ret;
}
-static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
-static int memory_probe_init(void)
-{
- return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
-}
-#else
-static inline int memory_probe_init(void)
-{
- return 0;
-}
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif
#ifdef CONFIG_MEMORY_FAILURE
@@ -457,7 +444,7 @@ store_soft_offline_page(struct device *dev,
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
+ if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
@@ -476,7 +463,7 @@ store_hard_offline_page(struct device *dev,
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
+ if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, 0, 0);
@@ -485,23 +472,6 @@ store_hard_offline_page(struct device *dev,
static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
-
-static __init int memory_fail_init(void)
-{
- int err;
-
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_soft_offline_page);
- if (!err)
- err = device_create_file(memory_subsys.dev_root,
- &dev_attr_hard_offline_page);
- return err;
-}
-#else
-static inline int memory_fail_init(void)
-{
- return 0;
-}
#endif
/*
@@ -530,7 +500,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
put_device(&hint->dev);
if (!dev)
return NULL;
- return container_of(dev, struct memory_block, dev);
+ return to_memory_block(dev);
}
/*
@@ -546,6 +516,38 @@ struct memory_block *find_memory_block(struct mem_section *section)
return find_memory_block_hinted(section, NULL);
}
+static struct attribute *memory_memblk_attrs[] = {
+ &dev_attr_phys_index.attr,
+ &dev_attr_state.attr,
+ &dev_attr_phys_device.attr,
+ &dev_attr_removable.attr,
+ NULL
+};
+
+static struct attribute_group memory_memblk_attr_group = {
+ .attrs = memory_memblk_attrs,
+};
+
+static const struct attribute_group *memory_memblk_attr_groups[] = {
+ &memory_memblk_attr_group,
+ NULL,
+};
+
+/*
+ * register_memory - Setup a sysfs device for a memory block
+ */
+static
+int register_memory(struct memory_block *memory)
+{
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.release = memory_block_release;
+ memory->dev.groups = memory_memblk_attr_groups;
+ memory->dev.offline = memory->state == MEM_OFFLINE;
+
+ return device_register(&memory->dev);
+}
+
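
Passing the attribute group via dev.groups, instead of calling device_create_file() after registration as the removed mem_create_simple_file() helpers did, lets the driver core create the files before the KOBJ_ADD uevent is emitted, so userspace never sees the device without its attributes. A generic sketch of the same pattern, with hypothetical foo_* names:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}
	static DEVICE_ATTR(foo, 0444, foo_show, NULL);

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr,
		NULL
	};

	static struct attribute_group foo_attr_group = {
		.attrs = foo_attrs,
	};

	static const struct attribute_group *foo_attr_groups[] = {
		&foo_attr_group,
		NULL,
	};

	static int foo_register(struct device *dev)
	{
		/* groups must be set before device_register() */
		dev->groups = foo_attr_groups;
		return device_register(dev);
	}
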
static int init_memory_block(struct memory_block **memory,
struct mem_section *section, unsigned long state)
{
@@ -564,75 +566,66 @@ static int init_memory_block(struct memory_block **memory,
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
mem->section_count++;
- mutex_init(&mem->state_mutex);
start_pfn = section_nr_to_pfn(mem->start_section_nr);
mem->phys_device = arch_get_memory_phys_device(start_pfn);
ret = register_memory(mem);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, end_phys_index);
- if (!ret)
- ret = mem_create_simple_file(mem, state);
- if (!ret)
- ret = mem_create_simple_file(mem, phys_device);
- if (!ret)
- ret = mem_create_simple_file(mem, removable);
*memory = mem;
return ret;
}
-static int add_memory_section(int nid, struct mem_section *section,
- struct memory_block **mem_p,
- unsigned long state, enum mem_add_context context)
+static int add_memory_block(int base_section_nr)
{
- struct memory_block *mem = NULL;
- int scn_nr = __section_nr(section);
- int ret = 0;
-
- mutex_lock(&mem_sysfs_mutex);
-
- if (context == BOOT) {
- /* same memory block ? */
- if (mem_p && *mem_p)
- if (scn_nr >= (*mem_p)->start_section_nr &&
- scn_nr <= (*mem_p)->end_section_nr) {
- mem = *mem_p;
- kobject_get(&mem->dev.kobj);
- }
- } else
- mem = find_memory_block(section);
-
- if (mem) {
- mem->section_count++;
- kobject_put(&mem->dev.kobj);
- } else {
- ret = init_memory_block(&mem, section, state);
- /* store memory_block pointer for next loop */
- if (!ret && context == BOOT)
- if (mem_p)
- *mem_p = mem;
- }
+ struct memory_block *mem;
+ int i, ret, section_count = 0, section_nr;
- if (!ret) {
- if (context == HOTPLUG &&
- mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid);
+ for (i = base_section_nr;
+ (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+ i++) {
+ if (!present_section_nr(i))
+ continue;
+ if (section_count == 0)
+ section_nr = i;
+ section_count++;
}
- mutex_unlock(&mem_sysfs_mutex);
- return ret;
+ if (section_count == 0)
+ return 0;
+ ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+ if (ret)
+ return ret;
+ mem->section_count = section_count;
+ return 0;
}
+
/*
* need an interface for the VM to add new memory regions,
* but without onlining it.
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
+ int ret = 0;
+ struct memory_block *mem;
+
+ mutex_lock(&mem_sysfs_mutex);
+
+ mem = find_memory_block(section);
+ if (mem) {
+ mem->section_count++;
+ put_device(&mem->dev);
+ } else {
+ ret = init_memory_block(&mem, section, MEM_OFFLINE);
+ if (ret)
+ goto out;
+ }
+
+ if (mem->section_count == sections_per_block)
+ ret = register_mem_sect_under_node(mem, nid);
+out:
+ mutex_unlock(&mem_sysfs_mutex);
+ return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -642,7 +635,7 @@ unregister_memory(struct memory_block *memory)
BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->dev.kobj);
+ put_device(&memory->dev);
device_unregister(&memory->dev);
}
@@ -656,15 +649,10 @@ static int remove_memory_block(unsigned long node_id,
unregister_mem_sect_under_nodes(mem, __section_nr(section));
mem->section_count--;
- if (mem->section_count == 0) {
- mem_remove_simple_file(mem, phys_index);
- mem_remove_simple_file(mem, end_phys_index);
- mem_remove_simple_file(mem, state);
- mem_remove_simple_file(mem, phys_device);
- mem_remove_simple_file(mem, removable);
+ if (mem->section_count == 0)
unregister_memory(mem);
- } else
- kobject_put(&mem->dev.kobj);
+ else
+ put_device(&mem->dev);
mutex_unlock(&mem_sysfs_mutex);
return 0;
@@ -679,27 +667,35 @@ int unregister_memory_section(struct mem_section *section)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-/*
- * offline one memory block. If the memory block has been offlined, do nothing.
- */
-int offline_memory_block(struct memory_block *mem)
-{
- int ret = 0;
-
- mutex_lock(&mem->state_mutex);
- if (mem->state != MEM_OFFLINE)
- ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
- mutex_unlock(&mem->state_mutex);
-
- return ret;
-}
-
/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
return mem->state == MEM_OFFLINE;
}
+static struct attribute *memory_root_attrs[] = {
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+ &dev_attr_probe.attr,
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+ &dev_attr_soft_offline_page.attr,
+ &dev_attr_hard_offline_page.attr,
+#endif
+
+ &dev_attr_block_size_bytes.attr,
+ NULL
+};
+
+static struct attribute_group memory_root_attr_group = {
+ .attrs = memory_root_attrs,
+};
+
+static const struct attribute_group *memory_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
/*
* Initialize the sysfs support for memory devices...
*/
@@ -709,9 +705,8 @@ int __init memory_dev_init(void)
int ret;
int err;
unsigned long block_sz;
- struct memory_block *mem = NULL;
- ret = subsys_system_register(&memory_subsys, NULL);
+ ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
goto out;
@@ -722,27 +717,14 @@ int __init memory_dev_init(void)
* Create entries for memory sections that were found
* during boot and have been initialized
*/
- for (i = 0; i < NR_MEM_SECTIONS; i++) {
- if (!present_section_nr(i))
- continue;
- /* don't need to reuse memory_block if only one per block */
- err = add_memory_section(0, __nr_to_section(i),
- (sections_per_block == 1) ? NULL : &mem,
- MEM_ONLINE,
- BOOT);
+ mutex_lock(&mem_sysfs_mutex);
+ for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+ err = add_memory_block(i);
if (!ret)
ret = err;
}
+ mutex_unlock(&mem_sysfs_mutex);
- err = memory_probe_init();
- if (!ret)
- ret = err;
- err = memory_fail_init();
- if (!ret)
- ret = err;
- err = block_size_init();
- if (!ret)
- ret = err;
out:
if (ret)
printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7616a77ca32..8f7ed9933a7 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -125,13 +125,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(nid, NR_WRITEBACK)),
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(node_page_state(nid, NR_ANON_PAGES)
- + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR),
-#else
nid, K(node_page_state(nid, NR_ANON_PAGES)),
-#endif
nid, K(node_page_state(nid, NR_SHMEM)),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
@@ -605,7 +599,11 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
+ if (!node_devices[nid])
+ return;
+
unregister_node(node_devices[nid]);
+ kfree(node_devices[nid]);
node_devices[nid] = NULL;
}
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
index 67a274e8672..5fb74b43848 100644
--- a/drivers/base/pinctrl.c
+++ b/drivers/base/pinctrl.c
@@ -48,6 +48,25 @@ int pinctrl_bind_pins(struct device *dev)
goto cleanup_get;
}
+#ifdef CONFIG_PM
+ /*
+ * If power management is enabled, we also look for the optional
+ * sleep and idle pin states, with semantics as defined in
+ * <linux/pinctrl/pinctrl-state.h>
+ */
+ dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(dev->pins->sleep_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no sleep pinctrl state\n");
+
+ dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(dev->pins->idle_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no idle pinctrl state\n");
+#endif
+
return 0;
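
Drivers then pick up these optional states in their PM callbacks through the pinctrl_pm_select_*() helpers from <linux/pinctrl/consumer.h>, which are no-ops when a state was not supplied. A hedged sketch, with hypothetical foo_* names:

	#include <linux/pinctrl/consumer.h>

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware first, then park the pins */
		return pinctrl_pm_select_sleep_state(dev);
	}

	static int foo_resume(struct device *dev)
	{
		/* restore the default pin state before touching the device */
		return pinctrl_pm_select_default_state(dev);
	}
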
/*
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9eda84246ff..eee48c49f5d 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -29,9 +30,6 @@
/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);
-#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
- driver))
-
struct device platform_bus = {
.init_name = "platform",
};
@@ -90,7 +88,16 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
return dev->archdata.irqs[num];
#else
- struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ struct resource *r;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+
+ ret = of_irq_get(dev->dev.of_node, num);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
#endif
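
With this change a platform driver no longer needs its interrupt controller to have probed first, provided it forwards the error code from platform_get_irq(). A minimal probe sketch, with hypothetical foo_* names:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			/* may be -EPROBE_DEFER if the irqchip is not ready */
			return irq;

		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
					dev_name(&pdev->dev), pdev);
	}
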
@@ -129,9 +136,17 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
*/
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
- struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
- name);
+ struct resource *r;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ int ret;
+ ret = of_irq_get_byname(dev->dev.of_node, name);
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+ r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(platform_get_irq_byname);
@@ -435,7 +450,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
- ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
+ ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
if (pdevinfo->dma_mask) {
/*
@@ -466,7 +481,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
- ACPI_HANDLE_SET(&pdev->dev, NULL);
+ ACPI_COMPANION_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
@@ -484,13 +499,17 @@ static int platform_drv_probe(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
int ret;
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_attach(_dev, true);
+ acpi_dev_pm_attach(_dev, true);
ret = drv->probe(dev);
- if (ret && ACPI_HANDLE(_dev))
+ if (ret)
acpi_dev_pm_detach(_dev, true);
+ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+ dev_warn(_dev, "probe deferral not supported\n");
+ ret = -ENXIO;
+ }
+
return ret;
}
@@ -506,8 +525,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
return ret;
}
@@ -518,16 +536,18 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- if (ACPI_HANDLE(_dev))
- acpi_dev_pm_detach(_dev, true);
+ acpi_dev_pm_detach(_dev, true);
}
/**
- * platform_driver_register - register a driver for platform-level devices
+ * __platform_driver_register - register a driver for platform-level devices
* @drv: platform driver structure
+ * @owner: owning module/driver
*/
-int platform_driver_register(struct platform_driver *drv)
+int __platform_driver_register(struct platform_driver *drv,
+ struct module *owner)
{
+ drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
if (drv->probe)
drv->driver.probe = platform_drv_probe;
@@ -538,7 +558,7 @@ int platform_driver_register(struct platform_driver *drv)
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(platform_driver_register);
+EXPORT_SYMBOL_GPL(__platform_driver_register);
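
The matching header change is not part of this hunk, but for context it turns the old entry point into a macro so the owning module is captured automatically at each call site (paraphrased, not quoted from this patch):

	/* <linux/platform_device.h>, paraphrased */
	#define platform_driver_register(drv) \
		__platform_driver_register(drv, THIS_MODULE)

Existing callers stay source-compatible while drv->driver.owner is filled in for them, removing the need for every driver to set .owner by hand.
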
/**
* platform_driver_unregister - unregister a driver for platform-level devices
@@ -553,8 +573,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
/**
* platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
- * @probe: the driver probe routine, probably from an __init section,
- * must not return -EPROBE_DEFER.
+ * @probe: the driver probe routine, probably from an __init section
*
* Use this instead of platform_driver_register() when you know the device
* is not hotpluggable and has already been registered, and you want to
@@ -565,8 +584,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
* into system-on-chip processors, where the controller devices have been
* configured as part of board setup.
*
- * This is incompatible with deferred probing so probe() must not
- * return -EPROBE_DEFER.
+ * Note that this is incompatible with deferred probing.
*
* Returns zero if the driver registered and bound to a device, else returns
* a negative error code and with the driver not registered.
@@ -576,6 +594,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
{
int retval, code;
+ /*
+ * Prevent driver from requesting probe deferral to avoid further
+ * futile probe attempts.
+ */
+ drv->prevent_deferred_probe = true;
+
/* make sure driver won't have bind/unbind attributes */
drv->driver.suppress_bind_attrs = true;
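
Taken together, a non-hotpluggable driver using this interface keeps its probe routine in __init memory, loses its bind/unbind sysfs attributes, and must not rely on probe deferral. A hedged sketch with hypothetical foo_* names:

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* must not return -EPROBE_DEFER: it becomes -ENXIO */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
	};

	static int __init foo_init(void)
	{
		/* probe is passed separately so it can be discarded after init */
		return platform_driver_probe(&foo_driver, foo_probe);
	}
	module_init(foo_init);
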
@@ -668,15 +692,27 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
- int len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+ int len;
+
+ len = of_device_get_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute platform_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *platform_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(platform_dev);
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -688,6 +724,10 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
if (rc != -ENODEV)
return rc;
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
pdev->name);
return 0;
@@ -888,13 +928,12 @@ int platform_pm_restore(struct device *dev)
static const struct dev_pm_ops platform_dev_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
};
struct bus_type platform_bus_type = {
.name = "platform",
- .dev_attrs = platform_dev_attrs,
+ .dev_groups = platform_dev_groups,
.match = platform_match,
.uevent = platform_uevent,
.pm = &platform_dev_pm_ops,
@@ -1055,7 +1094,7 @@ void __init early_platform_driver_register_all(char *class_str)
* @epdrv: early platform driver structure
* @id: id to match against
*/
-static __init struct platform_device *
+static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
struct platform_device *pd;
@@ -1073,7 +1112,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id)
* @epdrv: early platform driver structure
* @id: return true if id or above exists
*/
-static __init int early_platform_left(struct early_platform_driver *epdrv,
+static int __init early_platform_left(struct early_platform_driver *epdrv,
int id)
{
struct platform_device *pd;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c..1cb8544598d 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 9d8fde70939..b99e6c06ee6 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -33,6 +32,21 @@ struct pm_clock_entry {
};
/**
+ * __pm_clk_enable - Enable a clock, reporting any errors
+ * @dev: The device for the given clock
+ * @clk: The clock being enabled.
+ */
+static inline int __pm_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret = clk_enable(clk);
+ if (ret)
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, clk, ret);
+
+ return ret;
+}
+
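
The reason for this helper, and for moving clk_prepare() into pm_clk_acquire() below, is the split in the clk API: clk_prepare()/clk_unprepare() may sleep and must run in process context, while clk_enable()/clk_disable() are callable under the spinlock held in pm_clk_suspend()/pm_clk_resume(). The general consumer pattern looks roughly like this sketch (foo_* hypothetical):

	#include <linux/clk.h>

	static int foo_use_clock(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(dev, NULL);	/* process context */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare(clk);		/* may sleep */
		if (ret)
			goto put;

		ret = clk_enable(clk);		/* safe in atomic context */
		if (ret)
			goto unprepare;

		/* ... use the clock ... */

		clk_disable(clk);
	unprepare:
		clk_unprepare(clk);
	put:
		clk_put(clk);
		return ret;
	}
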
+/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @ce: PM clock entry corresponding to the clock.
@@ -43,6 +57,7 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
if (IS_ERR(ce->clk)) {
ce->status = PCE_STATUS_ERROR;
} else {
+ clk_prepare(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
}
@@ -99,10 +114,12 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable_unprepare(ce->clk);
+ clk_disable(ce->clk);
- if (ce->status >= PCE_STATUS_ACQUIRED)
+ if (ce->status >= PCE_STATUS_ACQUIRED) {
+ clk_unprepare(ce->clk);
clk_put(ce->clk);
+ }
}
kfree(ce->con_id);
@@ -249,6 +266,7 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -259,8 +277,9 @@ int pm_clk_resume(struct device *dev)
list_for_each_entry(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
- clk_enable(ce->clk);
- ce->status = PCE_STATUS_ENABLED;
+ ret = __pm_clk_enable(dev, ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
}
}
@@ -376,7 +395,7 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
list_for_each_entry(ce, &psd->clock_list, node)
- clk_enable(ce->clk);
+ __pm_clk_enable(dev, ce->clk);
spin_unlock_irqrestore(&psd->lock, flags);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 5da91404130..df2e5eeaeb0 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7072404c8b6..eee55c1e5fd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
@@ -42,7 +41,7 @@
struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
if (!__retval && __elapsed > __td->field) { \
__td->field = __elapsed; \
- dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
genpd->max_off_time_changed = true; \
__td->constraint_changed = true; \
@@ -106,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
atomic_inc(&genpd->sd_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -706,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
return 0;
}
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+ pd_ignore_unused = true;
+ return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
/**
* pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
*/
@@ -713,6 +720,11 @@ void pm_genpd_poweroff_unused(void)
{
struct generic_pm_domain *genpd;
+ if (pd_ignore_unused) {
+ pr_warn("genpd: Not disabling unused power domains\n");
+ return;
+ }
+
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node)
@@ -2143,7 +2155,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
- genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 28dee3053f1..a089e3bcdfb 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -6,7 +6,6 @@
* This file is released under the GPLv2.
*/
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index bfd898b8988..96a92db83ca 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -10,30 +10,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#ifdef CONFIG_PM_RUNTIME
-/**
- * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
- * @dev: Device to handle.
- *
- * If PM operations are defined for the @dev's driver and they include
- * ->runtime_idle(), execute it and return its error code, if nonzero.
- * Otherwise, execute pm_runtime_suspend() for the device and return 0.
- */
-int pm_generic_runtime_idle(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- if (pm && pm->runtime_idle) {
- int ret = pm->runtime_idle(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_suspend(dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
-
+#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
* @dev: Device to suspend.
@@ -71,7 +48,7 @@ int pm_generic_runtime_resume(struct device *dev)
return ret;
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
@@ -308,7 +285,7 @@ int pm_generic_restore(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_restore);
/**
- * pm_generic_complete - Generic routine competing a device power transition.
+ * pm_generic_complete - Generic routine completing a device power transition.
* @dev: Device to handle.
*
* Complete a device power transition during a system-wide power transition.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a9b6569dd7..bf412961a93 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,11 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
+#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
+#include <linux/timer.h>
+
#include "../base.h"
#include "power.h"
@@ -56,6 +60,30 @@ static pm_message_t pm_transition;
static int async_error;
+static char *pm_verb(int event)
+{
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ return "suspend";
+ case PM_EVENT_RESUME:
+ return "resume";
+ case PM_EVENT_FREEZE:
+ return "freeze";
+ case PM_EVENT_QUIESCE:
+ return "quiesce";
+ case PM_EVENT_HIBERNATE:
+ return "hibernate";
+ case PM_EVENT_THAW:
+ return "thaw";
+ case PM_EVENT_RESTORE:
+ return "restore";
+ case PM_EVENT_RECOVER:
+ return "recover";
+ default:
+ return "(unknown PM event)";
+ }
+}
+
/**
* device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
@@ -64,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -172,15 +202,17 @@ static ktime_t initcall_debug_start(struct device *dev)
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
- int error)
+ int error, pm_message_t state, char *info)
{
- ktime_t delta, rettime;
+ ktime_t rettime;
+ s64 nsecs;
+
+ rettime = ktime_get();
+ nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
if (pm_print_times_enabled) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
- error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ error, (unsigned long long)nsecs >> 10);
}
}
@@ -309,30 +341,6 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
return NULL;
}
-static char *pm_verb(int event)
-{
- switch (event) {
- case PM_EVENT_SUSPEND:
- return "suspend";
- case PM_EVENT_RESUME:
- return "resume";
- case PM_EVENT_FREEZE:
- return "freeze";
- case PM_EVENT_QUIESCE:
- return "quiesce";
- case PM_EVENT_HIBERNATE:
- return "hibernate";
- case PM_EVENT_THAW:
- return "thaw";
- case PM_EVENT_RESTORE:
- return "restore";
- case PM_EVENT_RECOVER:
- return "recover";
- default:
- return "(unknown PM event)";
- }
-}
-
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
@@ -376,14 +384,81 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
calltime = initcall_debug_start(dev);
pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
+ trace_device_pm_callback_end(dev, error);
suspend_report_result(cb, error);
- initcall_debug_report(dev, calltime, error);
+ initcall_debug_report(dev, calltime, error, state, info);
return error;
}
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so we panic() to
+ * capture a crash dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+ struct dpm_watchdog *wd = (void *)data;
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable the PM watchdog for a given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ init_timer_on_stack(timer);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ timer->function = dpm_watchdog_handler;
+ timer->data = (unsigned long)wd;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -394,7 +469,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -403,9 +478,14 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore)
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_noirq_suspended)
goto Out;
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -426,12 +506,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_noirq_suspended = false;
Out:
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
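
Devices take part in these asynchronous paths only when they have opted in and the global switch (/sys/power/pm_async, backing pm_async_enabled) is on; pm_trace, which depends on strict ordering, disables them. A driver opts in with one call, typically at probe time (foo_* hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		/*
		 * Suspend/resume this device in parallel with others;
		 * parent/child ordering is still enforced via dpm_wait*().
		 */
		device_enable_async_suspend(&pdev->dev);
		return 0;
	}
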
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -441,32 +541,53 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/
static void dpm_resume_noirq(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_noirq, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
@@ -476,7 +597,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -485,9 +606,14 @@ static int device_resume_early(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore)
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_late_suspended)
goto Out;
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -508,44 +634,78 @@ static int device_resume_early(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
+ pm_transition = state;
+
+ /*
+ * Advance the async threads upfront,
+ * in case the starting of async threads is
+ * delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_early, dev);
+ }
+ }
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ if (!is_async(dev)) {
+ int error;
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "early");
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
@@ -570,6 +730,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -577,7 +738,14 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (dev->power.direct_complete) {
+ /* Match the pm_runtime_disable() in __device_suspend(). */
+ pm_runtime_enable(dev);
+ goto Complete;
+ }
+
dpm_wait(dev->parent, async);
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
/*
@@ -636,6 +804,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -656,12 +825,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev);
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -674,6 +837,7 @@ void dpm_resume(pm_message_t state)
struct device *dev;
ktime_t starttime = ktime_get();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
@@ -681,7 +845,7 @@ void dpm_resume(pm_message_t state)
async_error = 0;
list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- INIT_COMPLETION(dev->power.completion);
+ reinit_completion(&dev->power.completion);
if (is_async(dev)) {
get_device(dev);
async_schedule(async_resume, dev);
@@ -713,6 +877,9 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
@@ -751,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)
if (callback) {
pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
callback(dev);
+ trace_device_pm_callback_end(dev, 0);
}
device_unlock(dev);
@@ -770,6 +939,7 @@ void dpm_complete(pm_message_t state)
{
struct list_head list;
+ trace_suspend_resume(TPS("dpm_complete"), state.event, true);
might_sleep();
INIT_LIST_HEAD(&list);
@@ -789,6 +959,7 @@ void dpm_complete(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
@@ -837,13 +1008,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
- if (dev->power.syscore)
- return 0;
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -864,7 +1046,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
callback = pm_noirq_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_noirq_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_noirq, dev);
+ return 0;
+ }
+ return __device_suspend_noirq(dev, pm_transition, false);
}
/**
@@ -879,22 +1095,24 @@ static int dpm_suspend_noirq(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state);
+ error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -903,16 +1121,22 @@ static int dpm_suspend_noirq(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "noirq");
+ }
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
@@ -923,15 +1147,26 @@ static int dpm_suspend_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
__pm_runtime_disable(dev, false);
- if (dev->power.syscore)
- return 0;
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@@ -952,7 +1187,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
callback = pm_late_early_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_late_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_late, dev);
+ return 0;
+ }
+
+ return __device_suspend_late(dev, pm_transition, false);
}
/**
@@ -964,20 +1233,22 @@ static int dpm_suspend_late(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state);
+ error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -986,17 +1257,19 @@ static int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "late");
-
+ }
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@@ -1027,17 +1300,20 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
* @cb: Suspend callback to execute.
+ * @info: callback description, used for debug and trace output.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
- int (*cb)(struct device *dev, pm_message_t state))
+ int (*cb)(struct device *dev, pm_message_t state),
+ char *info)
{
int error;
ktime_t calltime;
calltime = initcall_debug_start(dev);
+ trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
+ trace_device_pm_callback_end(dev, error);
suspend_report_result(cb, error);
- initcall_debug_report(dev, calltime, error);
+ initcall_debug_report(dev, calltime, error, state, info);
return error;
}
@@ -1053,6 +1329,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
dpm_wait_for_children(dev, async);
@@ -1076,6 +1353,18 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (dev->power.direct_complete) {
+ if (pm_runtime_status_suspended(dev)) {
+ pm_runtime_disable(dev);
+ if (pm_runtime_suspended_if_enabled(dev))
+ goto Complete;
+
+ pm_runtime_enable(dev);
+ }
+ dev->power.direct_complete = false;
+ }
+
+ dpm_watchdog_set(&wd, dev);
device_lock(dev);
if (dev->pm_domain) {
@@ -1097,7 +1386,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend);
+ error = legacy_suspend(dev, state, dev->class->suspend,
+ "legacy class ");
goto End;
}
}
@@ -1108,7 +1398,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy bus ");
- error = legacy_suspend(dev, state, dev->bus->suspend);
+ error = legacy_suspend(dev, state, dev->bus->suspend,
+ "legacy bus ");
goto End;
}
}
@@ -1123,13 +1414,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
+ struct device *parent = dev->parent;
+
dev->power.is_suspended = true;
- if (dev->power.wakeup_path
- && dev->parent && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (parent) {
+ spin_lock_irq(&parent->power.lock);
+
+ dev->parent->power.direct_complete = false;
+ if (dev->power.wakeup_path
+ && !dev->parent->power.ignore_children)
+ dev->parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+ }
}
device_unlock(dev);
+ dpm_watchdog_clear(&wd);
Complete:
complete_all(&dev->power.completion);
@@ -1155,7 +1456,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
static int device_suspend(struct device *dev)
{
- INIT_COMPLETION(dev->power.completion);
+ reinit_completion(&dev->power.completion);
if (pm_async_enabled && dev->power.async_suspend) {
get_device(dev);
@@ -1175,8 +1476,11 @@ int dpm_suspend(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1210,6 +1514,7 @@ int dpm_suspend(pm_message_t state)
dpm_save_failed_step(SUSPEND_SUSPEND);
} else
dpm_show_time(starttime, state, NULL);
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
@@ -1225,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
char *info = NULL;
- int error = 0;
+ int ret = 0;
if (dev->power.syscore)
return 0;
@@ -1262,13 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
}
if (callback) {
- error = callback(dev);
- suspend_report_result(callback, error);
+ trace_device_pm_callback_start(dev, info, state.event);
+ ret = callback(dev);
+ trace_device_pm_callback_end(dev, ret);
}
device_unlock(dev);
- return error;
+ if (ret < 0) {
+ suspend_report_result(callback, ret);
+ pm_runtime_put(dev);
+ return ret;
+ }
+ /*
+ * A positive return value from ->prepare() means "this device appears
+ * to be runtime-suspended and its state is fine, so if it really is
+ * runtime-suspended, you can leave it in that state provided that you
+ * will do the same thing with all of its descendants". This only
+ * applies to suspend transitions, however.
+ */
+ spin_lock_irq(&dev->power.lock);
+ dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
}
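
For illustration, a minimal driver-side sketch of the direct_complete contract introduced here (the foo_* names are hypothetical, not part of this patch): a ->prepare() callback that returns a positive value when the device is runtime-suspended lets the core skip the device's suspend/resume callbacks, provided all of its descendants agree.

#include <linux/pm_runtime.h>

static int foo_prepare(struct device *dev)
{
	/*
	 * Positive return: "leave me runtime-suspended if I really am
	 * suspended"; only honored for PM_EVENT_SUSPEND transitions.
	 */
	return pm_runtime_suspended(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare = foo_prepare,
};
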
/**
@@ -1281,6 +1602,7 @@ int dpm_prepare(pm_message_t state)
{
int error = 0;
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
@@ -1311,6 +1633,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index f0077cb8e24..89ced955faf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -14,14 +14,12 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
@@ -42,7 +40,7 @@
*/
/**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
* @node: opp list node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
@@ -59,7 +57,7 @@
*
* This structure stores the OPP information for a given device.
*/
-struct opp {
+struct dev_pm_opp {
struct list_head node;
bool available;
@@ -136,7 +134,7 @@ static struct device_opp *find_device_opp(struct device *dev)
}
/**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
* @opp: opp for which voltage has to be returned for
*
* Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +148,9 @@ static struct device_opp *find_device_opp(struct device *dev)
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
tmp_opp = rcu_dereference(opp);
@@ -163,10 +161,10 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
/**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
* @opp: opp for which frequency has to be returned for
*
* Return frequency in hertz corresponding to the opp, else
@@ -180,9 +178,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
* prior to unlocking with rcu_read_unlock() to maintain the integrity of the
* pointer.
*/
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
{
- struct opp *tmp_opp;
+ struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
tmp_opp = rcu_dereference(opp);
@@ -193,10 +191,10 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
* @dev: device for which we do this operation
*
* This function returns the number of available opps if there are any,
@@ -206,10 +204,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
* internally references two RCU protected structures: device_opp and opp which
* are safe as long as we are under a common RCU locked section.
*/
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
{
struct device_opp *dev_opp;
- struct opp *temp_opp;
+ struct dev_pm_opp *temp_opp;
int count = 0;
dev_opp = find_device_opp(dev);
@@ -226,10 +224,10 @@ int opp_get_opp_count(struct device *dev)
return count;
}
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
* @available: true/false - match for available opp
@@ -254,11 +252,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -277,10 +276,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -300,10 +299,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +324,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
*
@@ -347,10 +347,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,32 +376,39 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
- * opp_add() - Add an OPP table from a table definitions
+ * dev_pm_opp_add() - Add a new OPP to the device's OPP table
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
* This function adds an opp definition to the opp list and returns status.
* The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
+ *
+ * Return:
+ * 0:		On success, or when the new OPP duplicates an existing,
+ *		available one (both freq and volt are the same)
+ * -EEXIST:	When freq is the same but volt differs, or when the new OPP
+ *		duplicates an existing one that is !opp->available
+ * -ENOMEM: Memory allocation failure
*/
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
struct device_opp *dev_opp = NULL;
- struct opp *opp, *new_opp;
+ struct dev_pm_opp *opp, *new_opp;
struct list_head *head;
/* allocate new OPP node */
- new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
return -ENOMEM;
@@ -441,15 +449,31 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
new_opp->u_volt = u_volt;
new_opp->available = true;
- /* Insert new OPP in order of increasing frequency */
+ /*
+ * Insert new OPP in order of increasing frequency
+ * and discard if already present
+ */
head = &dev_opp->opp_list;
list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
- if (new_opp->rate < opp->rate)
+ if (new_opp->rate <= opp->rate)
break;
else
head = &opp->node;
}
+ /* Duplicate OPPs ? */
+ if (new_opp->rate == opp->rate) {
+ int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+ 0 : -EEXIST;
+
+ dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+ __func__, opp->rate, opp->u_volt, opp->available,
+ new_opp->rate, new_opp->u_volt, new_opp->available);
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return ret;
+ }
+
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);
@@ -460,6 +484,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
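
A hedged usage sketch of the renamed API and of the new duplicate handling above (foo_register_opps and the frequency/voltage values are made up for illustration):

#include <linux/pm_opp.h>

static int foo_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 1100000); /* 500 MHz @ 1.1 V */
	if (ret)
		return ret;

	/*
	 * Re-adding an identical, available freq/volt pair now returns 0;
	 * the same frequency at a different voltage returns -EEXIST.
	 */
	return dev_pm_opp_add(dev, 500000000, 1100000);
}
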
/**
* opp_set_availability() - helper to set the availability of an opp
@@ -484,11 +509,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
- struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
/* keep the node allocated */
- new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
if (!new_opp) {
dev_warn(dev, "%s: Unable to create OPP\n", __func__);
return -ENOMEM;
@@ -551,13 +576,13 @@ unlock:
}
/**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
*
* Enables a provided opp. If the operation is valid, this returns 0, else the
* corresponding error value. It is meant to let users re-enable an OPP
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -565,21 +590,21 @@ unlock:
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to disable
*
* Disables a provided opp. If the operation is valid, this returns
* 0, else the corresponding error value. It is meant as a temporary
* control for users to keep this OPP unavailable until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
@@ -587,107 +612,17 @@ EXPORT_SYMBOL_GPL(opp_enable);
* this function is *NOT* called under RCU protection or in contexts where
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*/
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
-EXPORT_SYMBOL_GPL(opp_disable);
-
-#ifdef CONFIG_CPU_FREQ
-/**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
- * @dev: device for which we do this operation
- * @table: Cpufreq table returned back to caller
- *
- * Generate a cpufreq table for a provided device- this assumes that the
- * opp list is already initialized and ready for usage.
- *
- * This function allocates required memory for the cpufreq table. It is
- * expected that the caller does the required maintenance such as freeing
- * the table as required.
- *
- * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
- * if no memory available for the operation (table is not populated), returns 0
- * if successful and table is populated.
- *
- * WARNING: It is important for the callers to ensure refreshing their copy of
- * the table if any of the mentioned functions have been invoked in the interim.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * To simplify the logic, we pretend we are updater and hold relevant mutex here
- * Callers should ensure that this function is *NOT* called under RCU protection
- * or in contexts where mutex locking cannot be used.
- */
-int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- struct device_opp *dev_opp;
- struct opp *opp;
- struct cpufreq_frequency_table *freq_table;
- int i = 0;
-
- /* Pretend as if I am an updater */
- mutex_lock(&dev_opp_list_lock);
-
- dev_opp = find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
- dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
- return r;
- }
-
- freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
- (opp_get_opp_count(dev) + 1), GFP_KERNEL);
- if (!freq_table) {
- mutex_unlock(&dev_opp_list_lock);
- dev_warn(dev, "%s: Unable to allocate frequency table\n",
- __func__);
- return -ENOMEM;
- }
-
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
- if (opp->available) {
- freq_table[i].index = i;
- freq_table[i].frequency = opp->rate / 1000;
- i++;
- }
- }
- mutex_unlock(&dev_opp_list_lock);
-
- freq_table[i].index = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- *table = &freq_table[0];
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
-
-/**
- * opp_free_cpufreq_table() - free the cpufreq table
- * @dev: device for which we do this operation
- * @table: table to free
- *
- * Free up the table allocated by opp_init_cpufreq_table
- */
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- if (!table)
- return;
-
- kfree(*table);
- *table = NULL;
-}
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
-#endif /* CONFIG_CPU_FREQ */
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);
@@ -731,11 +666,9 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);
- if (opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
- continue;
- }
nr -= 2;
}
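
As a usage sketch (foo_dump_opps is hypothetical; it relies on this file's existing includes), a table populated by of_init_opp_table() can be walked in ascending frequency with the ceil search, under rcu_read_lock() as the kernel-doc above requires:

static void foo_dump_opps(struct device *dev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;

	rcu_read_lock();
	while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
		dev_info(dev, "%lu Hz @ %lu uV\n", freq,
			 dev_pm_opp_get_voltage(opp));
		freq++; /* step past the OPP just matched */
	}
	rcu_read_unlock();
}
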
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index cfc3226ec49..a21223d9592 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 71671c42ef4..36b9eb4862c 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -42,6 +42,7 @@
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
+#include <trace/events/power.h>
#include "power.h"
@@ -104,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
s32 __dev_pm_qos_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->latency);
+ 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}
/**
@@ -140,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
int ret;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
- ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
- action, value);
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
if (ret) {
- value = pm_qos_read_value(&qos->latency);
+ value = pm_qos_read_value(&qos->resume_latency);
blocking_notifier_call_chain(&dev_pm_notifiers,
(unsigned long)value,
req);
}
break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
case DEV_PM_QOS_FLAGS:
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
action, value);
@@ -185,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_head_init(&c->list);
- c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
- c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
INIT_LIST_HEAD(&qos->flags.list);
spin_lock_irq(&dev->power.lock);
@@ -223,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
pm_qos_sysfs_remove_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
@@ -236,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
goto out;
/* Flush the constraints lists for the device. */
- c = &qos->latency;
+ c = &qos->resume_latency;
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
@@ -245,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
f = &qos->flags;
list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -264,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
+static bool dev_pm_qos_invalid_request(struct device *dev,
+ struct dev_pm_qos_request *req)
+{
+ return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+ && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || dev_pm_qos_invalid_request(dev, req))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (!ret) {
+ req->dev = dev;
+ req->type = type;
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
+ return ret;
+}
+
/**
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
@@ -289,30 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value)
{
- int ret = 0;
-
- if (!dev || !req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(dev_pm_qos_request_active(req),
- "%s() called for already added request\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (IS_ERR(dev->power.qos))
- ret = -ENODEV;
- else if (!dev->power.qos)
- ret = dev_pm_qos_constraints_allocate(dev);
-
- if (!ret) {
- req->dev = dev;
- req->type = type;
- ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
- }
-
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
mutex_unlock(&dev_pm_qos_mtx);
-
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
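
A minimal sketch of the caller side, assuming a driver that wants to cap its device's resume latency at 100 us (the foo_* names are illustrative; the request is dropped again with dev_pm_qos_remove_request()):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_latency_req;

static int foo_limit_resume_latency(struct device *dev)
{
	return dev_pm_qos_add_request(dev, &foo_latency_req,
				      DEV_PM_QOS_RESUME_LATENCY, 100);
}
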
@@ -339,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -ENODEV;
switch(req->type) {
- case DEV_PM_QOS_LATENCY:
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
curr_value = req->data.pnode.prio;
break;
case DEV_PM_QOS_FLAGS:
@@ -349,6 +387,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -EINVAL;
}
+ trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
+ new_value);
if (curr_value != new_value)
ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
@@ -398,6 +438,8 @@ static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
if (IS_ERR_OR_NULL(req->dev->power.qos))
return -ENODEV;
+ trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
+ PM_QOS_DEFAULT_VALUE);
ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
return ret;
@@ -454,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
- ret = blocking_notifier_chain_register(
- dev->power.qos->latency.notifiers, notifier);
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -481,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
/* Silently return if the constraints object is not present. */
if (!IS_ERR_OR_NULL(dev->power.qos))
- retval = blocking_notifier_chain_unregister(
- dev->power.qos->latency.notifiers,
- notifier);
+ retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
mutex_unlock(&dev_pm_qos_mtx);
return retval;
@@ -524,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
* dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
* @dev: Device whose ancestor to add the request for.
* @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
* @value: Constraint latency value.
*/
int dev_pm_qos_add_ancestor_request(struct device *dev,
- struct dev_pm_qos_request *req, s32 value)
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
{
struct device *ancestor = dev->parent;
int ret = -ENODEV;
- while (ancestor && !ancestor->power.ignore_children)
- ancestor = ancestor->parent;
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+ break;
+ default:
+ ancestor = NULL;
+ }
if (ancestor)
- ret = dev_pm_qos_add_request(ancestor, req,
- DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
if (ret < 0)
req->dev = NULL;
@@ -553,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
struct dev_pm_qos_request *req = NULL;
switch(type) {
- case DEV_PM_QOS_LATENCY:
- req = dev->power.qos->latency_req;
- dev->power.qos->latency_req = NULL;
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
break;
case DEV_PM_QOS_FLAGS:
req = dev->power.qos->flags_req;
@@ -591,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
if (ret < 0) {
kfree(req);
return ret;
@@ -603,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (IS_ERR_OR_NULL(dev->power.qos))
ret = -ENODEV;
- else if (dev->power.qos->latency_req)
+ else if (dev->power.qos->resume_latency_req)
ret = -EEXIST;
if (ret < 0) {
@@ -612,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->latency_req = req;
+ dev->power.qos->resume_latency_req = req;
mutex_unlock(&dev_pm_qos_mtx);
- ret = pm_qos_sysfs_add_latency(dev);
+ ret = pm_qos_sysfs_add_resume_latency(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
out:
mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -628,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
- __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}
/**
@@ -640,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
{
mutex_lock(&dev_pm_qos_sysfs_mtx);
- pm_qos_sysfs_remove_latency(dev);
+ pm_qos_sysfs_remove_resume_latency(dev);
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
@@ -762,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
pm_runtime_put(dev);
return ret;
}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
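
DEV_PM_QOS_LATENCY_TOLERANCE requests are only accepted for devices whose bus or subsystem has filled in power.set_latency_tolerance (see dev_pm_qos_invalid_request() above); user space then drives them through the pm_qos_latency_tolerance_us attribute. A hedged sketch of the hook, with a stubbed hypothetical hardware helper:

/* Hypothetical hardware hook; a real driver would program LTR registers. */
static void foo_hw_program_ltr(struct device *dev, s32 val) { }

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	/* val < 0 (PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) means "auto". */
	foo_hw_program_ltr(dev, val);
}

static void foo_setup_latency_tolerance(struct device *dev)
{
	/* Set before device_add() so dpm_sysfs_add() exposes the attribute. */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
}
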
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ef13ad08afb..67c7938e430 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
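
The wrappers above use C's function-returning-a-function-pointer declarator syntax; with a typedef (illustrative only, not part of the patch) the same lookup reads more conventionally:

typedef int (*rpm_callback_t)(struct device *);

static rpm_callback_t rpm_get_suspend_cb_alt(struct device *dev)
{
	return RPM_GET_CALLBACK(dev, runtime_suspend);
}
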
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
@@ -258,7 +295,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
* Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
@@ -293,11 +331,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
/* Pending requests need to be canceled. */
dev->power.request = RPM_REQ_NONE;
- if (dev->power.no_callbacks) {
- /* Assume ->runtime_idle() callback would have suspended. */
- retval = rpm_suspend(dev, rpmflags);
+ if (dev->power.no_callbacks)
goto out;
- }
/* Carry out an asynchronous or a synchronous idle notification. */
if (rpmflags & RPM_ASYNC) {
@@ -306,34 +341,23 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- goto out;
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
+ return 0;
}
dev->power.idle_notification = true;
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
+ callback = rpm_get_idle_cb(dev);
if (callback)
- __rpm_callback(callback, dev);
+ retval = __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
trace_rpm_return_int(dev, _THIS_IP_, retval);
- return retval;
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
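
Driver-side, the new contract means ->runtime_idle() no longer needs to call pm_runtime_suspend() itself: returning 0 lets the core autosuspend, while a negative value vetoes it. A sketch with a stubbed hypothetical busy check:

static bool foo_device_busy(struct device *dev) { return false; } /* hypothetical */

static int foo_runtime_idle(struct device *dev)
{
	if (foo_device_busy(dev))
		return -EBUSY;	/* veto the suspend */

	return 0;	/* core follows up with rpm_suspend(dev, RPM_AUTO) */
}
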
/**
@@ -493,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
if (retval)
@@ -725,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -1131,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
* cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
* suspended after its runtime PM has been disabled.
@@ -1402,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put(dev->parent);
}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring it
+ * into the suspended state. Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
+ *
+ * Typically, this function may be invoked from a system suspend callback to
+ * make sure the device is put into a low-power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+	 * Note that pm_runtime_status_suspended() returns false when
+	 * CONFIG_PM_RUNTIME is unset, which means the device will be
+	 * put into the low-power state in that case.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function, the device is expected to have been brought
+ * into a low-power state by a call to pm_runtime_force_suspend(). Here we
+ * reverse those actions, bringing the device back to full power. We also
+ * update the runtime PM status and re-enable runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback to
+ * make sure the device is brought back to full power.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
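
The pair is designed to be plugged straight into a driver's system sleep ops when the runtime PM callbacks already do the right thing; an independent sketch (the foo_runtime_* callbacks are hypothetical stubs):

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { return 0; } /* hypothetical */
static int foo_runtime_resume(struct device *dev) { return 0; }  /* hypothetical */

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
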
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a53ebd26570..95b181d1ca6 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -206,7 +206,7 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
if (!dev->power.use_autosuspend)
return -EIO;
- if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
+ if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
device_lock(dev);
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
-static ssize_t pm_qos_latency_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
}
-static ssize_t pm_qos_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_latency_show, pm_qos_latency_store);
+ pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sprintf(buf, "auto\n");
+ else if (value == PM_QOS_LATENCY_ANY)
+ return sprintf(buf, "any\n");
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value)) {
+ if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL; /* don't pass an uninitialized value on */
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+ pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_latency_attrs,
+ .attrs = pm_qos_latency_tolerance_attrs,
};
static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
if (rc)
goto err_out;
}
-
if (device_can_wakeup(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
- if (rc) {
- if (pm_runtime_callbacks_present(dev))
- sysfs_unmerge_group(&dev->kobj,
- &pm_runtime_attr_group);
- goto err_out;
- }
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
}
return 0;
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}
int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 79715e7fa43..eb1bd2ecad8 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)
{
int ret = 0;
+ if (!dev)
+ return -EINVAL;
+
if (enable) {
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
+ if (dev->power.can_wakeup)
+ device_wakeup_disable(dev);
+
device_set_wakeup_capable(dev, false);
}
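
With this change, the disable path actually tears down a previously enabled wakeup source instead of only clearing the capability flag, and a NULL device is now rejected. Typical hypothetical usage:

static int foo_probe(struct device *dev)
{
	return device_init_wakeup(dev, true);	/* capable and enabled */
}

static int foo_remove(struct device *dev)
{
	device_init_wakeup(dev, false);	/* now also disables the source */
	return 0;
}
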
@@ -659,7 +665,7 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);
-static void print_active_wakeup_sources(void)
+void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
int active = 0;
@@ -683,6 +689,7 @@ static void print_active_wakeup_sources(void)
last_activity_ws->name);
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
@@ -707,8 +714,10 @@ bool pm_wakeup_pending(void)
}
spin_unlock_irqrestore(&events_lock, flags);
- if (ret)
- print_active_wakeup_sources();
+ if (ret) {
+ pr_info("PM: Wakeup pending, aborting suspend\n");
+ pm_print_active_wakeup_sources();
+ }
return ret;
}
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index f0d30543fcc..4251570610c 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
@@ -15,6 +15,9 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+config REGMAP_SPMI
+ tristate
+
config REGMAP_MMIO
tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index cf129980abd..a7c670b4123 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,5 +3,6 @@ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index c130536e0ab..7d1326985be 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -44,7 +44,6 @@ struct regmap_format {
struct regmap_async {
struct list_head list;
- struct work_struct cleanup;
struct regmap *map;
void *work_buf;
};
@@ -52,6 +51,7 @@ struct regmap_async {
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
+ unsigned long spinlock_flags;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
@@ -63,9 +63,11 @@ struct regmap {
void *bus_context;
const char *name;
+ bool async;
spinlock_t async_lock;
wait_queue_head_t async_waitq;
struct list_head async_list;
+ struct list_head async_free;
int async_ret;
#ifdef CONFIG_DEBUG_FS
@@ -127,14 +129,13 @@ struct regmap {
void *cache;
u32 cache_dirty;
- unsigned long *cache_present;
- unsigned int cache_present_nbits;
-
struct reg_default *patch;
int patch_regs;
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
@@ -148,6 +149,7 @@ struct regcache_ops {
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+ int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
};
bool regmap_writeable(struct regmap *map, unsigned int reg);
@@ -174,6 +176,17 @@ struct regmap_range_node {
unsigned int window_len;
};
+struct regmap_field {
+ struct regmap *regmap;
+ unsigned int mask;
+ /* lsb */
+ unsigned int shift;
+ unsigned int reg;
+
+ unsigned int id_size;
+ unsigned int id_offset;
+};
+
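
struct regmap_field backs the regmap_field_*() API; a minimal sketch, assuming (for illustration only) a field occupying bits [7:4] of register 0x10:

#include <linux/err.h>
#include <linux/regmap.h>

static const struct reg_field foo_field = REG_FIELD(0x10, 4, 7);

static int foo_toggle_field(struct regmap *map)
{
	struct regmap_field *field;
	unsigned int val;
	int ret;

	field = regmap_field_alloc(map, foo_field);
	if (IS_ERR(field))
		return PTR_ERR(field);

	ret = regmap_field_read(field, &val);
	if (!ret)
		ret = regmap_field_write(field, val ^ 0x1);

	regmap_field_free(field);
	return ret;
}
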
#ifdef CONFIG_DEBUG_FS
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
@@ -193,6 +206,7 @@ int regcache_write(struct regmap *map,
unsigned int reg, unsigned int value);
int regcache_sync(struct regmap *map);
int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end);
@@ -208,19 +222,9 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_set_reg_present(struct regmap *map, unsigned int reg);
-
-static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
-{
- if (!map->cache_present)
- return true;
- if (reg > map->cache_present_nbits)
- return false;
- return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
-}
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async);
+ const void *val, size_t val_len);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 02f490bad30..6a7e4fa1285 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -23,14 +23,16 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
- /* the actual rbtree node holding this block */
- struct rb_node node;
- /* base register handled by this block */
- unsigned int base_reg;
/* block of adjacent registers */
void *block;
+ /* Which registers are present */
+ long *cache_present;
+ /* base register handled by this block */
+ unsigned int base_reg;
/* number of registers available in the block */
unsigned int blklen;
+ /* the actual rbtree node holding this block */
+ struct rb_node node;
} __attribute__ ((packed));
struct regcache_rbtree_ctx {
@@ -57,6 +59,7 @@ static void regcache_rbtree_set_register(struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int idx, unsigned int val)
{
+ set_bit(idx, rbnode->cache_present);
regcache_set_val(map, rbnode->block, idx, val);
}
@@ -146,13 +149,13 @@ static int rbtree_show(struct seq_file *s, void *ignored)
map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
- mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = container_of(node, struct regcache_rbtree_node, node);
mem_size += sizeof(*n);
mem_size += (n->blklen * map->cache_word_size);
+ mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
this_registers = ((top - base) / map->reg_stride) + 1;
@@ -245,6 +248,7 @@ static int regcache_rbtree_exit(struct regmap *map)
rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
next = rb_next(&rbtree_node->node);
rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+ kfree(rbtree_node->cache_present);
kfree(rbtree_node->block);
kfree(rbtree_node);
}
@@ -265,7 +269,7 @@ static int regcache_rbtree_read(struct regmap *map,
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- if (!regcache_reg_present(map, reg))
+ if (!test_bit(reg_tmp, rbnode->cache_present))
return -ENOENT;
*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
} else {
@@ -278,32 +282,101 @@ static int regcache_rbtree_read(struct regmap *map,
static int regcache_rbtree_insert_to_block(struct regmap *map,
struct regcache_rbtree_node *rbnode,
- unsigned int pos, unsigned int reg,
+ unsigned int base_reg,
+ unsigned int top_reg,
+ unsigned int reg,
unsigned int value)
{
+ unsigned int blklen;
+ unsigned int pos, offset;
+ unsigned long *present;
u8 *blk;
+ blklen = (top_reg - base_reg) / map->reg_stride + 1;
+ pos = (reg - base_reg) / map->reg_stride;
+ offset = (rbnode->base_reg - base_reg) / map->reg_stride;
+
blk = krealloc(rbnode->block,
- (rbnode->blklen + 1) * map->cache_word_size,
+ blklen * map->cache_word_size,
GFP_KERNEL);
if (!blk)
return -ENOMEM;
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
/* insert the register value in the correct place in the rbnode block */
- memmove(blk + (pos + 1) * map->cache_word_size,
- blk + pos * map->cache_word_size,
- (rbnode->blklen - pos) * map->cache_word_size);
+ if (pos == 0) {
+ memmove(blk + offset * map->cache_word_size,
+ blk, rbnode->blklen * map->cache_word_size);
+ bitmap_shift_right(present, present, offset, blklen);
+ }
/* update the rbnode block, its size and the base register */
rbnode->block = blk;
- rbnode->blklen++;
- if (!pos)
- rbnode->base_reg = reg;
+ rbnode->blklen = blklen;
+ rbnode->base_reg = base_reg;
+ rbnode->cache_present = present;
regcache_rbtree_set_register(map, rbnode, pos, value);
return 0;
}
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+ struct regcache_rbtree_node *rbnode;
+ const struct regmap_range *range;
+ int i;
+
+ rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+ if (!rbnode)
+ return NULL;
+
+ /* If there is a read table then use it to guess at an allocation */
+ if (map->rd_table) {
+ for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+ if (regmap_reg_in_range(reg,
+ &map->rd_table->yes_ranges[i]))
+ break;
+ }
+
+ if (i != map->rd_table->n_yes_ranges) {
+ range = &map->rd_table->yes_ranges[i];
+ rbnode->blklen = (range->range_max - range->range_min) /
+ map->reg_stride + 1;
+ rbnode->base_reg = range->range_min;
+ }
+ }
+
+ if (!rbnode->blklen) {
+ rbnode->blklen = 1;
+ rbnode->base_reg = reg;
+ }
+
+ rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+ GFP_KERNEL);
+ if (!rbnode->block)
+ goto err_free;
+
+ rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
+ sizeof(*rbnode->cache_present), GFP_KERNEL);
+ if (!rbnode->cache_present)
+ goto err_free_block;
+
+ return rbnode;
+
+err_free_block:
+ kfree(rbnode->block);
+err_free:
+ kfree(rbnode);
+ return NULL;
+}
+
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
@@ -311,15 +384,9 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
struct regcache_rbtree_node *rbnode, *rbnode_tmp;
struct rb_node *node;
unsigned int reg_tmp;
- unsigned int pos;
- int i;
int ret;
rbtree_ctx = map->cache;
- /* update the reg_present bitmap, make space if necessary */
- ret = regcache_set_reg_present(map, reg);
- if (ret < 0)
- return ret;
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it.
@@ -329,48 +396,53 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
} else {
+ unsigned int base_reg, top_reg;
+ unsigned int new_base_reg, new_top_reg;
+ unsigned int min, max;
+ unsigned int max_dist;
+
+ max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+ map->cache_word_size;
+ if (reg < max_dist)
+ min = 0;
+ else
+ min = reg - max_dist;
+ max = reg + max_dist;
+
/* look for an adjacent register to the one we are about to add */
for (node = rb_first(&rbtree_ctx->root); node;
node = rb_next(node)) {
rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
node);
- for (i = 0; i < rbnode_tmp->blklen; i++) {
- reg_tmp = rbnode_tmp->base_reg +
- (i * map->reg_stride);
- if (abs(reg_tmp - reg) != map->reg_stride)
- continue;
- /* decide where in the block to place our register */
- if (reg_tmp + map->reg_stride == reg)
- pos = i + 1;
- else
- pos = i;
- ret = regcache_rbtree_insert_to_block(map,
- rbnode_tmp,
- pos, reg,
- value);
- if (ret)
- return ret;
- rbtree_ctx->cached_rbnode = rbnode_tmp;
- return 0;
+
+ regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
+ &base_reg, &top_reg);
+
+ if (base_reg <= max && top_reg >= min) {
+ new_base_reg = min(reg, base_reg);
+ new_top_reg = max(reg, top_reg);
+ } else {
+ continue;
}
+
+ ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+ new_base_reg,
+ new_top_reg, reg,
+ value);
+ if (ret)
+ return ret;
+ rbtree_ctx->cached_rbnode = rbnode_tmp;
+ return 0;
}
- /* we did not manage to find a place to insert it in an existing
- * block so create a new rbnode with a single register in its block.
- * This block will get populated further if any other adjacent
- * registers get modified in the future.
+
+ /* We did not manage to find a place to insert it in
+ * an existing block so create a new rbnode.
*/
- rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+ rbnode = regcache_rbtree_node_alloc(map, reg);
if (!rbnode)
return -ENOMEM;
- rbnode->blklen = sizeof(*rbnode);
- rbnode->base_reg = reg;
- rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
- GFP_KERNEL);
- if (!rbnode->block) {
- kfree(rbnode);
- return -ENOMEM;
- }
- regcache_rbtree_set_register(map, rbnode, 0, value);
+ regcache_rbtree_set_register(map, rbnode,
+ reg - rbnode->base_reg, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
@@ -384,30 +456,34 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
int ret;
- int base, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
- if (rbnode->base_reg > max)
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
break;
- if (rbnode->base_reg + rbnode->blklen < min)
+ if (top_reg < min)
continue;
- if (min > rbnode->base_reg)
- base = min - rbnode->base_reg;
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
else
- base = 0;
+ start = 0;
- if (max < rbnode->base_reg + rbnode->blklen)
- end = max - rbnode->base_reg + 1;
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
else
end = rbnode->blklen;
- ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
- base, end);
+ ret = regcache_sync_block(map, rbnode->block,
+ rbnode->cache_present,
+ rbnode->base_reg, start, end);
if (ret != 0)
return ret;
}
@@ -415,6 +491,42 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
return regmap_async_complete(map);
}
+static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbnode;
+ struct rb_node *node;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
+
+ rbtree_ctx = map->cache;
+ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
+ break;
+ if (top_reg < min)
+ continue;
+
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
+ else
+ start = 0;
+
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
+ else
+ end = rbnode->blklen;
+
+ bitmap_clear(rbnode->cache_present, start, end - start);
+ }
+
+ return 0;
+}
+
struct regcache_ops regcache_rbtree_ops = {
.type = REGCACHE_RBTREE,
.name = "rbtree",
@@ -422,5 +534,6 @@ struct regcache_ops regcache_rbtree_ops = {
.exit = regcache_rbtree_exit,
.read = regcache_rbtree_read,
.write = regcache_rbtree_write,
- .sync = regcache_rbtree_sync
+ .sync = regcache_rbtree_sync,
+ .drop = regcache_rbtree_drop,
};
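
For orientation, a driver opts into the rbtree cache exercised above through its regmap_config; a minimal sketch, with the foo_* names, register width and defaults invented for illustration:

	static const struct reg_default foo_reg_defaults[] = {
		{ 0x00, 0x1234 },
		{ 0x01, 0x0000 },
	};

	static const struct regmap_config foo_regmap_config = {
		.reg_bits = 8,
		.val_bits = 16,
		.max_register = 0x7f,
		.reg_defaults = foo_reg_defaults,
		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
		.cache_type = REGCACHE_RBTREE,	/* selects the ops above */
	};
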
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 507ee2da0f6..29b4128da0b 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -121,8 +121,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->reg_defaults_raw = config->reg_defaults_raw;
map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
- map->cache_present = NULL;
- map->cache_present_nbits = 0;
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -181,7 +179,6 @@ void regcache_exit(struct regmap *map)
BUG_ON(!map->cache_ops);
- kfree(map->cache_present);
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
@@ -241,15 +238,45 @@ int regcache_write(struct regmap *map,
BUG_ON(!map->cache_ops);
- if (!regmap_writeable(map, reg))
- return -EIO;
-
if (!regmap_volatile(map, reg))
return map->cache_ops->write(map, reg, value);
return 0;
}
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ unsigned int reg;
+
+ for (reg = min; reg <= max; reg += map->reg_stride) {
+ unsigned int val;
+ int ret;
+
+ if (regmap_volatile(map, reg) ||
+ !regmap_writeable(map, reg))
+ continue;
+
+ ret = regcache_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, reg);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, reg, val);
+ map->cache_bypass = 0;
+ if (ret)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+ }
+
+ return 0;
+}
+
/**
* regcache_sync: Sync the register cache with the hardware.
*
@@ -268,7 +295,7 @@ int regcache_sync(struct regmap *map)
const char *name;
unsigned int bypass;
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+ BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
/* Remember the initial bypass state */
@@ -281,13 +308,11 @@ int regcache_sync(struct regmap *map)
if (!map->cache_dirty)
goto out;
+ map->async = true;
+
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
- if (map->patch[i].reg % map->reg_stride) {
- ret = -EINVAL;
- goto out;
- }
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -297,17 +322,24 @@ int regcache_sync(struct regmap *map)
}
map->cache_bypass = 0;
- ret = map->cache_ops->sync(map, 0, map->max_register);
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, 0, map->max_register);
+ else
+ ret = regcache_default_sync(map, 0, map->max_register);
if (ret == 0)
map->cache_dirty = false;
out:
- trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
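
A typical caller of regcache_sync() is a resume path; a minimal sketch, assuming a hypothetical driver-private struct foo carrying the regmap. With this change the sync also works for cache types without a sync op, via regcache_default_sync():

	static int foo_resume(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		/* Stop routing writes into the cache, then replay it. */
		regcache_cache_only(foo->regmap, false);
		return regcache_sync(foo->regmap);
	}
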
@@ -331,7 +363,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
const char *name;
unsigned int bypass;
- BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+ BUG_ON(!map->cache_ops);
map->lock(map->lock_arg);
@@ -346,19 +378,59 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
if (!map->cache_dirty)
goto out;
- ret = map->cache_ops->sync(map, min, max);
+ map->async = true;
+
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, min, max);
+ else
+ ret = regcache_default_sync(map, min, max);
out:
- trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
+ map->async = false;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map->dev, name, "stop region");
+
return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
/**
+ * regcache_drop_region: Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ int ret = 0;
+
+ if (!map->cache_ops || !map->cache_ops->drop)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ trace_regcache_drop_region(map->dev, min, max);
+
+ ret = map->cache_ops->drop(map, min, max);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
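
A hedged usage sketch for the new API: discard cached values for a register window that the hardware reset on its own (the range is illustrative). Cache types without a .drop op, unlike rbtree above, return -EINVAL:

	ret = regcache_drop_region(foo->regmap, 0x10, 0x1f);
	if (ret)
		dev_warn(dev, "failed to drop cached region: %d\n", ret);
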
+
+/**
* regcache_cache_only: Put a register map into cache only mode
*
* @map: map to configure
@@ -418,42 +490,6 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-int regcache_set_reg_present(struct regmap *map, unsigned int reg)
-{
- unsigned long *cache_present;
- unsigned int cache_present_size;
- unsigned int nregs;
- int i;
-
- nregs = reg + 1;
- cache_present_size = BITS_TO_LONGS(nregs);
- cache_present_size *= sizeof(long);
-
- if (!map->cache_present) {
- cache_present = kmalloc(cache_present_size, GFP_KERNEL);
- if (!cache_present)
- return -ENOMEM;
- bitmap_zero(cache_present, nregs);
- map->cache_present = cache_present;
- map->cache_present_nbits = nregs;
- }
-
- if (nregs > map->cache_present_nbits) {
- cache_present = krealloc(map->cache_present,
- cache_present_size, GFP_KERNEL);
- if (!cache_present)
- return -ENOMEM;
- for (i = 0; i < nregs; i++)
- if (i >= map->cache_present_nbits)
- clear_bit(i, cache_present);
- map->cache_present = cache_present;
- map->cache_present_nbits = nregs;
- }
-
- set_bit(reg, map->cache_present);
- return 0;
-}
-
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val)
{
@@ -545,7 +581,16 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
return -ENOENT;
}
+static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
+{
+ if (!cache_present)
+ return true;
+
+ return test_bit(idx, cache_present);
+}
+
static int regcache_sync_block_single(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base,
unsigned int start, unsigned int end)
{
@@ -555,7 +600,7 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
- if (!regcache_reg_present(map, regtmp))
+ if (!regcache_reg_present(cache_present, i))
continue;
val = regcache_get_val(map, block, i);
@@ -588,15 +633,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
if (*data == NULL)
return 0;
- count = cur - base;
+ count = (cur - base) / map->reg_stride;
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
- count * val_bytes, count, base, cur - 1);
+ count * val_bytes, count, base, cur - map->reg_stride);
map->cache_bypass = 1;
- ret = _regmap_raw_write(map, base, *data, count * val_bytes,
- false);
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes);
map->cache_bypass = 0;
@@ -606,6 +650,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
}
static int regcache_sync_block_raw(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
@@ -618,7 +663,7 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
- if (!regcache_reg_present(map, regtmp)) {
+ if (!regcache_reg_present(cache_present, i)) {
ret = regcache_sync_block_raw_flush(map, &data,
base, regtmp);
if (ret != 0)
@@ -644,17 +689,19 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
}
- return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
if (regmap_can_raw_write(map))
- return regcache_sync_block_raw(map, block, block_base,
- start, end);
+ return regcache_sync_block_raw(map, block, cache_present,
+ block_base, start, end);
else
- return regcache_sync_block_single(map, block, block_base,
- start, end);
+ return regcache_sync_block_single(map, block, cache_present,
+ block_base, start, end);
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 975719bc345..45d812c0ea7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -15,10 +15,19 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
+#include <linux/list.h>
#include "internal.h"
+struct regmap_debugfs_node {
+ struct regmap *map;
+ const char *name;
+ struct list_head link;
+};
+
static struct dentry *regmap_debugfs_root;
+static LIST_HEAD(regmap_debugfs_early_list);
+static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
@@ -84,6 +93,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int fpos_offset;
unsigned int reg_offset;
+ /* Suppress the cache if we're using a subrange */
+ if (base)
+ return base;
+
/*
* If we don't have a cache build one so we don't have to do a
* linear scan each time.
@@ -145,7 +158,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
mutex_unlock(&map->cache_lock);
- return c->base_reg + reg_offset;
+ return c->base_reg + (reg_offset * map->reg_stride);
}
*pos = c->max;
@@ -277,11 +290,11 @@ static ssize_t regmap_map_write_file(struct file *file,
reg = simple_strtoul(start, &start, 16);
while (*start == ' ')
start++;
- if (strict_strtoul(start, 16, &value))
+ if (kstrtoul(start, 16, &value))
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
ret = regmap_write(map, reg, value);
if (ret < 0)
@@ -461,6 +474,20 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct rb_node *next;
struct regmap_range_node *range_node;
+ /* If we don't have the debugfs root yet, postpone init */
+ if (!regmap_debugfs_root) {
+ struct regmap_debugfs_node *node;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return;
+ node->map = map;
+ node->name = name;
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_add(&node->link, &regmap_debugfs_early_list);
+ mutex_unlock(&regmap_debugfs_early_lock);
+ return;
+ }
+
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
@@ -484,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("range", 0400, map->debugfs,
map, &regmap_reg_ranges_fops);
- if (map->max_register) {
+ if (map->max_register || regmap_readable(map, 0)) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
@@ -515,18 +542,42 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
- debugfs_remove_recursive(map->debugfs);
- mutex_lock(&map->cache_lock);
- regmap_debugfs_free_dump_cache(map);
- mutex_unlock(&map->cache_lock);
- kfree(map->debugfs_name);
+ if (map->debugfs) {
+ debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
+ regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
+ kfree(map->debugfs_name);
+ } else {
+ struct regmap_debugfs_node *node, *tmp;
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
+ link) {
+ if (node->map == map) {
+ list_del(&node->link);
+ kfree(node);
+ }
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
+ }
}
void regmap_debugfs_initcall(void)
{
+ struct regmap_debugfs_node *node, *tmp;
+
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
if (!regmap_debugfs_root) {
pr_warn("regmap: Failed to create debugfs root\n");
return;
}
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
+ regmap_debugfs_init(node->map, node->name);
+ list_del(&node->link);
+ kfree(node);
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index fa6bf5279d2..ca193d1ef47 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -13,7 +13,79 @@
#include <linux/regmap.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/init.h>
+
+
+static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_byte = {
+ .reg_write = regmap_smbus_byte_reg_write,
+ .reg_read = regmap_smbus_byte_reg_read,
+};
+
+static int regmap_smbus_word_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_word_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xffff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word = {
+ .reg_write = regmap_smbus_word_reg_write,
+ .reg_read = regmap_smbus_word_reg_read,
+};
static int regmap_i2c_write(void *context, const void *data, size_t count)
{
@@ -98,6 +170,23 @@ static struct regmap_bus regmap_i2c = {
.read = regmap_i2c_read,
};
+static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
+ return &regmap_i2c;
+ else if (config->val_bits == 16 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return &regmap_smbus_word;
+ else if (config->val_bits == 8 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return &regmap_smbus_byte;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
/**
* regmap_init_i2c(): Initialise register map
*
@@ -110,7 +199,12 @@ static struct regmap_bus regmap_i2c = {
struct regmap *regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config)
{
- return regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
+ const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return regmap_init(&i2c->dev, bus, &i2c->dev, config);
}
EXPORT_SYMBOL_GPL(regmap_init_i2c);
@@ -127,7 +221,12 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c);
struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config)
{
- return devm_regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
+ const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
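
With the fallback above, a device behind an SMBus-only adapter can still use the generic helpers; a sketch assuming an invented device whose 8-bit registers and values select regmap_smbus_byte:

	static const struct regmap_config foo_i2c_config = {
		.reg_bits = 8,	/* with val_bits == 8, matches the */
		.val_bits = 8,	/* I2C_FUNC_SMBUS_BYTE_DATA fallback */
		.max_register = 0xff,
	};

	static int foo_i2c_probe(struct i2c_client *i2c,
				 const struct i2c_device_id *id)
	{
		struct regmap *map;

		map = devm_regmap_init_i2c(i2c, &foo_i2c_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		return 0;
	}
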
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1643e889baf..6299a50a596 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -10,13 +10,13 @@
* published by the Free Software Foundation.
*/
-#include <linux/export.h>
#include <linux/device.h>
-#include <linux/regmap.h>
-#include <linux/irq.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include "internal.h"
@@ -105,6 +105,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
"Failed to sync wakes in %x: %d\n",
reg, ret);
}
+
+ if (!d->chip->init_ack_masked)
+ continue;
+ /*
+ * Ack all the masked interrupts unconditionally; otherwise a
+ * masked interrupt that has not been acked will be ignored by
+ * the interrupt handler and may cause an interrupt storm.
+ */
+ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
+ reg = d->chip->ack_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
@@ -255,7 +271,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
- if (data->status_buf[i] && chip->ack_base) {
+ if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
reg = chip->ack_base +
(i * map->reg_stride * data->irq_reg_stride);
ret = regmap_write(map, reg, data->status_buf[i]);
@@ -331,6 +347,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int ret = -ENOMEM;
u32 reg;
+ if (chip->num_regs <= 0)
+ return -EINVAL;
+
for (i = 0; i < chip->num_irqs; i++) {
if (chip->irqs[i].reg_offset % map->reg_stride)
return -EINVAL;
@@ -352,8 +371,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d)
return -ENOMEM;
- *data = d;
-
d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->status_buf)
@@ -418,6 +435,31 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
reg, ret);
goto err_alloc;
}
+
+ if (!chip->init_ack_masked)
+ continue;
+
+ /* Ack masked but set interrupts */
+ reg = chip->status_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_read(map, reg, &d->status_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
+ reg = chip->ack_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_write(map, reg,
+ d->status_buf[i] & d->mask_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
}
/* Wake is disabled by default */
@@ -465,6 +507,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_domain;
}
+ *data = d;
+
return 0;
err_domain:
@@ -492,7 +536,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
return;
free_irq(irq, d);
- /* We should unmap the domain but... */
+ irq_domain_remove(d->domain);
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
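
For context, a sketch of wiring up a chip that relies on the new ack handling; all FOO_* register addresses and the layout are invented:

	static const struct regmap_irq foo_irqs[] = {
		{ .reg_offset = 0, .mask = BIT(0) },
	};

	static struct regmap_irq_chip foo_irq_chip = {
		.name = "foo",
		.status_base = FOO_IRQ_STATUS,
		.mask_base = FOO_IRQ_MASK,
		.ack_base = FOO_IRQ_ACK,
		.init_ack_masked = true,	/* ack masked-but-set IRQs */
		.num_regs = 1,
		.irqs = foo_irqs,
		.num_irqs = ARRAY_SIZE(foo_irqs),
	};

	ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0,
				  &foo_irq_chip, &irq_data);
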
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 98745dd77e8..04a329a377e 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -18,7 +18,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -26,27 +25,83 @@
struct regmap_mmio_context {
void __iomem *regs;
+ unsigned reg_bytes;
unsigned val_bytes;
+ unsigned pad_bytes;
struct clk *clk;
};
+static inline void regmap_mmio_regsize_check(size_t reg_size)
+{
+ switch (reg_size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef CONFIG_64BIT
+ case 8:
+#endif
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+#ifdef CONFIG_64BIT
+ case 64:
+#endif
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline void regmap_mmio_count_check(size_t count, u32 offset)
+{
+ BUG_ON(count <= offset);
+}
+
+static inline unsigned int
+regmap_mmio_get_offset(const void *reg, size_t reg_size)
+{
+ switch (reg_size) {
+ case 1:
+ return *(u8 *)reg;
+ case 2:
+ return *(u16 *)reg;
+ case 4:
+ return *(u32 *)reg;
+#ifdef CONFIG_64BIT
+ case 8:
+ return *(u64 *)reg;
+#endif
+ default:
+ BUG();
+ }
+}
+
static int regmap_mmio_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
struct regmap_mmio_context *ctx = context;
- u32 offset;
+ unsigned int offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = *(u32 *)reg;
+ offset = regmap_mmio_get_offset(reg, reg_size);
while (val_size) {
switch (ctx->val_bytes) {
@@ -73,7 +128,7 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -81,9 +136,13 @@ static int regmap_mmio_gather_write(void *context,
static int regmap_mmio_write(void *context, const void *data, size_t count)
{
- BUG_ON(count < 4);
+ struct regmap_mmio_context *ctx = context;
+ unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
- return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+ regmap_mmio_count_check(count, offset);
+
+ return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
+ data + offset, count - offset);
}
static int regmap_mmio_read(void *context,
@@ -91,18 +150,18 @@ static int regmap_mmio_read(void *context,
void *val, size_t val_size)
{
struct regmap_mmio_context *ctx = context;
- u32 offset;
+ unsigned int offset;
int ret;
- BUG_ON(reg_size != 4);
+ regmap_mmio_regsize_check(reg_size);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = *(u32 *)reg;
+ offset = regmap_mmio_get_offset(reg, reg_size);
while (val_size) {
switch (ctx->val_bytes) {
@@ -129,7 +188,7 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -139,7 +198,7 @@ static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
clk_unprepare(ctx->clk);
clk_put(ctx->clk);
}
@@ -165,8 +224,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
int min_stride;
int ret;
- if (config->reg_bits != 32)
- return ERR_PTR(-EINVAL);
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
if (config->pad_bits)
return ERR_PTR(-EINVAL);
@@ -209,6 +269,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->reg_bytes = config->reg_bits / 8;
+ ctx->pad_bytes = config->pad_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
return ctx;
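
Usage sketch for the widened register-size support, assuming an already-ioremapped base and an invented 32-bit register layout:

	static const struct regmap_config foo_mmio_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x100,
	};

	map = devm_regmap_init_mmio(dev, base, &foo_mmio_config);
	if (IS_ERR(map))
		return PTR_ERR(map);
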
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 4c506bd940f..0eb3097c0d7 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/init.h>
#include <linux/module.h>
#include "internal.h"
@@ -73,7 +72,8 @@ static int regmap_spi_async_write(void *context,
spi_message_init(&async->m);
spi_message_add_tail(&async->t[0], &async->m);
- spi_message_add_tail(&async->t[1], &async->m);
+ if (val)
+ spi_message_add_tail(&async->t[1], &async->m);
async->m.complete = regmap_spi_complete;
async->m.context = async;
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
new file mode 100644
index 00000000000..d7026dc3338
--- /dev/null
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -0,0 +1,256 @@
+/*
+ * Register map access API - SPMI support
+ *
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Based on regmap-i2c.c:
+ * Copyright 2011 Wolfson Microelectronics plc
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_base_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ while (val_size-- && !err)
+ err = spmi_register_read(context, addr++, val++);
+
+ return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ const u8 *data = val;
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ /*
+ * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+ * use it when possible.
+ */
+ if (addr == 0 && val_size) {
+ err = spmi_register_zero_write(context, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+ while (val_size) {
+ err = spmi_register_write(context, addr, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 1);
+ return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+ count - 1);
+}
+
+static struct regmap_bus regmap_spmi_base = {
+ .read = regmap_spmi_base_read,
+ .write = regmap_spmi_base_write,
+ .gather_write = regmap_spmi_base_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_base(): Create regmap for the Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_base);
+
+/**
+ * devm_regmap_init_spmi_base(): Create managed regmap for Base register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_base);
+
+static int regmap_spmi_ext_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
+ BUG_ON(reg_size != 2);
+
+ addr = *(u16 *)reg;
+
+ /*
+ * Split accesses into two to take advantage of the more
+ * bandwidth-efficient 'Extended Register Read' command when possible
+ */
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_read(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_readl(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_ext_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
+ BUG_ON(reg_size != 2);
+
+ addr = *(u16 *)reg;
+
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_write(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_writel(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_ext_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 2);
+ return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+ count - 2);
+}
+
+static struct regmap_bus regmap_spmi_ext = {
+ .read = regmap_spmi_ext_read,
+ .write = regmap_spmi_ext_write,
+ .gather_write = regmap_spmi_ext_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * regmap_init_spmi_ext(): Create regmap for Ext register space
+ * @sdev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spmi_ext);
+
+/**
+ * devm_regmap_init_spmi_ext(): Create managed regmap for Ext register space
+ * @sdev: SPMI device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spmi_ext);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index a941dcfe759..74d8c0672cf 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -35,22 +35,17 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val);
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val);
-static void async_cleanup(struct work_struct *work)
-{
- struct regmap_async *async = container_of(work, struct regmap_async,
- cleanup);
-
- kfree(async->work_buf);
- kfree(async);
-}
-
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -65,9 +60,8 @@ bool regmap_reg_in_ranges(unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
-static bool _regmap_check_range_table(struct regmap *map,
- unsigned int reg,
- const struct regmap_access_table *table)
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table)
{
/* Check "no ranges" first */
if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
@@ -80,6 +74,7 @@ static bool _regmap_check_range_table(struct regmap *map,
return regmap_reg_in_ranges(reg, table->yes_ranges,
table->n_yes_ranges);
}
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
@@ -90,7 +85,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
return map->writeable_reg(map->dev, reg);
if (map->wr_table)
- return _regmap_check_range_table(map, reg, map->wr_table);
+ return regmap_check_range_table(map, reg, map->wr_table);
return true;
}
@@ -107,7 +102,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
return map->readable_reg(map->dev, reg);
if (map->rd_table)
- return _regmap_check_range_table(map, reg, map->rd_table);
+ return regmap_check_range_table(map, reg, map->rd_table);
return true;
}
@@ -121,9 +116,12 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
return map->volatile_reg(map->dev, reg);
if (map->volatile_table)
- return _regmap_check_range_table(map, reg, map->volatile_table);
+ return regmap_check_range_table(map, reg, map->volatile_table);
- return true;
+ if (map->cache_ops)
+ return false;
+ else
+ return true;
}
bool regmap_precious(struct regmap *map, unsigned int reg)
@@ -135,7 +133,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return map->precious_reg(map->dev, reg);
if (map->precious_table)
- return _regmap_check_range_table(map, reg, map->precious_table);
+ return regmap_check_range_table(map, reg, map->precious_table);
return false;
}
@@ -198,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
b[0] = cpu_to_be16(val << shift);
}
+static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
+{
+ __le16 *b = buf;
+
+ b[0] = cpu_to_le16(val << shift);
+}
+
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
@@ -222,6 +227,13 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
b[0] = cpu_to_be32(val << shift);
}
+static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
+{
+ __le32 *b = buf;
+
+ b[0] = cpu_to_le32(val << shift);
+}
+
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
@@ -246,6 +258,13 @@ static unsigned int regmap_parse_16_be(const void *buf)
return be16_to_cpu(b[0]);
}
+static unsigned int regmap_parse_16_le(const void *buf)
+{
+ const __le16 *b = buf;
+
+ return le16_to_cpu(b[0]);
+}
+
static void regmap_parse_16_be_inplace(void *buf)
{
__be16 *b = buf;
@@ -253,6 +272,13 @@ static void regmap_parse_16_be_inplace(void *buf)
b[0] = be16_to_cpu(b[0]);
}
+static void regmap_parse_16_le_inplace(void *buf)
+{
+ __le16 *b = buf;
+
+ b[0] = le16_to_cpu(b[0]);
+}
+
static unsigned int regmap_parse_16_native(const void *buf)
{
return *(u16 *)buf;
@@ -275,6 +301,13 @@ static unsigned int regmap_parse_32_be(const void *buf)
return be32_to_cpu(b[0]);
}
+static unsigned int regmap_parse_32_le(const void *buf)
+{
+ const __le32 *b = buf;
+
+ return le32_to_cpu(b[0]);
+}
+
static void regmap_parse_32_be_inplace(void *buf)
{
__be32 *b = buf;
@@ -282,6 +315,13 @@ static void regmap_parse_32_be_inplace(void *buf)
b[0] = be32_to_cpu(b[0]);
}
+static void regmap_parse_32_le_inplace(void *buf)
+{
+ __le32 *b = buf;
+
+ b[0] = le32_to_cpu(b[0]);
+}
+
static unsigned int regmap_parse_32_native(const void *buf)
{
return *(u32 *)buf;
@@ -300,15 +340,20 @@ static void regmap_unlock_mutex(void *__map)
}
static void regmap_lock_spinlock(void *__map)
+__acquires(&map->spinlock)
{
struct regmap *map = __map;
- spin_lock(&map->spinlock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&map->spinlock, flags);
+ map->spinlock_flags = flags;
}
static void regmap_unlock_spinlock(void *__map)
+__releases(&map->spinlock)
{
struct regmap *map = __map;
- spin_unlock(&map->spinlock);
+ spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
static void dev_get_regmap_release(struct device *dev, void *res)
@@ -381,6 +426,28 @@ static void regmap_range_exit(struct regmap *map)
kfree(map->selector_work_buf);
}
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config)
+{
+ struct regmap **m;
+
+ map->dev = dev;
+
+ regmap_debugfs_init(map, config->name);
+
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ regmap_debugfs_exit(map);
+ return -ENOMEM;
+ }
+ *m = map;
+ devres_add(dev, m);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
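
regmap_attach_dev() exists for users that must create a map before any struct device does (regmap_init() now tolerates dev == NULL); a hedged sketch, with foo_bus, ctx and foo_config standing in for caller-provided objects:

	/* Early boot: no struct device yet. */
	map = regmap_init(NULL, &foo_bus, ctx, &foo_config);

	/* Later, once the device exists, hook up devres and debugfs. */
	ret = regmap_attach_dev(dev, map, &foo_config);
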
+
/**
* regmap_init(): Initialise register map
*
@@ -398,7 +465,7 @@ struct regmap *regmap_init(struct device *dev,
void *bus_context,
const struct regmap_config *config)
{
- struct regmap *map, **m;
+ struct regmap *map;
int ret = -EINVAL;
enum regmap_endian reg_endian, val_endian;
int i, j;
@@ -440,6 +507,7 @@ struct regmap *regmap_init(struct device *dev,
else
map->reg_stride = 1;
map->use_single_rw = config->use_single_rw;
+ map->can_multi_write = config->can_multi_write;
map->dev = dev;
map->bus = bus;
map->bus_context = bus_context;
@@ -457,6 +525,7 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
+ INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
if (config->read_flag_mask || config->write_flag_mask) {
@@ -472,6 +541,12 @@ struct regmap *regmap_init(struct device *dev,
map->defer_caching = false;
goto skip_format_initialization;
+ } else if (!bus->read || !bus->write) {
+ map->reg_read = _regmap_bus_reg_read;
+ map->reg_write = _regmap_bus_reg_write;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
} else {
map->reg_read = _regmap_bus_read;
}
@@ -585,6 +660,11 @@ struct regmap *regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_16_be;
map->format.parse_inplace = regmap_parse_16_be_inplace;
break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_16_le;
+ map->format.parse_val = regmap_parse_16_le;
+ map->format.parse_inplace = regmap_parse_16_le_inplace;
+ break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_16_native;
map->format.parse_val = regmap_parse_16_native;
@@ -606,6 +686,11 @@ struct regmap *regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_32_be;
map->format.parse_inplace = regmap_parse_32_be_inplace;
break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_32_le;
+ map->format.parse_val = regmap_parse_32_le;
+ map->format.parse_inplace = regmap_parse_32_le_inplace;
+ break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_32_native;
map->format.parse_val = regmap_parse_32_native;
@@ -681,6 +766,10 @@ skip_format_initialization:
unsigned win_max = win_min +
config->ranges[j].window_len - 1;
+ /* Allow data window inside its own virtual range */
+ if (j == i)
+ continue;
+
if (range_cfg->range_min <= sel_reg &&
sel_reg <= range_cfg->range_max) {
dev_err(map->dev,
@@ -714,7 +803,7 @@ skip_format_initialization:
new->window_start = range_cfg->window_start;
new->window_len = range_cfg->window_len;
- if (_regmap_range_add(map, new) == false) {
+ if (!_regmap_range_add(map, new)) {
dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
@@ -730,25 +819,19 @@ skip_format_initialization:
}
}
- regmap_debugfs_init(map, config->name);
-
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
- /* Add a devres resource for dev_get_regmap() */
- m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
- if (!m) {
- ret = -ENOMEM;
- goto err_debugfs;
+ if (dev) {
+ ret = regmap_attach_dev(dev, map, config);
+ if (ret != 0)
+ goto err_regcache;
}
- *m = map;
- devres_add(dev, m);
return map;
-err_debugfs:
- regmap_debugfs_exit(map);
+err_regcache:
regcache_exit(map);
err_range:
regmap_range_exit(map);
@@ -801,6 +884,97 @@ struct regmap *devm_regmap_init(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init);
+static void regmap_field_init(struct regmap_field *rm_field,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ int field_bits = reg_field.msb - reg_field.lsb + 1;
+ rm_field->regmap = regmap;
+ rm_field->reg = reg_field.reg;
+ rm_field->shift = reg_field.lsb;
+ rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+ rm_field->id_size = reg_field.id_size;
+ rm_field->id_offset = reg_field.id_offset;
+}
+
+/**
+ * devm_regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field will be automatically freed
+ * by the device management code.
+ */
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = devm_kzalloc(dev,
+ sizeof(*rm_field), GFP_KERNEL);
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
+
+/**
+ * devm_regmap_field_free(): Free register field allocated using
+ * devm_regmap_field_alloc. Usually drivers need not call this function,
+ * as the memory allocated via devm will be freed as per the
+ * device-driver life-cycle.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ */
+void devm_regmap_field_free(struct device *dev,
+ struct regmap_field *field)
+{
+ devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_free);
+
+/**
+ * regmap_field_alloc(): Allocate and initialise a register field
+ * in a register map.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field should be freed by the
+ * user, once they have finished with it, using regmap_field_free().
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
+
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+}
+EXPORT_SYMBOL_GPL(regmap_field_alloc);
+
+/**
+ * regmap_field_free(): Free register field allocated using regmap_field_alloc
+ *
+ * @field: regmap field which should be freed.
+ */
+void regmap_field_free(struct regmap_field *field)
+{
+ kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_free);
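
Register fields package a shift-and-mask update; a sketch assuming a hypothetical 3-bit gain field in bits 6:4 of an invented FOO_CTRL register:

	struct reg_field gain_field = REG_FIELD(FOO_CTRL, 4, 6);
	struct regmap_field *gain;

	gain = devm_regmap_field_alloc(dev, map, gain_field);
	if (IS_ERR(gain))
		return PTR_ERR(gain);

	/* Performs update_bits with mask 0x7 << 4 and value 5 << 4. */
	ret = regmap_field_write(gain, 5);
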
+
/**
* regmap_reinit_cache(): Reinitialise the current register cache
*
@@ -841,12 +1015,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
*/
void regmap_exit(struct regmap *map)
{
+ struct regmap_async *async;
+
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
+ while (!list_empty(&map->async_free)) {
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ list_del(&async->list);
+ kfree(async->work_buf);
+ kfree(async);
+ }
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -938,7 +1122,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
}
int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async)
+ const void *val, size_t val_len)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -990,7 +1174,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
dev_dbg(map->dev, "Writing window %d/%zu\n",
win_residue, val_len / map->format.val_bytes);
ret = _regmap_raw_write(map, reg, val, win_residue *
- map->format.val_bytes, async);
+ map->format.val_bytes);
if (ret != 0)
return ret;
@@ -1013,49 +1197,72 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
- if (async && map->bus->async_write) {
- struct regmap_async *async = map->bus->async_alloc();
- if (!async)
- return -ENOMEM;
+ /*
+ * Essentially all I/O mechanisms will be faster with a single
+ * buffer to write. Since register syncs often generate raw
+ * writes of single registers optimise that case.
+ */
+ if (val != work_val && val_len == map->format.val_bytes) {
+ memcpy(work_val, val, map->format.val_bytes);
+ val = work_val;
+ }
+
+ if (map->async && map->bus->async_write) {
+ struct regmap_async *async;
trace_regmap_async_write_start(map->dev, reg, val_len);
- async->work_buf = kzalloc(map->format.buf_size,
- GFP_KERNEL | GFP_DMA);
- if (!async->work_buf) {
- kfree(async);
- return -ENOMEM;
+ spin_lock_irqsave(&map->async_lock, flags);
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ if (async)
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (!async) {
+ async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
}
- INIT_WORK(&async->cleanup, async_cleanup);
async->map = map;
/* If the caller supplied the value we can use it safely. */
memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
map->format.reg_bytes + map->format.val_bytes);
- if (val == work_val)
- val = async->work_buf + map->format.pad_bytes +
- map->format.reg_bytes;
spin_lock_irqsave(&map->async_lock, flags);
list_add_tail(&async->list, &map->async_list);
spin_unlock_irqrestore(&map->async_lock, flags);
- ret = map->bus->async_write(map->bus_context, async->work_buf,
- map->format.reg_bytes +
- map->format.pad_bytes,
- val, val_len, async);
+ if (val != work_val)
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+ else
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len, NULL, 0, async);
if (ret != 0) {
dev_err(map->dev, "Failed to schedule write: %d\n",
ret);
spin_lock_irqsave(&map->async_lock, flags);
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
spin_unlock_irqrestore(&map->async_lock, flags);
-
- kfree(async->work_buf);
- kfree(async);
}
return ret;
@@ -1139,6 +1346,14 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ return map->bus->reg_write(map->bus_context, reg, val);
+}
+
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1152,7 +1367,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
map->work_buf +
map->format.reg_bytes +
map->format.pad_bytes,
- map->format.val_bytes, false);
+ map->format.val_bytes);
}
static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1166,6 +1381,9 @@ int _regmap_write(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
@@ -1214,6 +1432,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
EXPORT_SYMBOL_GPL(regmap_write);
/**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_write(map, reg, val);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
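
Async writes are queued on buses that support them and completed later; callers typically batch and then flush. A sketch with an invented coefficient block:

	for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
		ret = regmap_write_async(map, FOO_COEF_BASE + i, coeffs[i]);
		if (ret)
			break;
	}

	/* Wait for everything scheduled above to hit the hardware. */
	regmap_async_complete(map);
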
+
+/**
* regmap_raw_write(): Write raw values to one or more registers
*
* @map: Register map to write to
@@ -1241,7 +1490,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, false);
+ ret = _regmap_raw_write(map, reg, val, val_len);
map->unlock(map->lock_arg);
@@ -1249,6 +1498,90 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+/**
+ * regmap_field_write(): Write a value to a single register field
+ *
+ * @field: Register field to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_write(struct regmap_field *field, unsigned int val)
+{
+ return regmap_update_bits(field->regmap, field->reg,
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_write);
+
+/**
+ * regmap_field_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap, field->reg,
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);
+
+/**
+ * regmap_fields_update_bits(): Perform a read/modify/write cycle
+ * on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
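
The *_fields_* variants address multiple instances of one field through id_size/id_offset; a sketch with two hypothetical ports spaced 0x40 registers apart:

	struct reg_field ch_mute_field = {
		.reg = FOO_CH_CTRL,
		.lsb = 0,
		.msb = 0,
		.id_size = 2,		/* two port instances */
		.id_offset = 0x40,	/* register stride between ports */
	};
	struct regmap_field *ch_mute;

	ch_mute = devm_regmap_field_alloc(dev, map, ch_mute_field);
	if (IS_ERR(ch_mute))
		return PTR_ERR(ch_mute);

	ret = regmap_fields_write(ch_mute, 1, 1);	/* mute port 1 */
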
+
/*
* regmap_bulk_write(): Write multiple registers to the device
*
@@ -1268,56 +1601,316 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
{
int ret = 0, i;
size_t val_bytes = map->format.val_bytes;
- void *wval;
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
+ if (map->bus && !map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map->lock_arg);
+ /*
+ * Some devices don't support bulk write, for
+ * them we have a series of single write operations.
+ */
+ if (!map->bus || map->use_single_rw) {
+ map->lock(map->lock_arg);
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
- /* No formatting is require if val_byte is 1 */
- if (val_bytes == 1) {
- wval = (void *)val;
+ switch (val_bytes) {
+ case 1:
+ ival = *(u8 *)(val + (i * val_bytes));
+ break;
+ case 2:
+ ival = *(u16 *)(val + (i * val_bytes));
+ break;
+ case 4:
+ ival = *(u32 *)(val + (i * val_bytes));
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ ival = *(u64 *)(val + (i * val_bytes));
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = _regmap_write(map, reg + (i * map->reg_stride),
+ ival);
+ if (ret != 0)
+ goto out;
+ }
+out:
+ map->unlock(map->lock_arg);
} else {
+ void *wval;
+
wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
if (!wval) {
- ret = -ENOMEM;
dev_err(map->dev, "Error in memory allocation\n");
- goto out;
+ return -ENOMEM;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(wval + i);
+
+ map->lock(map->lock_arg);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ map->unlock(map->lock_arg);
+
+ kfree(wval);
}
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
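
A bulk-write sketch for a 16-bit value map; the register block is invented. With use_single_rw set (or no bus at all) the path above degrades to per-register writes, otherwise the values are converted in place and sent as one raw write:

	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };

	ret = regmap_bulk_write(map, FOO_COEF_BASE, coeffs,
				ARRAY_SIZE(coeffs));
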
+
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * the (register,newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been converted to page-relative
+ * addresses. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ void *buf;
+ int i;
+ u8 *u8;
+ size_t val_bytes = map->format.val_bytes;
+ size_t reg_bytes = map->format.reg_bytes;
+ size_t pad_bytes = map->format.pad_bytes;
+ size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+ size_t len = pair_size * num_regs;
+
+ if (!len)
+ return -EINVAL;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* We have to linearise by hand. */
+
+ u8 = buf;
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ int val = regs[i].def;
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+ map->format.format_reg(u8, reg, map->reg_shift);
+ u8 += reg_bytes + pad_bytes;
+ map->format.format_val(u8, val, 0);
+ u8 += val_bytes;
+ }
+ u8 = buf;
+ *u8 |= map->write_flag_mask;
+
+ ret = map->bus->write(map->bus_context, buf, len);
+
+ kfree(buf);
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+ }
+ return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+ unsigned int reg,
+ struct regmap_range_node *range)
+{
+ unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+ return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+ struct reg_default *regs,
+ size_t num_regs)
+{
+ int ret;
+ int i, n;
+ struct reg_default *base;
+ unsigned int this_page = 0;
/*
- * Some devices does not support bulk write, for
- * them we have a series of single write operations.
+ * the set of registers is not necessarily in order, but
+ * since the order of writes must be preserved this algorithm
+ * chops the set each time the page changes
*/
- if (map->use_single_rw) {
- for (i = 0; i < val_count; i++) {
- ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ base = regs;
+ for (i = 0, n = 0; i < num_regs; i++, n++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ unsigned int win_page = _regmap_register_page(map, reg,
+ range);
+
+ if (i == 0)
+ this_page = win_page;
+ if (win_page != this_page) {
+ this_page = win_page;
+ ret = _regmap_raw_multi_reg_write(map, base, n);
+ if (ret != 0)
+ return ret;
+ base += n;
+ n = 0;
+ }
+ ret = _regmap_select_page(map, &base[n].reg, range, 1);
if (ret != 0)
return ret;
}
- } else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
- false);
+ }
+ if (n > 0)
+ return _regmap_raw_multi_reg_write(map, base, n);
+ return 0;
+}
+
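A worked example of the chopping, with hypothetical window pages:

	/*
	 * Illustration only. For regs[] hitting window pages 0, 0, 1, 1, 0
	 * the loop above emits three ordered chunks:
	 *
	 *	_regmap_raw_multi_reg_write(map, &regs[0], 2);	(page 0)
	 *	_regmap_raw_multi_reg_write(map, &regs[2], 2);	(page 1)
	 *	_regmap_raw_multi_reg_write(map, &regs[4], 1);	(page 0 again)
	 *
	 * with _regmap_select_page() re-targeting the window between chunks.
	 */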
+static int _regmap_multi_reg_write(struct regmap *map,
+ const struct reg_default *regs,
+ size_t num_regs)
+{
+ int i;
+ int ret;
+
+ if (!map->can_multi_write) {
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
}
- if (val_bytes != 1)
- kfree(wval);
+ if (!map->format.parse_inplace)
+ return -EINVAL;
+
+ if (map->writeable_reg)
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (!map->writeable_reg(map->dev, reg))
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+ }
+
+ if (!map->cache_bypass) {
+ for (i = 0; i < num_regs; i++) {
+ unsigned int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ ret = regcache_write(map, reg, val);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ WARN_ON(!map->bus);
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ size_t len = sizeof(struct reg_default)*num_regs;
+ struct reg_default *base = kmemdup(regs, len,
+ GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+ ret = _regmap_range_multi_paged_reg_write(map, base,
+ num_regs);
+ kfree(base);
+
+ return ret;
+ }
+ }
+ return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/*
+ * regmap_multi_reg_write(): Write multiple registers to the device
+ *
+ * The set of register,value pairs may be supplied in any order,
+ * possibly not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+ int num_regs)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
-out:
map->unlock(map->lock_arg);
+
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_bulk_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+
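A caller sketch for the interface just exported; the register/value pairs are hypothetical:

	#include <linux/kernel.h>
	#include <linux/regmap.h>

	/* Hypothetical init sequence; write order is preserved on the bus. */
	static const struct reg_default chip_init_seq[] = {
		{ .reg = 0x30, .def = 0x0001 },
		{ .reg = 0x12, .def = 0x00ff },	/* out-of-order pair is fine */
		{ .reg = 0x31, .def = 0x0002 },
	};

	static int chip_apply_init_seq(struct regmap *map)
	{
		return regmap_multi_reg_write(map, chip_init_seq,
					      ARRAY_SIZE(chip_init_seq));
	}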
+/*
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ * device but not the cache
+ *
+ * The set of register,value pairs may be supplied in any order.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in a single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs)
+{
+ int ret;
+ bool bypass;
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+ map->cache_bypass = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->cache_bypass = bypass;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
/**
* regmap_raw_write_async(): Write raw values to one or more registers
@@ -1353,7 +1946,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len, true);
+ map->async = true;
+
+ ret = _regmap_raw_write(map, reg, val, val_len);
+
+ map->async = false;
map->unlock(map->lock_arg);
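A sketch of the batch-then-fence pattern this enables, using the new map->async flag internally; the firmware buffer and base register are hypothetical:

	#include <linux/regmap.h>

	static int dsp_download(struct regmap *map, const void *fw, size_t len)
	{
		int ret;

		/* Queue the block write; it may complete in the background. */
		ret = regmap_raw_write_async(map, 0x1000, fw, len);
		if (ret)
			return ret;

		/* Fence: wait for every outstanding async write on this map. */
		return regmap_async_complete(map);
	}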
@@ -1401,6 +1998,14 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct regmap *map = context;
+
+ return map->bus->reg_read(map->bus_context, reg, val);
+}
+
static int _regmap_bus_read(void *context, unsigned int reg,
unsigned int *val)
{
@@ -1434,6 +2039,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
if (map->cache_only)
return -EBUSY;
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
ret = map->reg_read(context, reg, val);
if (ret == 0) {
#ifdef LOG_DEVICE
@@ -1453,7 +2061,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
/**
* regmap_read(): Read a value from a single register
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: Register to be read from
* @val: Pointer to store read value
*
@@ -1480,7 +2088,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
/**
* regmap_raw_read(): Read raw data from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value
* @val_len: Size of data to read
@@ -1532,9 +2140,67 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
+ * regmap_field_read(): Read a value from a single register field
+ *
+ * @field: Register field to read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_read(struct regmap_field *field, unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+ ret = regmap_read(field->regmap, field->reg, &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_field_read);
+
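A sketch of how a field is allocated and read; the register layout (bits [6:4] of register 0x08) is made up:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regmap.h>

	/* Hypothetical: a 3-bit divider in bits [6:4] of register 0x08. */
	static const struct reg_field clk_div_field = REG_FIELD(0x08, 4, 6);

	static int read_clk_div(struct device *dev, struct regmap *map,
				unsigned int *div)
	{
		struct regmap_field *f;

		f = devm_regmap_field_alloc(dev, map, clk_div_field);
		if (IS_ERR(f))
			return PTR_ERR(f);

		/* Masks to bits [6:4] and shifts down, as coded above. */
		return regmap_field_read(f, div);
	}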
+/**
+ * regmap_fields_read(): Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ ret = regmap_read(field->regmap,
+ field->reg + (field->id_offset * id),
+ &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
* regmap_bulk_read(): Read multiple registers from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value, in native register size for device
* @val_count: Number of registers to read
@@ -1549,14 +2215,10 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
- if (!map->bus)
- return -EINVAL;
- if (!map->format.parse_inplace)
- return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- if (vol || map->cache_type == REGCACHE_NONE) {
+ if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
/*
 * Some devices do not support bulk read, for
 * them we have a series of single read operations.
@@ -1610,9 +2272,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
if (tmp != orig) {
ret = _regmap_write(map, reg, tmp);
- *change = true;
+ if (change)
+ *change = true;
} else {
- *change = false;
+ if (change)
+ *change = false;
}
return ret;
@@ -1631,11 +2295,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
- bool change;
int ret;
map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, &change);
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
map->unlock(map->lock_arg);
return ret;
@@ -1643,6 +2306,40 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_update_bits);
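The read/modify/write contract in one sketch; the control register and enable bit are hypothetical. Only the masked bit changes, and the write is skipped when the register already holds the value:

	#include <linux/bitops.h>
	#include <linux/regmap.h>

	#define CHIP_CTRL	0x00		/* hypothetical register */
	#define CHIP_CTRL_EN	BIT(3)		/* hypothetical enable bit */

	static int chip_enable(struct regmap *map, bool en)
	{
		return regmap_update_bits(map, CHIP_CTRL, CHIP_CTRL_EN,
					  en ? CHIP_CTRL_EN : 0);
	}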
/**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ * map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, NULL);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
+/**
* regmap_update_bits_check: Perform a read/modify/write cycle on the
* register map and report if updated
*
@@ -1667,6 +2364,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ * register map asynchronously and report if
+ * updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_update_bits(map, reg, mask, val, change);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
struct regmap *map = async->map;
@@ -1675,8 +2409,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
trace_regmap_async_io_complete(map->dev);
spin_lock(&map->async_lock);
-
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
wake = list_empty(&map->async_list);
if (ret != 0)
@@ -1684,8 +2417,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
spin_unlock(&map->async_lock);
- schedule_work(&async->cleanup);
-
if (wake)
wake_up(&map->async_waitq);
}
@@ -1717,7 +2448,7 @@ int regmap_async_complete(struct regmap *map)
int ret;
/* Nothing to do with no async support */
- if (!map->bus->async_write)
+ if (!map->bus || !map->bus->async_write)
return 0;
trace_regmap_async_complete_start(map->dev);
@@ -1748,47 +2479,51 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
* apply them immediately. Typically this is used to apply
* corrections to be applied to the device defaults on startup, such
* as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
*/
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs)
{
- int i, ret;
+ struct reg_default *p;
+ int ret;
bool bypass;
- /* If needed the implementation can be extended to support this */
- if (map->patch)
- return -EBUSY;
+ if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
+ num_regs))
+ return 0;
+
+ p = krealloc(map->patch,
+ sizeof(struct reg_default) * (map->patch_regs + num_regs),
+ GFP_KERNEL);
+ if (p) {
+ memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
+ map->patch = p;
+ map->patch_regs += num_regs;
+ } else {
+ return -ENOMEM;
+ }
map->lock(map->lock_arg);
bypass = map->cache_bypass;
map->cache_bypass = true;
+ map->async = true;
- /* Write out first; it's useful to apply even if we fail later. */
- for (i = 0; i < num_regs; i++) {
- ret = _regmap_write(map, regs[i].reg, regs[i].def);
- if (ret != 0) {
- dev_err(map->dev, "Failed to write %x = %x: %d\n",
- regs[i].reg, regs[i].def, ret);
- goto out;
- }
- }
-
- map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
- if (map->patch != NULL) {
- memcpy(map->patch, regs,
- num_regs * sizeof(struct reg_default));
- map->patch_regs = num_regs;
- } else {
- ret = -ENOMEM;
- }
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+ if (ret != 0)
+ goto out;
out:
+ map->async = false;
map->cache_bypass = bypass;
map->unlock(map->lock_arg);
+ regmap_async_complete(map);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
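A caller sketch; the erratum registers and values are hypothetical:

	#include <linux/kernel.h>
	#include <linux/regmap.h>

	/* Hypothetical vendor fix-ups for undocumented registers. */
	static const struct reg_default chip_errata[] = {
		{ .reg = 0x7f, .def = 0x0001 },
		{ .reg = 0x80, .def = 0x8000 },
	};

	static int chip_apply_errata(struct regmap *map)
	{
		/*
		 * Written once now, bypassing the cache, and replayed by
		 * regcache_sync() after each cache-only period.
		 */
		return regmap_register_patch(map, chip_errata,
					     ARRAY_SIZE(chip_errata));
	}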
@@ -1808,6 +2543,18 @@ int regmap_get_val_bytes(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ *val = map->format.parse_val(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/reservation.c b/drivers/base/reservation.c
new file mode 100644
index 00000000000..a73fbf3b8e5
--- /dev/null
+++ b/drivers/base/reservation.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012-2013 Canonical Ltd
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/reservation.h>
+#include <linux/export.h>
+
+DEFINE_WW_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
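The new file only exports the shared wait/wound class; a sketch of how a buffer-object style user might take two reservations with it — the bo structure and callers are hypothetical:

	#include <linux/reservation.h>
	#include <linux/ww_mutex.h>

	struct bo {
		struct ww_mutex lock;	/* hypothetical buffer object */
	};

	static void bo_init(struct bo *bo)
	{
		/* One shared class lets cross-driver acquisitions back off. */
		ww_mutex_init(&bo->lock, &reservation_ww_class);
	}

	static int bo_with_both(struct bo *a, struct bo *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &reservation_ww_class);

		ret = ww_mutex_lock(&a->lock, &ctx);
		if (ret)
			goto fini;

		ret = ww_mutex_lock(&b->lock, &ctx);
		if (ret) {
			/* -EDEADLK: a real caller would back off and retry. */
			ww_mutex_unlock(&a->lock);
			goto fini;
		}
		ww_acquire_done(&ctx);

		/* ... both objects reserved; do the work here ... */

		ww_mutex_unlock(&b->lock);
		ww_mutex_unlock(&a->lock);
	fini:
		ww_acquire_fini(&ctx);
		return ret;
	}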
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6630e..dbb8350ea8d 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <trace/events/power.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -49,6 +50,7 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ trace_suspend_resume(TPS("syscore_suspend"), 0, true);
pr_debug("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
@@ -70,6 +72,7 @@ int syscore_suspend(void)
"Interrupts enabled after %pF\n", ops->suspend);
}
+ trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
@@ -92,6 +95,7 @@ void syscore_resume(void)
{
struct syscore_ops *ops;
+ trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
@@ -103,6 +107,7 @@ void syscore_resume(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled after %pF\n", ops->resume);
}
+ trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
EXPORT_SYMBOL_GPL(syscore_resume);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index ae989c57cd5..be7c1fb7c0c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/module.h>
@@ -40,8 +39,7 @@
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- unsigned int cpu = dev->id; \
- return sprintf(buf, "%d\n", topology_##name(cpu)); \
+ return sprintf(buf, "%d\n", topology_##name(dev->id)); \
}
#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
@@ -62,25 +60,6 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
}
#endif
-#ifdef arch_provides_topology_pointers
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(0, topology_##name(cpu), buf); \
-}
-
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(1, topology_##name(cpu), buf); \
-}
-
-#else
#define define_siblings_show_map(name) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -95,7 +74,6 @@ static ssize_t show_##name##_list(struct device *dev, \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
}
-#endif
#define define_siblings_show_func(name) \
define_siblings_show_map(name); define_siblings_show_list(name)
@@ -143,22 +121,22 @@ static struct attribute_group topology_attr_group = {
};
/* Add/Remove cpu_topology interface for CPU device */
-static int __cpuinit topology_add_dev(unsigned int cpu)
+static int topology_add_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
-static void __cpuinit topology_remove_dev(unsigned int cpu)
+static void topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
}
-static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int topology_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
int rc = 0;
@@ -178,19 +156,23 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
return notifier_from_errno(rc);
}
-static int __cpuinit topology_sysfs_init(void)
+static int topology_sysfs_init(void)
{
int cpu;
- int rc;
+ int rc = 0;
+
+ cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
rc = topology_add_dev(cpu);
if (rc)
- return rc;
+ goto out;
}
- hotcpu_notifier(topology_cpu_callback, 0);
+ __hotcpu_notifier(topology_cpu_callback, 0);
- return 0;
+out:
+ cpu_notifier_register_done();
+ return rc;
}
device_initcall(topology_sysfs_init);