-rw-r--r--Documentation/arm/sunxi/clocks.txt56
-rw-r--r--Documentation/cgroups/cgroups.txt3
-rw-r--r--Documentation/cgroups/devices.txt70
-rw-r--r--Documentation/clk.txt15
-rw-r--r--Documentation/devicetree/bindings/clock/axi-clkgen.txt22
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-factor-clock.txt24
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5351.txt114
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt151
-rw-r--r--Documentation/devicetree/bindings/mfd/mc13xxx.txt36
-rw-r--r--Documentation/devicetree/bindings/regulator/max8952.txt52
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt22
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-spi.txt3
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt26
-rw-r--r--Documentation/devicetree/bindings/spi/spi-samsung.txt8
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/kernel-parameters.txt24
-rw-r--r--Documentation/this_cpu_ops.txt205
-rw-r--r--Documentation/trace/ftrace.txt2097
-rw-r--r--MAINTAINERS21
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi10
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts10
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi40
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi10
-rw-r--r--arch/arm/mach-at91/at91sam9260.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c2
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c2
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c27
-rw-r--r--arch/arm/mach-imx/clk-busy.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c774
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h7
-rw-r--r--arch/arm/mach-ux500/board-mop500.c5
-rw-r--r--arch/arm/mach-vexpress/v2m.c8
-rw-r--r--arch/arm/plat-samsung/devs.c10
-rw-r--r--arch/mips/bcm63xx/dev-spi.c11
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h1
-rw-r--r--arch/powerpc/mm/numa.c1
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c2
-rw-r--r--block/blk-cgroup.h2
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/bus.c73
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/regmap/internal.h40
-rw-r--r--drivers/base/regmap/regcache-lzo.c6
-rw-r--r--drivers/base/regmap/regcache-rbtree.c100
-rw-r--r--drivers/base/regmap/regcache.c196
-rw-r--r--drivers/base/regmap/regmap-debugfs.c94
-rw-r--r--drivers/base/regmap/regmap-irq.c3
-rw-r--r--drivers/base/regmap/regmap.c92
-rw-r--r--drivers/clk/Kconfig18
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/clk-axi-clkgen.c331
-rw-r--r--drivers/clk/clk-composite.c210
-rw-r--r--drivers/clk/clk-divider.c5
-rw-r--r--drivers/clk/clk-fixed-factor.c36
-rw-r--r--drivers/clk/clk-mux.c50
-rw-r--r--drivers/clk/clk-prima2.c2
-rw-r--r--drivers/clk/clk-si5351.c1510
-rw-r--r--drivers/clk/clk-si5351.h155
-rw-r--r--drivers/clk/clk-vt8500.c2
-rw-r--r--drivers/clk/clk-zynq.c1
-rw-r--r--drivers/clk/clk.c406
-rw-r--r--drivers/clk/mvebu/clk-core.c4
-rw-r--r--drivers/clk/mvebu/clk-cpu.c17
-rw-r--r--drivers/clk/mvebu/clk-cpu.h22
-rw-r--r--drivers/clk/mvebu/clk.c6
-rw-r--r--drivers/clk/mxs/clk.c1
-rw-r--r--drivers/clk/spear/spear1340_clock.c18
-rw-r--r--drivers/clk/sunxi/Makefile5
-rw-r--r--drivers/clk/sunxi/clk-factors.c180
-rw-r--r--drivers/clk/sunxi/clk-factors.h27
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c469
-rw-r--r--drivers/clk/tegra/clk.h27
-rw-r--r--drivers/clk/ux500/Makefile1
-rw-r--r--drivers/clk/ux500/abx500-clk.c71
-rw-r--r--drivers/clk/ux500/clk-prcmu.c136
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c221
-rw-r--r--drivers/clk/ux500/clk.h34
-rw-r--r--drivers/clk/versatile/Makefile2
-rw-r--r--drivers/clk/versatile/clk-sp810.c188
-rw-r--r--drivers/clk/versatile/clk-vexpress.c49
-rw-r--r--drivers/clocksource/sunxi_timer.c4
-rw-r--r--drivers/mfd/ab3100-core.c1
-rw-r--r--drivers/regulator/88pm8607.c36
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ab3100.c236
-rw-r--r--drivers/regulator/ab8500-ext.c407
-rw-r--r--drivers/regulator/ab8500.c2662
-rw-r--r--drivers/regulator/arizona-ldo1.c2
-rw-r--r--drivers/regulator/as3711-regulator.c67
-rw-r--r--drivers/regulator/core.c220
-rw-r--r--drivers/regulator/dbx500-prcmu.h2
-rw-r--r--drivers/regulator/fan53555.c4
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp872x.c3
-rw-r--r--drivers/regulator/lp8788-buck.c2
-rw-r--r--drivers/regulator/lp8788-ldo.c157
-rw-r--r--drivers/regulator/max1586.c5
-rw-r--r--drivers/regulator/max77686.c32
-rw-r--r--drivers/regulator/max8649.c45
-rw-r--r--drivers/regulator/max8660.c3
-rw-r--r--drivers/regulator/max8925-regulator.c5
-rw-r--r--drivers/regulator/max8952.c75
-rw-r--r--drivers/regulator/max8973-regulator.c10
-rw-r--r--drivers/regulator/max8997.c4
-rw-r--r--drivers/regulator/max8998.c9
-rw-r--r--drivers/regulator/mc13783-regulator.c44
-rw-r--r--drivers/regulator/mc13892-regulator.c46
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c17
-rw-r--r--drivers/regulator/mc13xxx.h4
-rw-r--r--drivers/regulator/palmas-regulator.c365
-rw-r--r--drivers/regulator/rc5t583-regulator.c6
-rw-r--r--drivers/regulator/s5m8767.c8
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c48
-rw-r--r--drivers/regulator/tps6507x-regulator.c1
-rw-r--r--drivers/regulator/tps6524x-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c7
-rw-r--r--drivers/regulator/tps65910-regulator.c4
-rw-r--r--drivers/regulator/tps80031-regulator.c45
-rw-r--r--drivers/regulator/twl-regulator.c34
-rw-r--r--drivers/regulator/wm8994-regulator.c61
-rw-r--r--drivers/spi/Kconfig30
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/spi-atmel.c728
-rw-r--r--drivers/spi/spi-bcm2835.c422
-rw-r--r--drivers/spi/spi-bcm63xx.c78
-rw-r--r--drivers/spi/spi-fsl-cpm.c387
-rw-r--r--drivers/spi/spi-fsl-cpm.h43
-rw-r--r--drivers/spi/spi-fsl-lib.c8
-rw-r--r--drivers/spi/spi-fsl-lib.h15
-rw-r--r--drivers/spi/spi-fsl-spi.c601
-rw-r--r--drivers/spi/spi-fsl-spi.h72
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-mpc512x-psc.c41
-rw-r--r--drivers/spi/spi-mxs.c1
-rw-r--r--drivers/spi/spi-oc-tiny.c4
-rw-r--r--drivers/spi/spi-omap2-mcspi.c43
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c6
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/spi/spi-s3c64xx.c292
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/spi/spi-sirf.c2
-rw-r--r--drivers/spi/spi-tegra114.c1246
-rw-r--r--drivers/spi/spi-tegra20-sflash.c41
-rw-r--r--drivers/spi/spi-tegra20-slink.c84
-rw-r--r--drivers/spi/spi-topcliff-pch.c16
-rw-r--r--drivers/spi/spi.c8
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--include/linux/async.h13
-rw-r--r--include/linux/cgroup.h170
-rw-r--r--include/linux/clk-private.h2
-rw-r--r--include/linux/clk-provider.h63
-rw-r--r--include/linux/clk.h8
-rw-r--r--include/linux/clk/sunxi.h22
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/cpuset.h1
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/ftrace_event.h111
-rw-r--r--include/linux/kernel.h70
-rw-r--r--include/linux/mfd/abx500/ab8500.h11
-rw-r--r--include/linux/mfd/palmas.h24
-rw-r--r--include/linux/platform_data/si5351.h114
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h3
-rw-r--r--include/linux/regmap.h1
-rw-r--r--include/linux/regulator/ab8500.h217
-rw-r--r--include/linux/regulator/consumer.h14
-rw-r--r--include/linux/regulator/driver.h9
-rw-r--r--include/linux/regulator/max8952.h10
-rw-r--r--include/linux/res_counter.h2
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/spi/spi-tegra.h40
-rw-r--r--include/linux/spi/spi.h8
-rw-r--r--include/linux/trace_clock.h1
-rw-r--r--include/linux/workqueue.h166
-rw-r--r--include/trace/events/regmap.h48
-rw-r--r--include/trace/ftrace.h49
-rw-r--r--kernel/async.c40
-rw-r--r--kernel/cgroup.c728
-rw-r--r--kernel/cpuset.c131
-rw-r--r--kernel/events/core.c24
-rw-r--r--kernel/kthread.c2
-rw-r--r--kernel/sched/core.c9
-rw-r--r--kernel/trace/Kconfig49
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/ftrace.c98
-rw-r--r--kernel/trace/ring_buffer.c500
-rw-r--r--kernel/trace/trace.c2204
-rw-r--r--kernel/trace/trace.h144
-rw-r--r--kernel/trace/trace_branch.c8
-rw-r--r--kernel/trace/trace_clock.c10
-rw-r--r--kernel/trace/trace_entries.h23
-rw-r--r--kernel/trace/trace_events.c1397
-rw-r--r--kernel/trace/trace_events_filter.c34
-rw-r--r--kernel/trace/trace_export.c4
-rw-r--r--kernel/trace/trace_functions.c207
-rw-r--r--kernel/trace/trace_functions_graph.c12
-rw-r--r--kernel/trace/trace_irqsoff.c85
-rw-r--r--kernel/trace/trace_kdb.c12
-rw-r--r--kernel/trace/trace_mmiotrace.c12
-rw-r--r--kernel/trace/trace_output.c119
-rw-r--r--kernel/trace/trace_output.h4
-rw-r--r--kernel/trace/trace_sched_switch.c8
-rw-r--r--kernel/trace/trace_sched_wakeup.c87
-rw-r--r--kernel/trace/trace_selftest.c51
-rw-r--r--kernel/trace/trace_stack.c76
-rw-r--r--kernel/trace/trace_stat.c2
-rw-r--r--kernel/trace/trace_syscalls.c90
-rw-r--r--kernel/tracepoint.c21
-rw-r--r--kernel/workqueue.c2828
-rw-r--r--kernel/workqueue_internal.h9
-rw-r--r--mm/memcontrol.c80
-rw-r--r--security/device_cgroup.c267
-rwxr-xr-xtools/testing/ktest/ktest.pl14
223 files changed, 22705 insertions, 5657 deletions
diff --git a/Documentation/arm/sunxi/clocks.txt b/Documentation/arm/sunxi/clocks.txt
new file mode 100644
index 00000000000..e09a88aa313
--- /dev/null
+++ b/Documentation/arm/sunxi/clocks.txt
@@ -0,0 +1,56 @@
+Frequently asked questions about the sunxi clock system
+=======================================================
+
+This document contains useful bits of information that people tend to ask
+about the sunxi clock system, as well as accompanying ASCII art when adequate.
+
+Q: Why is the main 24MHz oscillator gatable? Wouldn't that break the
+ system?
+
+A: The 24MHz oscillator allows gating to save power. Indeed, if gated
+ carelessly the system would stop functioning, but with the right
+ steps, one can gate it and keep the system running. Consider this
+ simplified suspend example:
+
+ While the system is operational, you would see something like
+
+ 24MHz 32kHz
+ |
+ PLL1
+ \
+ \_ CPU Mux
+ |
+ [CPU]
+
+ When you are about to suspend, you switch the CPU Mux to the 32kHz
+ oscillator:
+
+ 24MHz 32kHz
+ | |
+ PLL1 |
+ /
+ CPU Mux _/
+ |
+ [CPU]
+
+ Finally you can gate the main oscillator
+
+ 32kHz
+ |
+ |
+ /
+ CPU Mux _/
+ |
+ [CPU]
+
+Q: Where can I learn more about the sunxi clocks?
+
+A: The linux-sunxi wiki contains a page documenting the clock registers,
+ you can find it at
+
+ http://linux-sunxi.org/A10/CCM
+
+ The authoritative source for information at this time is the ccmu driver
+ released by Allwinner, you can find it at
+
+ https://github.com/linux-sunxi/linux-sunxi/tree/sunxi-3.0/arch/arm/mach-sun4i/clock/ccmu
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index bcf1a00b06a..638bf17ff86 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -442,7 +442,7 @@ You can attach the current shell task by echoing 0:
You can use the cgroup.procs file instead of the tasks file to move all
threads in a threadgroup at once. Echoing the PID of any task in a
threadgroup to cgroup.procs causes all tasks in that threadgroup to be
-be attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
+attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
in the writing task's threadgroup.
Note: Since every task is always a member of exactly one cgroup in each
@@ -580,6 +580,7 @@ propagation along the hierarchy. See the comment on
cgroup_for_each_descendant_pre() for details.
void css_offline(struct cgroup *cgrp);
+(cgroup_mutex held by caller)
This is the counterpart of css_online() and called iff css_online()
has succeeded on @cgrp. This signifies the beginning of the end of
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 16624a7f822..3c1095ca02e 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -13,9 +13,7 @@ either an integer or * for all. Access is a composition of r
The root device cgroup starts with rwm to 'all'. A child device
cgroup gets a copy of the parent. Administrators can then remove
devices from the whitelist or add new entries. A child cgroup can
-never receive a device access which is denied by its parent. However
-when a device access is removed from a parent it will not also be
-removed from the child(ren).
+never receive a device access which is denied by its parent.
2. User Interface
@@ -50,3 +48,69 @@ task to a new cgroup. (Again we'll probably want to change that).
A cgroup may not be granted more permissions than the cgroup's
parent has.
+
+4. Hierarchy
+
+device cgroups maintain hierarchy by making sure a cgroup never has more
+access permissions than its parent. Every time an entry is written to
+a cgroup's devices.deny file, all its children will have that entry removed
+from their whitelist and all the locally set whitelist entries will be
+re-evaluated. In case one of the locally set whitelist entries would provide
+more access than the cgroup's parent, it'll be removed from the whitelist.
+
+Example:
+ A
+ / \
+ B
+
+ group behavior exceptions
+ A allow "b 8:* rwm", "c 116:1 rw"
+ B deny "c 1:3 rwm", "c 116:2 rwm", "b 3:* rwm"
+
+If a device is denied in group A:
+ # echo "c 116:* r" > A/devices.deny
+it'll propagate down and after revalidating B's entries, the whitelist entry
+"c 116:2 rwm" will be removed:
+
+ group whitelist entries denied devices
+ A all "b 8:* rwm", "c 116:* rw"
+ B "c 1:3 rwm", "b 3:* rwm" all the rest
+
+If a parent's exceptions change and local exceptions are no longer
+allowed, they'll be deleted.
+
+Notice that new whitelist entries will not be propagated:
+ A
+ / \
+ B
+
+ group whitelist entries denied devices
+ A "c 1:3 rwm", "c 1:5 r" all the rest
+ B "c 1:3 rwm", "c 1:5 r" all the rest
+
+when adding "c *:3 rwm":
+ # echo "c *:3 rwm" >A/devices.allow
+
+the result:
+ group whitelist entries denied devices
+ A "c *:3 rwm", "c 1:5 r" all the rest
+ B "c 1:3 rwm", "c 1:5 r" all the rest
+
+but now it'll be possible to add new entries to B:
+ # echo "c 2:3 rwm" >B/devices.allow
+ # echo "c 50:3 r" >B/devices.allow
+or even
+ # echo "c *:3 rwm" >B/devices.allow
+
+Allowing or denying all by writing 'a' to devices.allow or devices.deny will
+not be possible once the device cgroup has children.
+
+4.1 Hierarchy (internal implementation)
+
+The device cgroup is implemented internally using a behavior (ALLOW, DENY)
+and a list of exceptions. The internal state is controlled using the same
+user interface to preserve compatibility with the previous whitelist-only
+implementation. Removal or addition of exceptions that reduces the access
+to devices will be propagated down the hierarchy.
+For every propagated exception, the effective rules will be re-evaluated
+based on the current parent's access rules.
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 1943fae014f..b9911c27f49 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -174,9 +174,9 @@ int clk_foo_enable(struct clk_hw *hw)
};
Below is a matrix detailing which clk_ops are mandatory based upon the
-hardware capbilities of that clock. A cell marked as "y" means
+hardware capabilities of that clock. A cell marked as "y" means
mandatory, a cell marked as "n" implies that either including that
-callback is invalid or otherwise uneccesary. Empty cells are either
+callback is invalid or otherwise unnecessary. Empty cells are either
optional or must be evaluated on a case-by-case basis.
clock hardware characteristics
@@ -231,3 +231,14 @@ To better enforce this policy, always follow this simple rule: any
statically initialized clock data MUST be defined in a separate file
from the logic that implements its ops. Basically separate the logic
from the data and all is well.
+
+ Part 6 - Disabling clock gating of unused clocks
+
+Sometimes during development it can be useful to be able to bypass the
+default disabling of unused clocks. For example, if drivers aren't enabling
+clocks properly but rely on them being on from the bootloader, bypassing
+the disabling means that the driver will remain functional while the issues
+are sorted out.
+
+To bypass this disabling, include "clk_ignore_unused" in the bootargs to the
+kernel.
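+
+For example, a command line that would otherwise boot the platform
+normally (the console and root device below are purely illustrative)
+becomes:
+
+  console=ttyS0,115200 root=/dev/mmcblk0p2 rootwait clk_ignore_unused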
diff --git a/Documentation/devicetree/bindings/clock/axi-clkgen.txt b/Documentation/devicetree/bindings/clock/axi-clkgen.txt
new file mode 100644
index 00000000000..028b493e97f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/axi-clkgen.txt
@@ -0,0 +1,22 @@
+Binding for the axi-clkgen clock generator
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "adi,axi-clkgen".
+- #clock-cells : from common clock binding; Should always be set to 0.
+- reg : Address and length of the axi-clkgen register set.
+- clocks : Phandle and clock specifier for the parent clock.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+ clock@ff000000 {
+ compatible = "adi,axi-clkgen";
+ #clock-cells = <0>;
+ reg = <0xff000000 0x1000>;
+ clocks = <&osc 1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt b/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt
new file mode 100644
index 00000000000..5757f9abfc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.txt
@@ -0,0 +1,24 @@
+Binding for simple fixed factor rate clock sources.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "fixed-factor-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- clock-div: fixed divider.
+- clock-mult: fixed multiplier.
+- clocks: parent clock.
+
+Optional properties:
+- clock-output-names : From common clock binding.
+
+Example:
+ clock {
+ compatible = "fixed-factor-clock";
+ clocks = <&parentclk>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5351.txt b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
new file mode 100644
index 00000000000..cc374651662
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/silabs,si5351.txt
@@ -0,0 +1,114 @@
+Binding for Silicon Labs Si5351a/b/c programmable i2c clock generator.
+
+Reference
+[1] Si5351A/B/C Data Sheet
+ http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf
+
+The Si5351a/b/c are programmable i2c clock generators with up to 8 output
+clocks. Si5351a also has a reduced pin-count package (MSOP10) where only
+3 output clocks are accessible. The internal structure of the clock
+generators can be found in [1].
+
+==I2C device node==
+
+Required properties:
+- compatible: shall be one of "silabs,si5351{a,a-msop,b,c}".
+- reg: i2c device address, shall be 0x60 or 0x61.
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+ handles, shall be xtal reference clock or xtal and clkin for
+ si5351c only.
+- #address-cells: shall be set to 1.
+- #size-cells: shall be set to 0.
+
+Optional properties:
+- silabs,pll-source: pair of (number, source) for each pll. Allows
+ overwriting the clock source of pll A (number=0) or B (number=1).
+
+==Child nodes==
+
+Each of the clock outputs can be overwritten individually by
+using a child node to the I2C device node. If a child node for a clock
+output is not set, the eeprom configuration is not overwritten.
+
+Required child node properties:
+- reg: number of clock output.
+
+Optional child node properties:
+- silabs,clock-source: source clock of the output divider stage N, shall be
+ 0 = multisynth N
+ 1 = multisynth 0 for output clocks 0-3, else multisynth4
+ 2 = xtal
+ 3 = clkin (si5351c only)
+- silabs,drive-strength: output drive strength in mA, shall be one of {2,4,6,8}.
+- silabs,multisynth-source: source pll A(0) or B(1) of corresponding multisynth
+ divider.
+- silabs,pll-master: boolean, multisynth can change pll frequency.
+
+==Example==
+
+/* 25MHz reference crystal */
+ref25: ref25M {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+};
+
+i2c-master-node {
+
+ /* Si5351a msop10 i2c clock generator */
+ si5351a: clock-generator@60 {
+ compatible = "silabs,si5351a-msop";
+ reg = <0x60>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ /* connect xtal input to 25MHz reference */
+ clocks = <&ref25>;
+
+ /* connect xtal input as source of pll0 and pll1 */
+ silabs,pll-source = <0 0>, <1 0>;
+
+ /*
+ * overwrite clkout0 configuration with:
+ * - 8mA output drive strength
+ * - pll0 as clock source of multisynth0
+ * - multisynth0 as clock source of output divider
+ * - multisynth0 can change pll0
+ * - set initial clock frequency of 74.25MHz
+ */
+ clkout0 {
+ reg = <0>;
+ silabs,drive-strength = <8>;
+ silabs,multisynth-source = <0>;
+ silabs,clock-source = <0>;
+ silabs,pll-master;
+ clock-frequency = <74250000>;
+ };
+
+ /*
+ * overwrite clkout1 configuration with:
+ * - 4mA output drive strength
+ * - pll1 as clock source of multisynth1
+ * - multisynth1 as clock source of output divider
+ * - multisynth1 can change pll1
+ */
+ clkout1 {
+ reg = <1>;
+ silabs,drive-strength = <4>;
+ silabs,multisynth-source = <1>;
+ silabs,clock-source = <0>;
+ silabs,pll-master;
+ };
+
+ /*
+ * overwrite clkout2 configuration with:
+ * - xtal as clock source of output divider
+ */
+ clkout2 {
+ reg = <2>;
+ silabs,clock-source = <2>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
new file mode 100644
index 00000000000..729f52426fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -0,0 +1,151 @@
+Device Tree Clock bindings for arch-sunxi
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be one of the following:
+ "allwinner,sun4i-osc-clk" - for a gatable oscillator
+ "allwinner,sun4i-pll1-clk" - for the main PLL clock
+ "allwinner,sun4i-cpu-clk" - for the CPU multiplexer clock
+ "allwinner,sun4i-axi-clk" - for the AXI clock
+ "allwinner,sun4i-axi-gates-clk" - for the AXI gates
+ "allwinner,sun4i-ahb-clk" - for the AHB clock
+ "allwinner,sun4i-ahb-gates-clk" - for the AHB gates
+ "allwinner,sun4i-apb0-clk" - for the APB0 clock
+ "allwinner,sun4i-apb0-gates-clk" - for the APB0 gates
+ "allwinner,sun4i-apb1-clk" - for the APB1 clock
+ "allwinner,sun4i-apb1-mux-clk" - for the APB1 clock muxing
+ "allwinner,sun4i-apb1-gates-clk" - for the APB1 gates
+
+Required properties for all clocks:
+- reg : shall be the control register address for the clock.
+- clocks : shall be the input parent clock(s) phandle for the clock
+- #clock-cells : from common clock binding; shall be set to 0 except for
+ "allwinner,sun4i-*-gates-clk" where it shall be set to 1
+
+Additionally, "allwinner,sun4i-*-gates-clk" clocks require:
+- clock-output-names : the corresponding gate names that the clock controls
+
+For example:
+
+osc24M: osc24M@01c20050 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-osc-clk";
+ reg = <0x01c20050 0x4>;
+ clocks = <&osc24M_fixed>;
+};
+
+pll1: pll1@01c20000 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-pll1-clk";
+ reg = <0x01c20000 0x4>;
+ clocks = <&osc24M>;
+};
+
+cpu: cpu@01c20054 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-cpu-clk";
+ reg = <0x01c20054 0x4>;
+ clocks = <&osc32k>, <&osc24M>, <&pll1>;
+};
+
+
+
+Gate clock outputs
+
+The "allwinner,sun4i-*-gates-clk" clocks provide several gatable outputs;
+their corresponding offsets as present on sun4i are listed below. Note that
+some of these gates are not present on sun5i.
+
+ * AXI gates ("allwinner,sun4i-axi-gates-clk")
+
+ DRAM 0
+
+ * AHB gates ("allwinner,sun4i-ahb-gates-clk")
+
+ USB0 0
+ EHCI0 1
+ OHCI0 2*
+ EHCI1 3
+ OHCI1 4*
+ SS 5
+ DMA 6
+ BIST 7
+ MMC0 8
+ MMC1 9
+ MMC2 10
+ MMC3 11
+ MS 12**
+ NAND 13
+ SDRAM 14
+
+ ACE 16
+ EMAC 17
+ TS 18
+
+ SPI0 20
+ SPI1 21
+ SPI2 22
+ SPI3 23
+ PATA 24
+ SATA 25**
+ GPS 26*
+
+ VE 32
+ TVD 33
+ TVE0 34
+ TVE1 35
+ LCD0 36
+ LCD1 37
+
+ CSI0 40
+ CSI1 41
+
+ HDMI 43
+ DE_BE0 44
+ DE_BE1 45
+ DE_FE0 46
+ DE_FE1 47
+
+ MP 50
+
+ MALI400 52
+
+ * APB0 gates ("allwinner,sun4i-apb0-gates-clk")
+
+ CODEC 0
+ SPDIF 1*
+ AC97 2
+ IIS 3
+
+ PIO 5
+ IR0 6
+ IR1 7
+
+ KEYPAD 10
+
+ * APB1 gates ("allwinner,sun4i-apb1-gates-clk")
+
+ I2C0 0
+ I2C1 1
+ I2C2 2
+
+ CAN 4
+ SCR 5
+ PS20 6
+ PS21 7
+
+ UART0 16
+ UART1 17
+ UART2 18
+ UART3 19
+ UART4 20
+ UART5 21
+ UART6 22
+ UART7 23
+
+Notation:
+ [*]: The datasheet didn't mention these, but they are present on AW code
+ [**]: The datasheet had this marked as "NC" but they are used on AW code
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index baf07987ae6..abd9e3cb2db 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -10,10 +10,40 @@ Optional properties:
- fsl,mc13xxx-uses-touch : Indicate the touchscreen controller is being used
Sub-nodes:
-- regulators : Contain the regulator nodes. The MC13892 regulators are
- bound using their names as listed below with their registers and bits
- for enabling.
+- regulators : Contain the regulator nodes. The regulators are bound using
+ their names as listed below with their registers and bits for enabling.
+MC13783 regulators:
+ sw1a : regulator SW1A (register 24, bit 0)
+ sw1b : regulator SW1B (register 25, bit 0)
+ sw2a : regulator SW2A (register 26, bit 0)
+ sw2b : regulator SW2B (register 27, bit 0)
+ sw3 : regulator SW3 (register 29, bit 20)
+ vaudio : regulator VAUDIO (register 32, bit 0)
+ viohi : regulator VIOHI (register 32, bit 3)
+ violo : regulator VIOLO (register 32, bit 6)
+ vdig : regulator VDIG (register 32, bit 9)
+ vgen : regulator VGEN (register 32, bit 12)
+ vrfdig : regulator VRFDIG (register 32, bit 15)
+ vrfref : regulator VRFREF (register 32, bit 18)
+ vrfcp : regulator VRFCP (register 32, bit 21)
+ vsim : regulator VSIM (register 33, bit 0)
+ vesim : regulator VESIM (register 33, bit 3)
+ vcam : regulator VCAM (register 33, bit 6)
+ vrfbg : regulator VRFBG (register 33, bit 9)
+ vvib : regulator VVIB (register 33, bit 11)
+ vrf1 : regulator VRF1 (register 33, bit 12)
+ vrf2 : regulator VRF2 (register 33, bit 15)
+ vmmc1 : regulator VMMC1 (register 33, bit 18)
+ vmmc2 : regulator VMMC2 (register 33, bit 21)
+ gpo1 : regulator GPO1 (register 34, bit 6)
+ gpo2 : regulator GPO2 (register 34, bit 8)
+ gpo3 : regulator GPO3 (register 34, bit 10)
+ gpo4 : regulator GPO4 (register 34, bit 12)
+ pwgt1spi : regulator PWGT1SPI (register 34, bit 15)
+ pwgt2spi : regulator PWGT2SPI (register 34, bit 16)
+
+MC13892 regulators:
vcoincell : regulator VCOINCELL (register 13, bit 23)
sw1 : regulator SW1 (register 24, bit 0)
sw2 : regulator SW2 (register 25, bit 0)
diff --git a/Documentation/devicetree/bindings/regulator/max8952.txt b/Documentation/devicetree/bindings/regulator/max8952.txt
new file mode 100644
index 00000000000..866fcdd0f4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8952.txt
@@ -0,0 +1,52 @@
+Maxim MAX8952 voltage regulator
+
+Required properties:
+- compatible: must be equal to "maxim,max8952"
+- reg: I2C slave address, usually 0x60
+- max8952,dvs-mode-microvolt: array of 4 integer values defining DVS voltages
+ in microvolts. All values must be from range <770000, 1400000>
+- any required generic properties defined in regulator.txt
+
+Optional properties:
+- max8952,vid-gpios: array of two GPIO pins used for DVS voltage selection
+- max8952,en-gpio: GPIO used to control enable status of regulator
+- max8952,default-mode: index of default DVS voltage, from <0, 3> range
+- max8952,sync-freq: sync frequency, must be one of following values:
+ - 0: 26 MHz
+ - 1: 13 MHz
+ - 2: 19.2 MHz
+ Defaults to 26 MHz if not specified.
+- max8952,ramp-speed: voltage ramp speed, must be one of following values:
+ - 0: 32mV/us
+ - 1: 16mV/us
+ - 2: 8mV/us
+ - 3: 4mV/us
+ - 4: 2mV/us
+ - 5: 1mV/us
+ - 6: 0.5mV/us
+ - 7: 0.25mV/us
+ Defaults to 32mV/us if not specified.
+- any available generic properties defined in regulator.txt
+
+Example:
+
+ vdd_arm_reg: pmic@60 {
+ compatible = "maxim,max8952";
+ reg = <0x60>;
+
+ /* max8952-specific properties */
+ max8952,vid-gpios = <&gpx0 3 0>, <&gpx0 4 0>;
+ max8952,en-gpio = <&gpx0 1 0>;
+ max8952,default-mode = <0>;
+ max8952,dvs-mode-microvolt = <1250000>, <1200000>,
+ <1050000>, <950000>;
+ max8952,sync-freq = <0>;
+ max8952,ramp-speed = <0>;
+
+ /* generic regulator properties */
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <770000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
diff --git a/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt b/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt
new file mode 100644
index 00000000000..8bf89c64364
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/brcm,bcm2835-spi.txt
@@ -0,0 +1,22 @@
+Broadcom BCM2835 SPI0 controller
+
+The BCM2835 contains two forms of SPI master controller, one known simply as
+SPI0, and the other known as the "Universal SPI Master", which is part of
+the auxiliary block. This binding applies to the SPI0 controller.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-spi".
+- reg: Should contain register location and length.
+- interrupts: Should contain interrupt.
+- clocks: The clock feeding the SPI controller.
+
+Example:
+
+spi@20204000 {
+ compatible = "brcm,bcm2835-spi";
+ reg = <0x7e204000 0x1000>;
+ interrupts = <2 22>;
+ clocks = <&clk_spi>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
index 777abd7399d..b032dd76e9d 100644
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-spi.txt
@@ -4,7 +4,7 @@ Required properties:
- cell-index : QE SPI subblock index.
0: QE subblock SPI1
1: QE subblock SPI2
-- compatible : should be "fsl,spi".
+- compatible : should be "fsl,spi" or "aeroflexgaisler,spictrl".
- mode : the SPI operation mode, it can be "cpu" or "cpu-qe".
- reg : Offset and length of the register set for the device
- interrupts : <a b> where a is the interrupt number and b is a
@@ -14,6 +14,7 @@ Required properties:
controller you have.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
+- clock-frequency : input clock frequency to non-FSL_SOC cores
Optional properties:
- gpios : specifies the gpio pins to be used for chipselects.
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
new file mode 100644
index 00000000000..91ff771c7e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
@@ -0,0 +1,26 @@
+NVIDIA Tegra114 SPI controller.
+
+Required properties:
+- compatible : should be "nvidia,tegra114-spi".
+- reg: Should contain SPI registers location and length.
+- interrupts: Should contain SPI interrupts.
+- nvidia,dma-request-selector : The Tegra DMA controller's phandle and
+ request selector for this SPI controller.
+- The controller also requires a clock named "spi", as per the binding document
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Recommended properties:
+- spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+Example:
+
+spi@7000d600 {
+ compatible = "nvidia,tegra114-spi";
+ reg = <0x7000d600 0x200>;
+ interrupts = <0 82 0x04>;
+ nvidia,dma-request-selector = <&apbdma 16>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-samsung.txt b/Documentation/devicetree/bindings/spi/spi-samsung.txt
index a15ffeddfba..86aa061f069 100644
--- a/Documentation/devicetree/bindings/spi/spi-samsung.txt
+++ b/Documentation/devicetree/bindings/spi/spi-samsung.txt
@@ -31,9 +31,6 @@ Required Board Specific Properties:
- #address-cells: should be 1.
- #size-cells: should be 0.
-- gpios: The gpio specifier for clock, mosi and miso interface lines (in the
- order specified). The format of the gpio specifier depends on the gpio
- controller.
Optional Board Specific Properties:
@@ -86,9 +83,8 @@ Example:
spi_0: spi@12d20000 {
#address-cells = <1>;
#size-cells = <0>;
- gpios = <&gpa2 4 2 3 0>,
- <&gpa2 6 2 3 0>,
- <&gpa2 7 2 3 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_bus>;
w25q80bw@0 {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 19e1ef73ab0..4d1919bf233 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -5,6 +5,7 @@ using them to avoid name-space collisions.
ad Avionic Design GmbH
adi Analog Devices, Inc.
+aeroflexgaisler Aeroflex Gaisler AB
ak Asahi Kasei Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
apm Applied Micro Circuits Corporation (APM)
@@ -48,6 +49,7 @@ samsung Samsung Semiconductor
sbs Smart Battery System
schindler Schindler
sil Silicon Image
+silabs Silicon Laboratories
simtek
sirf SiRF Technology, Inc.
snps Synopsys, Inc.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8ccbf27aead..12bbce346d5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -44,6 +44,7 @@ parameter is applicable:
AVR32 AVR32 architecture is enabled.
AX25 Appropriate AX.25 support is enabled.
BLACKFIN Blackfin architecture is enabled.
+ CLK Common clock infrastructure is enabled.
DRM Direct Rendering Management support is enabled.
DYNAMIC_DEBUG Build in debug messages and enable them at runtime
EDD BIOS Enhanced Disk Drive Services (EDD) is enabled
@@ -320,6 +321,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes
+ alloc_snapshot [FTRACE]
+ Allocate the ftrace snapshot buffer on boot up when the
+ main buffer is allocated. This is handy if you are debugging
+ and need to use tracing_snapshot() on boot up, and
+ do not want to use tracing_snapshot_alloc() as it needs
+ to be done where GFP_KERNEL allocations are allowed.
+
amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
@@ -465,6 +473,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
+ clk_ignore_unused
+ [CLK]
+ Keep all clocks already enabled by bootloader on,
+ even if no driver has claimed them. This is useful
+ for debug and development, but should not be
+ needed on a platform with proper driver support.
+ For more information, see Documentation/clk.txt.
clock= [BUGS=X86-32, HW] gettimeofday clocksource override.
[Deprecated]
@@ -3245,6 +3260,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
or other driver-specific files in the
Documentation/watchdog/ directory.
+ workqueue.disable_numa
+ By default, all work items queued to unbound
+ workqueues are affine to the NUMA nodes they're
+ issued on, which results in better behavior in
+ general. If NUMA affinity needs to be disabled for
+ whatever reason, this option can be used. Note
+ that this also can be controlled per-workqueue for
+ workqueues visible under /sys/bus/workqueue/.
+
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
diff --git a/Documentation/this_cpu_ops.txt b/Documentation/this_cpu_ops.txt
new file mode 100644
index 00000000000..1a4ce7e3e05
--- /dev/null
+++ b/Documentation/this_cpu_ops.txt
@@ -0,0 +1,205 @@
+this_cpu operations
+-------------------
+
+this_cpu operations are a way of optimizing access to per cpu
+variables associated with the *currently* executing processor through
+the use of segment registers (or a dedicated register where the cpu
+permanently stores the beginning of the per cpu area for a specific
+processor).
+
+The this_cpu operations add a per cpu variable offset to the processor
+specific percpu base and encode that operation in the instruction
+operating on the per cpu variable.
+
+This means there are no atomicity issues between the calculation of
+the offset and the operation on the data. Therefore it is not
+necessary to disable preempt or interrupts to ensure that the
+processor is not changed between the calculation of the address and
+the operation on the data.
+
+Read-modify-write operations are of particular interest. Frequently
+processors have special lower latency instructions that can operate
+without the typical synchronization overhead but still provide some
+sort of relaxed atomicity guarantee. The x86 for example can execute
+RMW (Read Modify Write) instructions like inc/dec/cmpxchg without the
+lock prefix and the associated latency penalty.
+
+Access to the variable without the lock prefix is not synchronized but
+synchronization is not necessary since we are dealing with per cpu
+data specific to the currently executing processor. Only the current
+processor should be accessing that variable and therefore there are no
+concurrency issues with other processors in the system.
+
+On x86 the fs: or the gs: segment registers contain the base of the
+per cpu area. It is then possible to simply use the segment override
+to relocate a per cpu relative address to the proper per cpu area for
+the processor. So the relocation to the per cpu base is encoded in the
+instruction via a segment register prefix.
+
+For example:
+
+ DEFINE_PER_CPU(int, x);
+ int z;
+
+ z = this_cpu_read(x);
+
+results in a single instruction
+
+ mov ax, gs:[x]
+
+instead of a sequence that first calculates the address and then
+fetches from that address, which is what the ordinary percpu operations
+do. Before this_cpu_ops, such a sequence also required preempt
+disable/enable to prevent the kernel from moving the thread to a
+different processor while the calculation is performed.
+
+The main use of the this_cpu operations has been to optimize counter
+operations.
+
+ this_cpu_inc(x)
+
+results in the following single instruction (no lock prefix!)
+
+ inc gs:[x]
+
+instead of the following operations required if there is no segment
+register.
+
+ int *y;
+ int cpu;
+
+ cpu = get_cpu();
+ y = per_cpu_ptr(&x, cpu);
+ (*y)++;
+ put_cpu();
+
+Note that these operations can only be used on percpu data that is
+reserved for a specific processor. Without disabling preemption in the
+surrounding code this_cpu_inc() will only guarantee that one of the
+percpu counters is correctly incremented. However, there is no
+guarantee that the OS will not move the process directly before or
+after the this_cpu instruction is executed. In general this means that
+the values of the individual counters for each processor are
+meaningless. The sum of all the per cpu counters is the only value
+that is of interest.
+
+Per cpu variables are used for performance reasons. Bouncing cache
+lines can be avoided if multiple processors concurrently go through
+the same code paths. Since each processor has its own per cpu
+variables no concurrent cacheline updates take place. The price that
+has to be paid for this optimization is the need to add up the per cpu
+counters when the value of the counter is needed.
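+
+As an illustration (not taken from any particular subsystem), summing
+such a counter over all processors typically looks like this:
+
+	DEFINE_PER_CPU(unsigned long, packets);
+
+	unsigned long total_packets(void)
+	{
+		unsigned long sum = 0;
+		int cpu;
+
+		for_each_possible_cpu(cpu)
+			sum += per_cpu(packets, cpu);
+
+		return sum;
+	}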
+
+
+Special operations:
+-------------------
+
+ y = this_cpu_ptr(&x)
+
+Takes the offset of a per cpu variable (&x !) and returns the address
+of the per cpu variable that belongs to the currently executing
+processor. this_cpu_ptr avoids multiple steps that the common
+get_cpu/put_cpu sequence requires. No processor number is
+available. Instead the offset of the local per cpu area is simply
+added to the percpu offset.
+
+
+
+Per cpu variables and offsets
+-----------------------------
+
+Per cpu variables have *offsets* to the beginning of the percpu
+area. They do not have addresses although they look like that in the
+code. Offsets cannot be directly dereferenced. The offset must be
+added to a base pointer of a percpu area of a processor in order to
+form a valid address.
+
+Therefore the use of x or &x outside of the context of per cpu
+operations is invalid and will generally be treated like a NULL
+pointer dereference.
+
+In the context of per cpu operations
+
+ x is a per cpu variable. Most this_cpu operations take a per cpu
+ variable.
+
+ &x is the *offset* of a per cpu variable. this_cpu_ptr() takes
+ the offset of a per cpu variable which makes this look a bit
+ strange.
+
+
+
+Operations on a field of a per cpu structure
+--------------------------------------------
+
+Let's say we have a percpu structure
+
+ struct s {
+ int n,m;
+ };
+
+ DEFINE_PER_CPU(struct s, p);
+
+
+Operations on these fields are straightforward
+
+ this_cpu_inc(p.m)
+
+ z = this_cpu_cmpxchg(p.m, 0, 1);
+
+
+If we have an offset to struct s:
+
+ struct s __percpu *ps = &p;
+
+ z = this_cpu_dec(ps->m);
+
+ z = this_cpu_inc_return(ps->n);
+
+
+The calculation of the pointer may require the use of this_cpu_ptr()
+if we do not make use of this_cpu ops later to manipulate fields:
+
+ struct s *pp;
+
+ pp = this_cpu_ptr(&p);
+
+ pp->m--;
+
+ z = pp->n++;
+
+
+Variants of this_cpu ops
+-------------------------
+
+this_cpu ops are interrupt safe. Some architectures do not support
+these per cpu local operations. In that case the operation must be
+replaced by code that disables interrupts, then does the operations
+that are guaranteed to be atomic, and then re-enables interrupts. Doing
+so is expensive. If there are other reasons why the scheduler cannot
+change the processor we are executing on, then there is no reason to
+disable interrupts. For that purpose the __this_cpu operations are
+provided. For example:
+
+ __this_cpu_inc(x);
+
+This will increment x and will not fall back to code that disables
+interrupts on platforms that cannot accomplish atomicity through
+address relocation and a Read-Modify-Write operation in the same
+instruction.
+
+
+
+&this_cpu_ptr(pp)->n vs this_cpu_ptr(&pp->n)
+--------------------------------------------
+
+The first operation takes the offset and forms an address and then
+adds the offset of the n field.
+
+The second one first adds the two offsets and then does the
+relocation. IMHO the second form looks cleaner and has an easier time
+with (). The second form also is consistent with the way
+this_cpu_read() and friends are used.
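+
+As a small illustrative sketch (reusing the struct s and pp definitions
+from the per cpu structure section above; not a complete function):
+
+	int *a = &this_cpu_ptr(pp)->n;	/* relocate pp, then take the address of n */
+	int *b = this_cpu_ptr(&pp->n);	/* add both offsets, then relocate */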
+
+
+Christoph Lameter, April 3rd, 2013
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index a372304aef1..bfe8c29b1f1 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -8,6 +8,7 @@ Copyright 2008 Red Hat Inc.
Reviewers: Elias Oltmanns, Randy Dunlap, Andrew Morton,
John Kacur, and David Teigland.
Written for: 2.6.28-rc2
+Updated for: 3.10
Introduction
------------
@@ -17,13 +18,16 @@ designers of systems to find what is going on inside the kernel.
It can be used for debugging or analyzing latencies and
performance issues that take place outside of user-space.
-Although ftrace is the function tracer, it also includes an
-infrastructure that allows for other types of tracing. Some of
-the tracers that are currently in ftrace include a tracer to
-trace context switches, the time it takes for a high priority
-task to run after it was woken up, the time interrupts are
-disabled, and more (ftrace allows for tracer plugins, which
-means that the list of tracers can always grow).
+Although ftrace is typically considered the function tracer, it
+is really a framework of several assorted tracing utilities.
+There's latency tracing to examine what occurs between the time
+interrupts are disabled and enabled, as well as for preemption and
+from the time a task is woken to when it is actually scheduled in.
+
+One of the most common uses of ftrace is event tracing.
+Throughout the kernel there are hundreds of static event points that
+can be enabled via the debugfs file system to see what is
+going on in certain parts of the kernel.
Implementation Details
@@ -61,7 +65,7 @@ the extended "/sys/kernel/debug/tracing" path name.
That's it! (assuming that you have ftrace configured into your kernel)
-After mounting the debugfs, you can see a directory called
+After mounting debugfs, you can see a directory called
"tracing". This directory contains the control and output files
of ftrace. Here is a list of some of the key files:
@@ -84,7 +88,9 @@ of ftrace. Here is a list of some of the key files:
This sets or displays whether writing to the trace
ring buffer is enabled. Echo 0 into this file to disable
- the tracer or 1 to enable it.
+ the tracer or 1 to enable it. Note, this only disables
+ writing to the ring buffer, the tracing overhead may
+ still be occurring.
trace:
@@ -109,7 +115,15 @@ of ftrace. Here is a list of some of the key files:
This file lets the user control the amount of data
that is displayed in one of the above output
- files.
+ files. Options also exist to modify how a tracer
+ or events work (stack traces, timestamps, etc).
+
+ options:
+
+ This is a directory that has a file for every available
+ trace option (also in trace_options). Options may also be set
+ or cleared by writing a "1" or "0" respectively into the
+ corresponding file with the option name.
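+
+ For example, assuming the "stacktrace" option exists on the running
+ kernel, either of the following enables it:
+
+ # echo 1 > options/stacktrace
+ # echo stacktrace > trace_options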
tracing_max_latency:
@@ -121,10 +135,17 @@ of ftrace. Here is a list of some of the key files:
latency is greater than the value in this
file. (in microseconds)
+ tracing_thresh:
+
+ Some latency tracers will record a trace whenever the
+ latency is greater than the number in this file.
+ Only active when the file contains a number greater than 0.
+ (in microseconds)
+
buffer_size_kb:
This sets or displays the number of kilobytes each CPU
- buffer can hold. The tracer buffers are the same size
+ buffer holds. By default, the trace buffers are the same size
for each CPU. The displayed number is the size of the
CPU buffer and not total size of all buffers. The
trace buffers are allocated in pages (blocks of memory
@@ -133,16 +154,30 @@ of ftrace. Here is a list of some of the key files:
than requested, the rest of the page will be used,
making the actual allocation bigger than requested.
( Note, the size may not be a multiple of the page size
- due to buffer management overhead. )
+ due to buffer management meta-data. )
- This can only be updated when the current_tracer
- is set to "nop".
+ buffer_total_size_kb:
+
+ This displays the total combined size of all the trace buffers.
+
+ free_buffer:
+
+ If a process is performing the tracing, and the ring buffer
+ should be shrunk ("freed") when the process is finished, even
+ if it were to be killed by a signal, this file can be used
+ for that purpose. On close of this file, the ring buffer will
+ be resized to its minimum size. If the process that is tracing
+ also holds this file open, then when that process exits, its
+ file descriptor for this file will be closed, and in doing so,
+ the ring buffer will be "freed".
+
+ It may also stop tracing if the disable_on_free option is set.
tracing_cpumask:
This is a mask that lets the user only trace
- on specified CPUS. The format is a hex string
- representing the CPUS.
+ on specified CPUs. The format is a hex string
+ representing the CPUs.
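+
+ For example, to limit tracing to CPUs 0 and 1:
+
+ # echo 3 > tracing_cpumask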
set_ftrace_filter:
@@ -183,6 +218,261 @@ of ftrace. Here is a list of some of the key files:
"set_ftrace_notrace". (See the section "dynamic ftrace"
below for more details.)
+ enabled_functions:
+
+ This file is more for debugging ftrace, but can also be useful
+ in seeing if any function has a callback attached to it.
+ Not only does the trace infrastructure use the ftrace function
+ tracing utility, but other subsystems might too. This file
+ displays all functions that have a callback attached to them
+ as well as the number of callbacks that have been attached.
+ Note, a callback may also call multiple functions which will
+ not be listed in this count.
+
+ If the callback registered to be traced by a function with
+ the "save regs" attribute (thus even more overhead), a 'R'
+ will be displayed on the same line as the function that
+ is returning registers.
+
+ function_profile_enabled:
+
+ When set it will enable all functions with either the function
+ tracer, or if enabled, the function graph tracer. It will
+ keep a histogram of the number of functions that were called
+ and if run with the function graph tracer, it will also keep
+ track of the time spent in those functions. The histogram
+ content can be displayed in the files:
+
+ trace_stats/function<cpu> ( function0, function1, etc).
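+
+ A typical session (run from the tracing directory; the exact
+ columns shown depend on whether the function graph tracer is
+ configured in):
+
+ # echo 1 > function_profile_enabled
+ # cat trace_stats/function0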
+
+ trace_stats:
+
+ A directory that holds different tracing stats.
+
+ kprobe_events:
+
+ Enable dynamic trace points. See kprobetrace.txt.
+
+ kprobe_profile:
+
+ Dynamic trace points stats. See kprobetrace.txt.
+
+ max_graph_depth:
+
+ Used with the function graph tracer. This is the max depth
+ it will trace into a function. Setting this to a value of
+ one will show only the first kernel function that is called
+ from user space.
+
+ printk_formats:
+
+ This is for tools that read the raw format files. If an event in
+ the ring buffer references a string (currently only trace_printk()
+ does this), only a pointer to the string is recorded into the buffer
+ and not the string itself. This prevents tools from knowing what
+ that string was. This file displays the string and address for
+ the string allowing tools to map the pointers to what the
+ strings were.
+
+ saved_cmdlines:
+
+ Only the pid of the task is recorded in a trace event unless
+ the event specifically saves the task comm as well. Ftrace
+ makes a cache of pid mappings to comms to try to display
+ comms for events. If a pid for a comm is not listed, then
+ "<...>" is displayed in the output.
+
+ snapshot:
+
+ This displays the "snapshot" buffer and also lets the user
+ take a snapshot of the current running trace.
+ See the "Snapshot" section below for more details.
+
+ stack_max_size:
+
+ When the stack tracer is activated, this will display the
+ maximum stack size it has encountered.
+ See the "Stack Trace" section below.
+
+ stack_trace:
+
+ This displays the stack back trace of the largest stack
+ that was encountered when the stack tracer is activated.
+ See the "Stack Trace" section below.
+
+ stack_trace_filter:
+
+ This is similar to "set_ftrace_filter" but it limits what
+ functions the stack tracer will check.
+
+ trace_clock:
+
+ Whenever an event is recorded into the ring buffer, a
+ "timestamp" is added. This stamp comes from a specified
+ clock. By default, ftrace uses the "local" clock. This
+ clock is very fast and strictly per cpu, but on some
+ systems it may not be monotonic with respect to other
+ CPUs. In other words, the local clocks may not be in sync
+ with local clocks on other CPUs.
+
+ Usual clocks for tracing:
+
+ # cat trace_clock
+ [local] global counter x86-tsc
+
+ local: Default clock, but may not be in sync across CPUs
+
+ global: This clock is in sync with all CPUs but may
+ be a bit slower than the local clock.
+
+ counter: This is not a clock at all, but literally an atomic
+ counter. It counts up one by one, but is in sync
+ with all CPUs. This is useful when you need to
+ know exactly the order events occurred with respect to
+ each other on different CPUs.
+
+ uptime: This uses the jiffies counter and the time stamp
+ is relative to the time since boot up.
+
+ perf: This makes ftrace use the same clock that perf uses.
+ Eventually perf will be able to read ftrace buffers
+ and this will help out in interleaving the data.
+
+ x86-tsc: Architectures may define their own clocks. For
+ example, x86 uses its own TSC cycle clock here.
+
+ To set a clock, simply echo the clock name into this file.
+
+ echo global > trace_clock
+
+ trace_marker:
+
+ This is a very useful file for synchronizing user space
+ with events happening in the kernel. Writing strings into
+ this file will be written into the ftrace buffer.
+
+ It is useful in applications to open this file at the start
+ of the application and just reference the file descriptor
+ for the file.
+
+ void trace_write(const char *fmt, ...)
+ {
+ va_list ap;
+ char buf[256];
+ int n;
+
+ if (trace_fd < 0)
+ return;
+
+ va_start(ap, fmt);
+ n = vsnprintf(buf, 256, fmt, ap);
+ va_end(ap);
+
+ write(trace_fd, buf, n);
+ }
+
+ start:
+
+ trace_fd = open("trace_marker", O_WRONLY);
+
+ uprobe_events:
+
+ Add dynamic tracepoints in programs.
+ See uprobetracer.txt
+
+ uprobe_profile:
+
+ Uprobe statistics. See uprobetracer.txt
+
+ instances:
+
+ This is a way to make multiple trace buffers where different
+ events can be recorded in different buffers.
+ See "Instances" section below.
+
+ events:
+
+ This is the trace event directory. It holds event tracepoints
+ (also known as static tracepoints) that have been compiled
+ into the kernel. It shows what event tracepoints exist
+ and how they are grouped by system. There are "enable"
+ files at various levels that can enable the tracepoints
+ when a "1" is written to them.
+
+ See events.txt for more information.
+
+ per_cpu:
+
+ This is a directory that contains the trace per_cpu information.
+
+ per_cpu/cpu0/buffer_size_kb:
+
+ The ftrace buffer is defined per_cpu. That is, there's a separate
+ buffer for each CPU to allow writes to be done atomically,
+ and free from cache bouncing. These buffers may be sized
+ differently for each CPU. This file is similar to the buffer_size_kb
+ file, but it only displays or sets the buffer size for the
+ specific CPU (here, cpu0).
+
+ per_cpu/cpu0/trace:
+
+ This is similar to the "trace" file, but it will only display
+ the data specific for the CPU. If written to, it only clears
+ the specific CPU buffer.
+
+ per_cpu/cpu0/trace_pipe
+
+ This is similar to the "trace_pipe" file, and is a consuming
+ read, but it will only display (and consume) the data specific
+ for the CPU.
+
+ per_cpu/cpu0/trace_pipe_raw
+
+ For tools that can parse the ftrace ring buffer binary format,
+ the trace_pipe_raw file can be used to extract the data
+ from the ring buffer directly. With the use of the splice()
+ system call, the buffer data can be quickly transferred to
+ a file or to the network where a server is collecting the
+ data. (A minimal splice() sketch follows this list of files.)
+
+ Like trace_pipe, this is a consuming reader, where multiple
+ reads will always produce different data.
+
+ per_cpu/cpu0/snapshot:
+
+ This is similar to the main "snapshot" file, but will only
+ snapshot the current CPU (if supported). It only displays
+ the content of the snapshot for a given CPU, and if
+ written to, only clears this CPU buffer.
+
+ per_cpu/cpu0/snapshot_raw:
+
+ Similar to the trace_pipe_raw, but will read the binary format
+ from the snapshot buffer for the given CPU.
+
+ per_cpu/cpu0/stats:
+
+ This displays certain stats about the ring buffer:
+
+ entries: The number of events that are still in the buffer.
+
+ overrun: The number of lost events due to overwriting when
+ the buffer was full.
+
+ commit overrun: Should always be zero.
+ This gets set if so many events happened within a nested
+ event (ring buffer is re-entrant), that it fills the
+ buffer and starts dropping events.
+
+ bytes: Bytes actually read (not overwritten).
+
+ oldest event ts: The oldest timestamp in the buffer
+
+ now ts: The current timestamp
+
+ dropped events: Events lost due to overwrite option being off.
+
+ read events: The number of events read.
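+
+ As noted for per_cpu/cpu0/trace_pipe_raw above, the splice() system
+ call can move the binary buffer data without copying it through user
+ space. The following is only a rough sketch (the output path, page
+ size and error handling are illustrative, and the loop blocks when
+ no data is available, just like trace_pipe):
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	/* Drain per_cpu/cpu0/trace_pipe_raw (relative to the tracing
+	 * directory) into out_path. */
+	int dump_cpu0_raw(const char *out_path)
+	{
+		int trace_fd, out_fd, pfd[2];
+		ssize_t n;
+
+		trace_fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
+		out_fd = open(out_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+		if (trace_fd < 0 || out_fd < 0 || pipe(pfd) < 0)
+			return -1;
+
+		/* Move ring buffer pages into the pipe, then to the file. */
+		while ((n = splice(trace_fd, NULL, pfd[1], NULL,
+				   4096, SPLICE_F_MOVE)) > 0)
+			splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
+
+		close(trace_fd);
+		close(out_fd);
+		return 0;
+	}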
The Tracers
-----------
@@ -234,11 +524,6 @@ Here is the list of current tracers that may be configured.
RT tasks (as the current "wakeup" does). This is useful
for those interested in wake up timings of RT tasks.
- "hw-branch-tracer"
-
- Uses the BTS CPU feature on x86 CPUs to traces all
- branches executed.
-
"nop"
This is the "trace nothing" tracer. To remove all
@@ -261,70 +546,100 @@ Here is an example of the output format of the file "trace"
--------
# tracer: function
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- bash-4251 [01] 10152.583854: path_put <-path_walk
- bash-4251 [01] 10152.583855: dput <-path_put
- bash-4251 [01] 10152.583855: _atomic_dec_and_lock <-dput
+# entries-in-buffer/entries-written: 140080/250280 #P:4
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ bash-1977 [000] .... 17284.993652: sys_close <-system_call_fastpath
+ bash-1977 [000] .... 17284.993653: __close_fd <-sys_close
+ bash-1977 [000] .... 17284.993653: _raw_spin_lock <-__close_fd
+ sshd-1974 [003] .... 17284.993653: __srcu_read_unlock <-fsnotify
+ bash-1977 [000] .... 17284.993654: add_preempt_count <-_raw_spin_lock
+ bash-1977 [000] ...1 17284.993655: _raw_spin_unlock <-__close_fd
+ bash-1977 [000] ...1 17284.993656: sub_preempt_count <-_raw_spin_unlock
+ bash-1977 [000] .... 17284.993657: filp_close <-__close_fd
+ bash-1977 [000] .... 17284.993657: dnotify_flush <-filp_close
+ sshd-1974 [003] .... 17284.993658: sys_select <-system_call_fastpath
--------
A header is printed with the tracer name that is represented by
-the trace. In this case the tracer is "function". Then a header
-showing the format. Task name "bash", the task PID "4251", the
-CPU that it was running on "01", the timestamp in <secs>.<usecs>
-format, the function name that was traced "path_put" and the
-parent function that called this function "path_walk". The
-timestamp is the time at which the function was entered.
+the trace. In this case the tracer is "function". Then it shows the
+number of events in the buffer as well as the total number of entries
+that were written. The difference is the number of entries that were
+lost due to the buffer filling up (250280 - 140080 = 110200 events
+lost).
+
+The header explains the content of the events. Task name "bash", the task
+PID "1977", the CPU that it was running on "000", the latency format
+(explained below), the timestamp in <secs>.<usecs> format, the
+function name that was traced "sys_close" and the parent function that
+called this function "system_call_fastpath". The timestamp is the time
+at which the function was entered.
Latency trace format
--------------------
-When the latency-format option is enabled, the trace file gives
-somewhat more information to see why a latency happened.
-Here is a typical trace.
+When the latency-format option is enabled or when one of the latency
+tracers is set, the trace file gives somewhat more information to see
+why a latency happened. Here is a typical trace.
# tracer: irqsoff
#
-irqsoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 97 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: apic_timer_interrupt
- => ended at: do_softirq
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- <idle>-0 0d..1 0us+: trace_hardirqs_off_thunk (apic_timer_interrupt)
- <idle>-0 0d.s. 97us : __do_softirq (do_softirq)
- <idle>-0 0d.s1 98us : trace_hardirqs_on (do_softirq)
+# irqsoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: __lock_task_sighand
+# => ended at: _raw_spin_unlock_irqrestore
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ ps-6143 2d... 0us!: trace_hardirqs_off <-__lock_task_sighand
+ ps-6143 2d..1 259us+: trace_hardirqs_on <-_raw_spin_unlock_irqrestore
+ ps-6143 2d..1 263us+: time_hardirqs_on <-_raw_spin_unlock_irqrestore
+ ps-6143 2d..1 306us : <stack trace>
+ => trace_hardirqs_on_caller
+ => trace_hardirqs_on
+ => _raw_spin_unlock_irqrestore
+ => do_task_stat
+ => proc_tgid_stat
+ => proc_single_show
+ => seq_read
+ => vfs_read
+ => sys_read
+ => system_call_fastpath
This shows that the current tracer is "irqsoff" tracing the time
-for which interrupts were disabled. It gives the trace version
-and the version of the kernel upon which this was executed on
-(2.6.26-rc8). Then it displays the max latency in microsecs (97
-us). The number of trace entries displayed and the total number
-recorded (both are three: #3/3). The type of preemption that was
-used (PREEMPT). VP, KP, SP, and HP are always zero and are
-reserved for later use. #P is the number of online CPUS (#P:2).
+for which interrupts were disabled. It gives the trace version (which
+never changes) and the version of the kernel upon which this was
+executed (3.8.0-test+). Then it displays the max latency in
+microseconds (259 us). The number
+of trace entries displayed and the total number (both are four: #4/4).
+VP, KP, SP, and HP are always zero and are reserved for later use.
+#P is the number of online CPUs (#P:4).
The task is the process that was running when the latency
-occurred. (swapper pid: 0).
+occurred. (ps pid: 6143).
The start and stop (the functions in which the interrupts were
disabled and enabled respectively) that caused the latencies:
- apic_timer_interrupt is where the interrupts were disabled.
- do_softirq is where they were enabled again.
+ __lock_task_sighand is where the interrupts were disabled.
+ _raw_spin_unlock_irqrestore is where they were enabled again.
The next lines after the header are the trace itself. The header
explains which is which.
@@ -367,16 +682,43 @@ The above is mostly meaningful for kernel developers.
The rest is the same as the 'trace' file.
+ Note, the latency tracers will usually end with a back trace
+ to easily find where the latency occurred.
trace_options
-------------
-The trace_options file is used to control what gets printed in
-the trace output. To see what is available, simply cat the file:
+The trace_options file (or the options directory) is used to control
+what gets printed in the trace output, or manipulate the tracers.
+To see what is available, simply cat the file:
cat trace_options
- print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
- noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj
+print-parent
+nosym-offset
+nosym-addr
+noverbose
+noraw
+nohex
+nobin
+noblock
+nostacktrace
+trace_printk
+noftrace_preempt
+nobranch
+annotate
+nouserstacktrace
+nosym-userobj
+noprintk-msg-only
+context-info
+latency-format
+sleep-time
+graph-time
+record-cmd
+overwrite
+nodisable_on_free
+irq-info
+markers
+function-trace
To disable one of the options, echo in the option prepended with
"no".
@@ -428,13 +770,34 @@ Here are the available options:
bin - This will print out the formats in raw binary.
- block - TBD (needs update)
+ block - When set, reading trace_pipe will not block when polled.
stacktrace - This is one of the options that changes the trace
itself. When a trace is recorded, so is the stack
of functions. This allows for back traces of
trace sites.
+ trace_printk - Can disable trace_printk() from writing into the buffer.
+
+ branch - Enable branch tracing with the tracer.
+
+ annotate - It is sometimes confusing when the CPU buffers are full
+ and one CPU buffer had a lot of events recently, thus
+ a shorter time frame, where another CPU may have had only
+ a few events, which lets it have older events. When
+ the trace is reported, it shows the oldest events first,
+ and it may look like only one CPU ran (the one with the
+ oldest events). When the annotate option is set, it will
+ display when a new CPU buffer started:
+
+ <idle>-0 [001] dNs4 21169.031481: wake_up_idle_cpu <-add_timer_on
+ <idle>-0 [001] dNs4 21169.031482: _raw_spin_unlock_irqrestore <-add_timer_on
+ <idle>-0 [001] .Ns4 21169.031484: sub_preempt_count <-_raw_spin_unlock_irqrestore
+##### CPU 2 buffer started ####
+ <idle>-0 [002] .N.1 21169.031484: rcu_idle_exit <-cpu_idle
+ <idle>-0 [001] .Ns3 21169.031484: _raw_spin_unlock <-clocksource_watchdog
+ <idle>-0 [001] .Ns3 21169.031485: sub_preempt_count <-_raw_spin_unlock
+
userstacktrace - This option changes the trace. It records a
stacktrace of the current userspace thread.
@@ -451,9 +814,13 @@ Here are the available options:
a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0
x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
- sched-tree - trace all tasks that are on the runqueue, at
- every scheduling event. Will add overhead if
- there's a lot of tasks running at once.
+
+ printk-msg-only - When set, trace_printk()s will only show the format
+ and not their parameters (if trace_bprintk() or
+ trace_bputs() was used to save the trace_printk()).
+
+ context-info - Show the comm, PID, timestamp, CPU, and other
+ context data for each event. When disabled, only the
+ event data itself is shown.
latency-format - This option changes the trace. When
it is enabled, the trace displays
@@ -461,31 +828,61 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
latencies, as described in "Latency
trace format".
+ sleep-time - When running the function graph tracer, include
+ the time a task spends scheduled out as part of its
+ function. When enabled, the time the task was scheduled
+ out is accounted to the function call.
+
+ graph-time - When running the function graph tracer, include the
+ time spent in nested function calls. When this is not set,
+ the time reported for the function will only include
+ the time the function itself executed, not the time
+ for functions that it called.
+
+ record-cmd - When any event or tracer is enabled, a hook is enabled
+ in the sched_switch trace point to fill the comm cache
+ with mapped pids and comms. But this may cause some
+ overhead, and if you only care about pids and not the
+ name of the task, disabling this option can lower the
+ impact of tracing.
+
overwrite - This controls what happens when the trace buffer is
full. If "1" (default), the oldest events are
discarded and overwritten. If "0", then the newest
events are discarded.
+ (see per_cpu/cpu0/stats for overrun and dropped)
-ftrace_enabled
---------------
+ disable_on_free - When the free_buffer is closed, tracing will
+ stop (tracing_on set to 0).
-The following tracers (listed below) give different output
-depending on whether or not the sysctl ftrace_enabled is set. To
-set ftrace_enabled, one can either use the sysctl function or
-set it via the proc file system interface.
+ irq-info - Shows the interrupt, preempt count, need resched data.
+ When disabled, the trace looks like:
- sysctl kernel.ftrace_enabled=1
+# tracer: function
+#
+# entries-in-buffer/entries-written: 144405/9452052 #P:4
+#
+# TASK-PID CPU# TIMESTAMP FUNCTION
+# | | | | |
+ <idle>-0 [002] 23636.756054: ttwu_do_activate.constprop.89 <-try_to_wake_up
+ <idle>-0 [002] 23636.756054: activate_task <-ttwu_do_activate.constprop.89
+ <idle>-0 [002] 23636.756055: enqueue_task <-activate_task
- or
- echo 1 > /proc/sys/kernel/ftrace_enabled
+ markers - When set, the trace_marker is writable (only by root).
+ When disabled, the trace_marker will error with EINVAL
+ on write.
+
+
+ function-trace - The latency tracers will enable function tracing
+ if this option is enabled (it is by default). When
+ it is disabled, the latency tracers do not trace
+ functions. This keeps the overhead of the tracer down
+ when performing latency tests.
-To disable ftrace_enabled simply replace the '1' with '0' in the
-above commands.
+ Note: Some tracers have their own options. They only appear
+ when the tracer is active.
-When ftrace_enabled is set the tracers will also record the
-functions that are within the trace. The descriptions of the
-tracers will also show an example with ftrace enabled.
irqsoff
@@ -506,95 +903,133 @@ new trace is saved.
To reset the maximum, echo 0 into tracing_max_latency. Here is
an example:
+ # echo 0 > options/function-trace
# echo irqsoff > current_tracer
- # echo latency-format > trace_options
- # echo 0 > tracing_max_latency
# echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
# ls -ltr
[...]
# echo 0 > tracing_on
# cat trace
# tracer: irqsoff
#
-irqsoff latency trace v1.1.5 on 2.6.26
---------------------------------------------------------------------
- latency: 12 us, #3/3, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: bash-3730 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: sys_setpgid
- => ended at: sys_setpgid
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- bash-3730 1d... 0us : _write_lock_irq (sys_setpgid)
- bash-3730 1d..1 1us+: _write_unlock_irq (sys_setpgid)
- bash-3730 1d..2 14us : trace_hardirqs_on (sys_setpgid)
-
-
-Here we see that that we had a latency of 12 microsecs (which is
-very good). The _write_lock_irq in sys_setpgid disabled
-interrupts. The difference between the 12 and the displayed
-timestamp 14us occurred because the clock was incremented
+# irqsoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 16 us, #4/4, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: swapper/0-0 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: run_timer_softirq
+# => ended at: run_timer_softirq
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ <idle>-0 0d.s2 0us+: _raw_spin_lock_irq <-run_timer_softirq
+ <idle>-0 0dNs3 17us : _raw_spin_unlock_irq <-run_timer_softirq
+ <idle>-0 0dNs3 17us+: trace_hardirqs_on <-run_timer_softirq
+ <idle>-0 0dNs3 25us : <stack trace>
+ => _raw_spin_unlock_irq
+ => run_timer_softirq
+ => __do_softirq
+ => call_softirq
+ => do_softirq
+ => irq_exit
+ => smp_apic_timer_interrupt
+ => apic_timer_interrupt
+ => rcu_idle_exit
+ => cpu_idle
+ => rest_init
+ => start_kernel
+ => x86_64_start_reservations
+ => x86_64_start_kernel
+
+Here we see that we had a latency of 16 microseconds (which is
+very good). The _raw_spin_lock_irq in run_timer_softirq disabled
+interrupts. The difference between the 16 and the displayed
+timestamp 25us occurred because the clock was incremented
between the time of recording the max latency and the time of
recording the function that had that latency.
-Note the above example had ftrace_enabled not set. If we set the
-ftrace_enabled, we get a much larger output:
+Note the above example had function-trace not set. If we set
+function-trace, we get a much larger output:
+
+ with echo 1 > options/function-trace
# tracer: irqsoff
#
-irqsoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 50 us, #101/101, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: ls-4339 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: __alloc_pages_internal
- => ended at: __alloc_pages_internal
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- ls-4339 0...1 0us+: get_page_from_freelist (__alloc_pages_internal)
- ls-4339 0d..1 3us : rmqueue_bulk (get_page_from_freelist)
- ls-4339 0d..1 3us : _spin_lock (rmqueue_bulk)
- ls-4339 0d..1 4us : add_preempt_count (_spin_lock)
- ls-4339 0d..2 4us : __rmqueue (rmqueue_bulk)
- ls-4339 0d..2 5us : __rmqueue_smallest (__rmqueue)
- ls-4339 0d..2 5us : __mod_zone_page_state (__rmqueue_smallest)
- ls-4339 0d..2 6us : __rmqueue (rmqueue_bulk)
- ls-4339 0d..2 6us : __rmqueue_smallest (__rmqueue)
- ls-4339 0d..2 7us : __mod_zone_page_state (__rmqueue_smallest)
- ls-4339 0d..2 7us : __rmqueue (rmqueue_bulk)
- ls-4339 0d..2 8us : __rmqueue_smallest (__rmqueue)
+# irqsoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 71 us, #168/168, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: bash-2042 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: ata_scsi_queuecmd
+# => ended at: ata_scsi_queuecmd
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ bash-2042 3d... 0us : _raw_spin_lock_irqsave <-ata_scsi_queuecmd
+ bash-2042 3d... 0us : add_preempt_count <-_raw_spin_lock_irqsave
+ bash-2042 3d..1 1us : ata_scsi_find_dev <-ata_scsi_queuecmd
+ bash-2042 3d..1 1us : __ata_scsi_find_dev <-ata_scsi_find_dev
+ bash-2042 3d..1 2us : ata_find_dev.part.14 <-__ata_scsi_find_dev
+ bash-2042 3d..1 2us : ata_qc_new_init <-__ata_scsi_queuecmd
+ bash-2042 3d..1 3us : ata_sg_init <-__ata_scsi_queuecmd
+ bash-2042 3d..1 4us : ata_scsi_rw_xlat <-__ata_scsi_queuecmd
+ bash-2042 3d..1 4us : ata_build_rw_tf <-ata_scsi_rw_xlat
[...]
- ls-4339 0d..2 46us : __rmqueue_smallest (__rmqueue)
- ls-4339 0d..2 47us : __mod_zone_page_state (__rmqueue_smallest)
- ls-4339 0d..2 47us : __rmqueue (rmqueue_bulk)
- ls-4339 0d..2 48us : __rmqueue_smallest (__rmqueue)
- ls-4339 0d..2 48us : __mod_zone_page_state (__rmqueue_smallest)
- ls-4339 0d..2 49us : _spin_unlock (rmqueue_bulk)
- ls-4339 0d..2 49us : sub_preempt_count (_spin_unlock)
- ls-4339 0d..1 50us : get_page_from_freelist (__alloc_pages_internal)
- ls-4339 0d..2 51us : trace_hardirqs_on (__alloc_pages_internal)
-
-
-
-Here we traced a 50 microsecond latency. But we also see all the
+ bash-2042 3d..1 67us : delay_tsc <-__delay
+ bash-2042 3d..1 67us : add_preempt_count <-delay_tsc
+ bash-2042 3d..2 67us : sub_preempt_count <-delay_tsc
+ bash-2042 3d..1 67us : add_preempt_count <-delay_tsc
+ bash-2042 3d..2 68us : sub_preempt_count <-delay_tsc
+ bash-2042 3d..1 68us+: ata_bmdma_start <-ata_bmdma_qc_issue
+ bash-2042 3d..1 71us : _raw_spin_unlock_irqrestore <-ata_scsi_queuecmd
+ bash-2042 3d..1 71us : _raw_spin_unlock_irqrestore <-ata_scsi_queuecmd
+ bash-2042 3d..1 72us+: trace_hardirqs_on <-ata_scsi_queuecmd
+ bash-2042 3d..1 120us : <stack trace>
+ => _raw_spin_unlock_irqrestore
+ => ata_scsi_queuecmd
+ => scsi_dispatch_cmd
+ => scsi_request_fn
+ => __blk_run_queue_uncond
+ => __blk_run_queue
+ => blk_queue_bio
+ => generic_make_request
+ => submit_bio
+ => submit_bh
+ => __ext3_get_inode_loc
+ => ext3_iget
+ => ext3_lookup
+ => lookup_real
+ => __lookup_hash
+ => walk_component
+ => lookup_last
+ => path_lookupat
+ => filename_lookup
+ => user_path_at_empty
+ => user_path_at
+ => vfs_fstatat
+ => vfs_stat
+ => sys_newstat
+ => system_call_fastpath
+
+
+Here we traced a 71 microsecond latency. But we also see all the
functions that were called during that time. Note that by
enabling function tracing, we incur an added overhead. This
overhead may extend the latency times. But nevertheless, this
@@ -614,120 +1049,122 @@ Like the irqsoff tracer, it records the maximum latency for
which preemption was disabled. The control of preemptoff tracer
is much like the irqsoff tracer.
+ # echo 0 > options/function-trace
# echo preemptoff > current_tracer
- # echo latency-format > trace_options
- # echo 0 > tracing_max_latency
# echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
# ls -ltr
[...]
# echo 0 > tracing_on
# cat trace
# tracer: preemptoff
#
-preemptoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 29 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: do_IRQ
- => ended at: __do_softirq
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- sshd-4261 0d.h. 0us+: irq_enter (do_IRQ)
- sshd-4261 0d.s. 29us : _local_bh_enable (__do_softirq)
- sshd-4261 0d.s1 30us : trace_preempt_on (__do_softirq)
+# preemptoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 46 us, #4/4, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: sshd-1991 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: do_IRQ
+# => ended at: do_IRQ
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ sshd-1991 1d.h. 0us+: irq_enter <-do_IRQ
+ sshd-1991 1d..1 46us : irq_exit <-do_IRQ
+ sshd-1991 1d..1 47us+: trace_preempt_on <-do_IRQ
+ sshd-1991 1d..1 52us : <stack trace>
+ => sub_preempt_count
+ => irq_exit
+ => do_IRQ
+ => ret_from_intr
This has some more changes. Preemption was disabled when an
-interrupt came in (notice the 'h'), and was enabled while doing
-a softirq. (notice the 's'). But we also see that interrupts
-have been disabled when entering the preempt off section and
-leaving it (the 'd'). We do not know if interrupts were enabled
-in the mean time.
+interrupt came in (notice the 'h'), and was enabled on exit.
+But we also see that interrupts have been disabled when entering
+the preempt off section and leaving it (the 'd'). We do not know if
+interrupts were enabled in the meantime or shortly after this
+was over.
# tracer: preemptoff
#
-preemptoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 63 us, #87/87, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: remove_wait_queue
- => ended at: __do_softirq
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- sshd-4261 0d..1 0us : _spin_lock_irqsave (remove_wait_queue)
- sshd-4261 0d..1 1us : _spin_unlock_irqrestore (remove_wait_queue)
- sshd-4261 0d..1 2us : do_IRQ (common_interrupt)
- sshd-4261 0d..1 2us : irq_enter (do_IRQ)
- sshd-4261 0d..1 2us : idle_cpu (irq_enter)
- sshd-4261 0d..1 3us : add_preempt_count (irq_enter)
- sshd-4261 0d.h1 3us : idle_cpu (irq_enter)
- sshd-4261 0d.h. 4us : handle_fasteoi_irq (do_IRQ)
+# preemptoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 83 us, #241/241, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: bash-1994 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: wake_up_new_task
+# => ended at: task_rq_unlock
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ bash-1994 1d..1 0us : _raw_spin_lock_irqsave <-wake_up_new_task
+ bash-1994 1d..1 0us : select_task_rq_fair <-select_task_rq
+ bash-1994 1d..1 1us : __rcu_read_lock <-select_task_rq_fair
+ bash-1994 1d..1 1us : source_load <-select_task_rq_fair
+ bash-1994 1d..1 1us : source_load <-select_task_rq_fair
[...]
- sshd-4261 0d.h. 12us : add_preempt_count (_spin_lock)
- sshd-4261 0d.h1 12us : ack_ioapic_quirk_irq (handle_fasteoi_irq)
- sshd-4261 0d.h1 13us : move_native_irq (ack_ioapic_quirk_irq)
- sshd-4261 0d.h1 13us : _spin_unlock (handle_fasteoi_irq)
- sshd-4261 0d.h1 14us : sub_preempt_count (_spin_unlock)
- sshd-4261 0d.h1 14us : irq_exit (do_IRQ)
- sshd-4261 0d.h1 15us : sub_preempt_count (irq_exit)
- sshd-4261 0d..2 15us : do_softirq (irq_exit)
- sshd-4261 0d... 15us : __do_softirq (do_softirq)
- sshd-4261 0d... 16us : __local_bh_disable (__do_softirq)
- sshd-4261 0d... 16us+: add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s4 20us : add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s4 21us : sub_preempt_count (local_bh_enable)
- sshd-4261 0d.s5 21us : sub_preempt_count (local_bh_enable)
+ bash-1994 1d..1 12us : irq_enter <-smp_apic_timer_interrupt
+ bash-1994 1d..1 12us : rcu_irq_enter <-irq_enter
+ bash-1994 1d..1 13us : add_preempt_count <-irq_enter
+ bash-1994 1d.h1 13us : exit_idle <-smp_apic_timer_interrupt
+ bash-1994 1d.h1 13us : hrtimer_interrupt <-smp_apic_timer_interrupt
+ bash-1994 1d.h1 13us : _raw_spin_lock <-hrtimer_interrupt
+ bash-1994 1d.h1 14us : add_preempt_count <-_raw_spin_lock
+ bash-1994 1d.h2 14us : ktime_get_update_offsets <-hrtimer_interrupt
[...]
- sshd-4261 0d.s6 41us : add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s6 42us : sub_preempt_count (local_bh_enable)
- sshd-4261 0d.s7 42us : sub_preempt_count (local_bh_enable)
- sshd-4261 0d.s5 43us : add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s5 43us : sub_preempt_count (local_bh_enable_ip)
- sshd-4261 0d.s6 44us : sub_preempt_count (local_bh_enable_ip)
- sshd-4261 0d.s5 44us : add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s5 45us : sub_preempt_count (local_bh_enable)
+ bash-1994 1d.h1 35us : lapic_next_event <-clockevents_program_event
+ bash-1994 1d.h1 35us : irq_exit <-smp_apic_timer_interrupt
+ bash-1994 1d.h1 36us : sub_preempt_count <-irq_exit
+ bash-1994 1d..2 36us : do_softirq <-irq_exit
+ bash-1994 1d..2 36us : __do_softirq <-call_softirq
+ bash-1994 1d..2 36us : __local_bh_disable <-__do_softirq
+ bash-1994 1d.s2 37us : add_preempt_count <-_raw_spin_lock_irq
+ bash-1994 1d.s3 38us : _raw_spin_unlock <-run_timer_softirq
+ bash-1994 1d.s3 39us : sub_preempt_count <-_raw_spin_unlock
+ bash-1994 1d.s2 39us : call_timer_fn <-run_timer_softirq
[...]
- sshd-4261 0d.s. 63us : _local_bh_enable (__do_softirq)
- sshd-4261 0d.s1 64us : trace_preempt_on (__do_softirq)
+ bash-1994 1dNs2 81us : cpu_needs_another_gp <-rcu_process_callbacks
+ bash-1994 1dNs2 82us : __local_bh_enable <-__do_softirq
+ bash-1994 1dNs2 82us : sub_preempt_count <-__local_bh_enable
+ bash-1994 1dN.2 82us : idle_cpu <-irq_exit
+ bash-1994 1dN.2 83us : rcu_irq_exit <-irq_exit
+ bash-1994 1dN.2 83us : sub_preempt_count <-irq_exit
+ bash-1994 1.N.1 84us : _raw_spin_unlock_irqrestore <-task_rq_unlock
+ bash-1994 1.N.1 84us+: trace_preempt_on <-task_rq_unlock
+ bash-1994 1.N.1 104us : <stack trace>
+ => sub_preempt_count
+ => _raw_spin_unlock_irqrestore
+ => task_rq_unlock
+ => wake_up_new_task
+ => do_fork
+ => sys_clone
+ => stub_clone
The above is an example of the preemptoff trace with
-ftrace_enabled set. Here we see that interrupts were disabled
+function-trace set. Here we see that interrupts were not disabled
the entire time. The irq_enter code lets us know that we entered
an interrupt 'h'. Before that, the functions being traced still
show that it is not in an interrupt, but we can see from the
functions themselves that this is not the case.
-Notice that __do_softirq when called does not have a
-preempt_count. It may seem that we missed a preempt enabling.
-What really happened is that the preempt count is held on the
-thread's stack and we switched to the softirq stack (4K stacks
-in effect). The code does not copy the preempt count, but
-because interrupts are disabled, we do not need to worry about
-it. Having a tracer like this is good for letting people know
-what really happens inside the kernel.
-
-
preemptirqsoff
--------------
@@ -762,38 +1199,57 @@ tracer.
Again, using this trace is much like the irqsoff and preemptoff
tracers.
+ # echo 0 > options/function-trace
# echo preemptirqsoff > current_tracer
- # echo latency-format > trace_options
- # echo 0 > tracing_max_latency
# echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
# ls -ltr
[...]
# echo 0 > tracing_on
# cat trace
# tracer: preemptirqsoff
#
-preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 293 us, #3/3, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: ls-4860 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: apic_timer_interrupt
- => ended at: __do_softirq
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- ls-4860 0d... 0us!: trace_hardirqs_off_thunk (apic_timer_interrupt)
- ls-4860 0d.s. 294us : _local_bh_enable (__do_softirq)
- ls-4860 0d.s1 294us : trace_preempt_on (__do_softirq)
-
+# preemptirqsoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 100 us, #4/4, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: ls-2230 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: ata_scsi_queuecmd
+# => ended at: ata_scsi_queuecmd
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ ls-2230 3d... 0us+: _raw_spin_lock_irqsave <-ata_scsi_queuecmd
+ ls-2230 3...1 100us : _raw_spin_unlock_irqrestore <-ata_scsi_queuecmd
+ ls-2230 3...1 101us+: trace_preempt_on <-ata_scsi_queuecmd
+ ls-2230 3...1 111us : <stack trace>
+ => sub_preempt_count
+ => _raw_spin_unlock_irqrestore
+ => ata_scsi_queuecmd
+ => scsi_dispatch_cmd
+ => scsi_request_fn
+ => __blk_run_queue_uncond
+ => __blk_run_queue
+ => blk_queue_bio
+ => generic_make_request
+ => submit_bio
+ => submit_bh
+ => ext3_bread
+ => ext3_dir_bread
+ => htree_dirblock_to_tree
+ => ext3_htree_fill_tree
+ => ext3_readdir
+ => vfs_readdir
+ => sys_getdents
+ => system_call_fastpath
The trace_hardirqs_off_thunk is called from assembly on x86 when
@@ -802,105 +1258,158 @@ function tracing, we do not know if interrupts were enabled
within the preemption points. We do see that it started with
preemption enabled.
-Here is a trace with ftrace_enabled set:
-
+Here is a trace with function-trace set:
# tracer: preemptirqsoff
#
-preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 105 us, #183/183, CPU#0 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: sshd-4261 (uid:0 nice:0 policy:0 rt_prio:0)
- -----------------
- => started at: write_chan
- => ended at: __do_softirq
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- ls-4473 0.N.. 0us : preempt_schedule (write_chan)
- ls-4473 0dN.1 1us : _spin_lock (schedule)
- ls-4473 0dN.1 2us : add_preempt_count (_spin_lock)
- ls-4473 0d..2 2us : put_prev_task_fair (schedule)
-[...]
- ls-4473 0d..2 13us : set_normalized_timespec (ktime_get_ts)
- ls-4473 0d..2 13us : __switch_to (schedule)
- sshd-4261 0d..2 14us : finish_task_switch (schedule)
- sshd-4261 0d..2 14us : _spin_unlock_irq (finish_task_switch)
- sshd-4261 0d..1 15us : add_preempt_count (_spin_lock_irqsave)
- sshd-4261 0d..2 16us : _spin_unlock_irqrestore (hrtick_set)
- sshd-4261 0d..2 16us : do_IRQ (common_interrupt)
- sshd-4261 0d..2 17us : irq_enter (do_IRQ)
- sshd-4261 0d..2 17us : idle_cpu (irq_enter)
- sshd-4261 0d..2 18us : add_preempt_count (irq_enter)
- sshd-4261 0d.h2 18us : idle_cpu (irq_enter)
- sshd-4261 0d.h. 18us : handle_fasteoi_irq (do_IRQ)
- sshd-4261 0d.h. 19us : _spin_lock (handle_fasteoi_irq)
- sshd-4261 0d.h. 19us : add_preempt_count (_spin_lock)
- sshd-4261 0d.h1 20us : _spin_unlock (handle_fasteoi_irq)
- sshd-4261 0d.h1 20us : sub_preempt_count (_spin_unlock)
-[...]
- sshd-4261 0d.h1 28us : _spin_unlock (handle_fasteoi_irq)
- sshd-4261 0d.h1 29us : sub_preempt_count (_spin_unlock)
- sshd-4261 0d.h2 29us : irq_exit (do_IRQ)
- sshd-4261 0d.h2 29us : sub_preempt_count (irq_exit)
- sshd-4261 0d..3 30us : do_softirq (irq_exit)
- sshd-4261 0d... 30us : __do_softirq (do_softirq)
- sshd-4261 0d... 31us : __local_bh_disable (__do_softirq)
- sshd-4261 0d... 31us+: add_preempt_count (__local_bh_disable)
- sshd-4261 0d.s4 34us : add_preempt_count (__local_bh_disable)
+# preemptirqsoff latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 161 us, #339/339, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: ls-2269 (uid:0 nice:0 policy:0 rt_prio:0)
+# -----------------
+# => started at: schedule
+# => ended at: mutex_unlock
+#
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+kworker/-59 3...1 0us : __schedule <-schedule
+kworker/-59 3d..1 0us : rcu_preempt_qs <-rcu_note_context_switch
+kworker/-59 3d..1 1us : add_preempt_count <-_raw_spin_lock_irq
+kworker/-59 3d..2 1us : deactivate_task <-__schedule
+kworker/-59 3d..2 1us : dequeue_task <-deactivate_task
+kworker/-59 3d..2 2us : update_rq_clock <-dequeue_task
+kworker/-59 3d..2 2us : dequeue_task_fair <-dequeue_task
+kworker/-59 3d..2 2us : update_curr <-dequeue_task_fair
+kworker/-59 3d..2 2us : update_min_vruntime <-update_curr
+kworker/-59 3d..2 3us : cpuacct_charge <-update_curr
+kworker/-59 3d..2 3us : __rcu_read_lock <-cpuacct_charge
+kworker/-59 3d..2 3us : __rcu_read_unlock <-cpuacct_charge
+kworker/-59 3d..2 3us : update_cfs_rq_blocked_load <-dequeue_task_fair
+kworker/-59 3d..2 4us : clear_buddies <-dequeue_task_fair
+kworker/-59 3d..2 4us : account_entity_dequeue <-dequeue_task_fair
+kworker/-59 3d..2 4us : update_min_vruntime <-dequeue_task_fair
+kworker/-59 3d..2 4us : update_cfs_shares <-dequeue_task_fair
+kworker/-59 3d..2 5us : hrtick_update <-dequeue_task_fair
+kworker/-59 3d..2 5us : wq_worker_sleeping <-__schedule
+kworker/-59 3d..2 5us : kthread_data <-wq_worker_sleeping
+kworker/-59 3d..2 5us : put_prev_task_fair <-__schedule
+kworker/-59 3d..2 6us : pick_next_task_fair <-pick_next_task
+kworker/-59 3d..2 6us : clear_buddies <-pick_next_task_fair
+kworker/-59 3d..2 6us : set_next_entity <-pick_next_task_fair
+kworker/-59 3d..2 6us : update_stats_wait_end <-set_next_entity
+ ls-2269 3d..2 7us : finish_task_switch <-__schedule
+ ls-2269 3d..2 7us : _raw_spin_unlock_irq <-finish_task_switch
+ ls-2269 3d..2 8us : do_IRQ <-ret_from_intr
+ ls-2269 3d..2 8us : irq_enter <-do_IRQ
+ ls-2269 3d..2 8us : rcu_irq_enter <-irq_enter
+ ls-2269 3d..2 9us : add_preempt_count <-irq_enter
+ ls-2269 3d.h2 9us : exit_idle <-do_IRQ
[...]
- sshd-4261 0d.s3 43us : sub_preempt_count (local_bh_enable_ip)
- sshd-4261 0d.s4 44us : sub_preempt_count (local_bh_enable_ip)
- sshd-4261 0d.s3 44us : smp_apic_timer_interrupt (apic_timer_interrupt)
- sshd-4261 0d.s3 45us : irq_enter (smp_apic_timer_interrupt)
- sshd-4261 0d.s3 45us : idle_cpu (irq_enter)
- sshd-4261 0d.s3 46us : add_preempt_count (irq_enter)
- sshd-4261 0d.H3 46us : idle_cpu (irq_enter)
- sshd-4261 0d.H3 47us : hrtimer_interrupt (smp_apic_timer_interrupt)
- sshd-4261 0d.H3 47us : ktime_get (hrtimer_interrupt)
+ ls-2269 3d.h3 20us : sub_preempt_count <-_raw_spin_unlock
+ ls-2269 3d.h2 20us : irq_exit <-do_IRQ
+ ls-2269 3d.h2 21us : sub_preempt_count <-irq_exit
+ ls-2269 3d..3 21us : do_softirq <-irq_exit
+ ls-2269 3d..3 21us : __do_softirq <-call_softirq
+ ls-2269 3d..3 21us+: __local_bh_disable <-__do_softirq
+ ls-2269 3d.s4 29us : sub_preempt_count <-_local_bh_enable_ip
+ ls-2269 3d.s5 29us : sub_preempt_count <-_local_bh_enable_ip
+ ls-2269 3d.s5 31us : do_IRQ <-ret_from_intr
+ ls-2269 3d.s5 31us : irq_enter <-do_IRQ
+ ls-2269 3d.s5 31us : rcu_irq_enter <-irq_enter
[...]
- sshd-4261 0d.H3 81us : tick_program_event (hrtimer_interrupt)
- sshd-4261 0d.H3 82us : ktime_get (tick_program_event)
- sshd-4261 0d.H3 82us : ktime_get_ts (ktime_get)
- sshd-4261 0d.H3 83us : getnstimeofday (ktime_get_ts)
- sshd-4261 0d.H3 83us : set_normalized_timespec (ktime_get_ts)
- sshd-4261 0d.H3 84us : clockevents_program_event (tick_program_event)
- sshd-4261 0d.H3 84us : lapic_next_event (clockevents_program_event)
- sshd-4261 0d.H3 85us : irq_exit (smp_apic_timer_interrupt)
- sshd-4261 0d.H3 85us : sub_preempt_count (irq_exit)
- sshd-4261 0d.s4 86us : sub_preempt_count (irq_exit)
- sshd-4261 0d.s3 86us : add_preempt_count (__local_bh_disable)
+ ls-2269 3d.s5 31us : rcu_irq_enter <-irq_enter
+ ls-2269 3d.s5 32us : add_preempt_count <-irq_enter
+ ls-2269 3d.H5 32us : exit_idle <-do_IRQ
+ ls-2269 3d.H5 32us : handle_irq <-do_IRQ
+ ls-2269 3d.H5 32us : irq_to_desc <-handle_irq
+ ls-2269 3d.H5 33us : handle_fasteoi_irq <-handle_irq
[...]
- sshd-4261 0d.s1 98us : sub_preempt_count (net_rx_action)
- sshd-4261 0d.s. 99us : add_preempt_count (_spin_lock_irq)
- sshd-4261 0d.s1 99us+: _spin_unlock_irq (run_timer_softirq)
- sshd-4261 0d.s. 104us : _local_bh_enable (__do_softirq)
- sshd-4261 0d.s. 104us : sub_preempt_count (_local_bh_enable)
- sshd-4261 0d.s. 105us : _local_bh_enable (__do_softirq)
- sshd-4261 0d.s1 105us : trace_preempt_on (__do_softirq)
-
-
-This is a very interesting trace. It started with the preemption
-of the ls task. We see that the task had the "need_resched" bit
-set via the 'N' in the trace. Interrupts were disabled before
-the spin_lock at the beginning of the trace. We see that a
-schedule took place to run sshd. When the interrupts were
-enabled, we took an interrupt. On return from the interrupt
-handler, the softirq ran. We took another interrupt while
-running the softirq as we see from the capital 'H'.
+ ls-2269 3d.s5 158us : _raw_spin_unlock_irqrestore <-rtl8139_poll
+ ls-2269 3d.s3 158us : net_rps_action_and_irq_enable.isra.65 <-net_rx_action
+ ls-2269 3d.s3 159us : __local_bh_enable <-__do_softirq
+ ls-2269 3d.s3 159us : sub_preempt_count <-__local_bh_enable
+ ls-2269 3d..3 159us : idle_cpu <-irq_exit
+ ls-2269 3d..3 159us : rcu_irq_exit <-irq_exit
+ ls-2269 3d..3 160us : sub_preempt_count <-irq_exit
+ ls-2269 3d... 161us : __mutex_unlock_slowpath <-mutex_unlock
+ ls-2269 3d... 162us+: trace_hardirqs_on <-mutex_unlock
+ ls-2269 3d... 186us : <stack trace>
+ => __mutex_unlock_slowpath
+ => mutex_unlock
+ => process_output
+ => n_tty_write
+ => tty_write
+ => vfs_write
+ => sys_write
+ => system_call_fastpath
+
+This is an interesting trace. It started with kworker running and
+scheduling out and ls taking over. But as soon as ls released the
+rq lock and enabled interrupts (but not preemption), an interrupt
+triggered. When the interrupt finished, it started running softirqs.
+But while the softirq was running, another interrupt triggered.
+When an interrupt is running inside a softirq, the annotation is 'H'.
wakeup
------
+One common case that people are interested in tracing is the
+time it takes for a task that is woken to actually wake up.
+For non-Real-Time tasks, this can be arbitrary. But it is
+nonetheless interesting to trace.
+
+Without function tracing:
+
+ # echo 0 > options/function-trace
+ # echo wakeup > current_tracer
+ # echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
+ # chrt -f 5 sleep 1
+ # echo 0 > tracing_on
+ # cat trace
+# tracer: wakeup
+#
+# wakeup latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 15 us, #4/4, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: kworker/3:1H-312 (uid:0 nice:-20 policy:0 rt_prio:0)
+# -----------------
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ <idle>-0 3dNs7 0us : 0:120:R + [003] 312:100:R kworker/3:1H
+ <idle>-0 3dNs7 1us+: ttwu_do_activate.constprop.87 <-try_to_wake_up
+ <idle>-0 3d..3 15us : __schedule <-schedule
+ <idle>-0 3d..3 15us : 0:120:R ==> [003] 312:100:R kworker/3:1H
+
+The tracer only traces the highest priority task in the system
+to avoid tracing normal circumstances. Here we see that
+the kworker with a nice priority of -20 (not very nice) took
+just 15 microseconds from the time it woke up to the time it
+ran.
+
+Non Real-Time tasks are not that interesting. A more interesting
+trace is to concentrate only on Real-Time tasks.
+
+wakeup_rt
+---------
+
In a Real-Time environment it is very important to know the
wakeup time it takes for the highest priority task that is woken
up to the time that it executes. This is also known as "schedule
@@ -914,124 +1423,229 @@ Real-Time environments are interested in the worst case latency.
That is the longest latency it takes for something to happen,
and not the average. We can have a very fast scheduler that may
only have a large latency once in a while, but that would not
-work well with Real-Time tasks. The wakeup tracer was designed
+work well with Real-Time tasks. The wakeup_rt tracer was designed
to record the worst case wakeups of RT tasks. Non-RT tasks are
not recorded because the tracer only records one worst case and
tracing non-RT tasks that are unpredictable will overwrite the
-worst case latency of RT tasks.
+worst case latency of RT tasks (just run the normal wakeup
+tracer for a while to see that effect).
Since this tracer only deals with RT tasks, we will run this
slightly differently than we did with the previous tracers.
Instead of performing an 'ls', we will run 'sleep 1' under
'chrt' which changes the priority of the task.
- # echo wakeup > current_tracer
- # echo latency-format > trace_options
- # echo 0 > tracing_max_latency
+ # echo 0 > options/function-trace
+ # echo wakeup_rt > current_tracer
# echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
# chrt -f 5 sleep 1
# echo 0 > tracing_on
# cat trace
# tracer: wakeup
#
-wakeup latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 4 us, #2/2, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: sleep-4901 (uid:0 nice:0 policy:1 rt_prio:5)
- -----------------
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
- <idle>-0 1d.h4 0us+: try_to_wake_up (wake_up_process)
- <idle>-0 1d..4 4us : schedule (cpu_idle)
-
-
-Running this on an idle system, we see that it only took 4
-microseconds to perform the task switch. Note, since the trace
-marker in the schedule is before the actual "switch", we stop
-the tracing when the recorded task is about to schedule in. This
-may change if we add a new marker at the end of the scheduler.
-
-Notice that the recorded task is 'sleep' with the PID of 4901
+# tracer: wakeup_rt
+#
+# wakeup_rt latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 5 us, #4/4, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: sleep-2389 (uid:0 nice:0 policy:1 rt_prio:5)
+# -----------------
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ <idle>-0 3d.h4 0us : 0:120:R + [003] 2389: 94:R sleep
+ <idle>-0 3d.h4 1us+: ttwu_do_activate.constprop.87 <-try_to_wake_up
+ <idle>-0 3d..3 5us : __schedule <-schedule
+ <idle>-0 3d..3 5us : 0:120:R ==> [003] 2389: 94:R sleep
+
+
+Running this on an idle system, we see that it only took 5 microseconds
+to perform the task switch. Note, since the trace point in the schedule
+is before the actual "switch", we stop the tracing when the recorded task
+is about to schedule in. This may change if we add a new marker at the
+end of the scheduler.
+
+Notice that the recorded task is 'sleep' with the PID of 2389
and it has an rt_prio of 5. This priority is user-space priority
and not the internal kernel priority. The policy is 1 for
SCHED_FIFO and 2 for SCHED_RR.
-Doing the same with chrt -r 5 and ftrace_enabled set.
+Note that the trace data shows the internal priority (99 - rtprio).
-# tracer: wakeup
+ <idle>-0 3d..3 5us : 0:120:R ==> [003] 2389: 94:R sleep
+
+The 0:120:R means idle was running with a nice priority of 0 (120 - 120)
+and in the running state 'R'. The sleep task was scheduled in with
+2389: 94:R. That is, the priority is the kernel rtprio (99 - 5 = 94)
+and it too is in the running state.
+
+Doing the same with chrt -r 5 and function-trace set.
+
+ echo 1 > options/function-trace
+
+# tracer: wakeup_rt
#
-wakeup latency trace v1.1.5 on 2.6.26-rc8
---------------------------------------------------------------------
- latency: 50 us, #60/60, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
- -----------------
- | task: sleep-4068 (uid:0 nice:0 policy:2 rt_prio:5)
- -----------------
-
-# _------=> CPU#
-# / _-----=> irqs-off
-# | / _----=> need-resched
-# || / _---=> hardirq/softirq
-# ||| / _--=> preempt-depth
-# |||| /
-# ||||| delay
-# cmd pid ||||| time | caller
-# \ / ||||| \ | /
-ksoftirq-7 1d.H3 0us : try_to_wake_up (wake_up_process)
-ksoftirq-7 1d.H4 1us : sub_preempt_count (marker_probe_cb)
-ksoftirq-7 1d.H3 2us : check_preempt_wakeup (try_to_wake_up)
-ksoftirq-7 1d.H3 3us : update_curr (check_preempt_wakeup)
-ksoftirq-7 1d.H3 4us : calc_delta_mine (update_curr)
-ksoftirq-7 1d.H3 5us : __resched_task (check_preempt_wakeup)
-ksoftirq-7 1d.H3 6us : task_wake_up_rt (try_to_wake_up)
-ksoftirq-7 1d.H3 7us : _spin_unlock_irqrestore (try_to_wake_up)
-[...]
-ksoftirq-7 1d.H2 17us : irq_exit (smp_apic_timer_interrupt)
-ksoftirq-7 1d.H2 18us : sub_preempt_count (irq_exit)
-ksoftirq-7 1d.s3 19us : sub_preempt_count (irq_exit)
-ksoftirq-7 1..s2 20us : rcu_process_callbacks (__do_softirq)
-[...]
-ksoftirq-7 1..s2 26us : __rcu_process_callbacks (rcu_process_callbacks)
-ksoftirq-7 1d.s2 27us : _local_bh_enable (__do_softirq)
-ksoftirq-7 1d.s2 28us : sub_preempt_count (_local_bh_enable)
-ksoftirq-7 1.N.3 29us : sub_preempt_count (ksoftirqd)
-ksoftirq-7 1.N.2 30us : _cond_resched (ksoftirqd)
-ksoftirq-7 1.N.2 31us : __cond_resched (_cond_resched)
-ksoftirq-7 1.N.2 32us : add_preempt_count (__cond_resched)
-ksoftirq-7 1.N.2 33us : schedule (__cond_resched)
-ksoftirq-7 1.N.2 33us : add_preempt_count (schedule)
-ksoftirq-7 1.N.3 34us : hrtick_clear (schedule)
-ksoftirq-7 1dN.3 35us : _spin_lock (schedule)
-ksoftirq-7 1dN.3 36us : add_preempt_count (_spin_lock)
-ksoftirq-7 1d..4 37us : put_prev_task_fair (schedule)
-ksoftirq-7 1d..4 38us : update_curr (put_prev_task_fair)
-[...]
-ksoftirq-7 1d..5 47us : _spin_trylock (tracing_record_cmdline)
-ksoftirq-7 1d..5 48us : add_preempt_count (_spin_trylock)
-ksoftirq-7 1d..6 49us : _spin_unlock (tracing_record_cmdline)
-ksoftirq-7 1d..6 49us : sub_preempt_count (_spin_unlock)
-ksoftirq-7 1d..4 50us : schedule (__cond_resched)
-
-The interrupt went off while running ksoftirqd. This task runs
-at SCHED_OTHER. Why did not we see the 'N' set early? This may
-be a harmless bug with x86_32 and 4K stacks. On x86_32 with 4K
-stacks configured, the interrupt and softirq run with their own
-stack. Some information is held on the top of the task's stack
-(need_resched and preempt_count are both stored there). The
-setting of the NEED_RESCHED bit is done directly to the task's
-stack, but the reading of the NEED_RESCHED is done by looking at
-the current stack, which in this case is the stack for the hard
-interrupt. This hides the fact that NEED_RESCHED has been set.
-We do not see the 'N' until we switch back to the task's
-assigned stack.
+# wakeup_rt latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 29 us, #85/85, CPU#3 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: sleep-2448 (uid:0 nice:0 policy:1 rt_prio:5)
+# -----------------
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ <idle>-0 3d.h4 1us+: 0:120:R + [003] 2448: 94:R sleep
+ <idle>-0 3d.h4 2us : ttwu_do_activate.constprop.87 <-try_to_wake_up
+ <idle>-0 3d.h3 3us : check_preempt_curr <-ttwu_do_wakeup
+ <idle>-0 3d.h3 3us : resched_task <-check_preempt_curr
+ <idle>-0 3dNh3 4us : task_woken_rt <-ttwu_do_wakeup
+ <idle>-0 3dNh3 4us : _raw_spin_unlock <-try_to_wake_up
+ <idle>-0 3dNh3 4us : sub_preempt_count <-_raw_spin_unlock
+ <idle>-0 3dNh2 5us : ttwu_stat <-try_to_wake_up
+ <idle>-0 3dNh2 5us : _raw_spin_unlock_irqrestore <-try_to_wake_up
+ <idle>-0 3dNh2 6us : sub_preempt_count <-_raw_spin_unlock_irqrestore
+ <idle>-0 3dNh1 6us : _raw_spin_lock <-__run_hrtimer
+ <idle>-0 3dNh1 6us : add_preempt_count <-_raw_spin_lock
+ <idle>-0 3dNh2 7us : _raw_spin_unlock <-hrtimer_interrupt
+ <idle>-0 3dNh2 7us : sub_preempt_count <-_raw_spin_unlock
+ <idle>-0 3dNh1 7us : tick_program_event <-hrtimer_interrupt
+ <idle>-0 3dNh1 7us : clockevents_program_event <-tick_program_event
+ <idle>-0 3dNh1 8us : ktime_get <-clockevents_program_event
+ <idle>-0 3dNh1 8us : lapic_next_event <-clockevents_program_event
+ <idle>-0 3dNh1 8us : irq_exit <-smp_apic_timer_interrupt
+ <idle>-0 3dNh1 9us : sub_preempt_count <-irq_exit
+ <idle>-0 3dN.2 9us : idle_cpu <-irq_exit
+ <idle>-0 3dN.2 9us : rcu_irq_exit <-irq_exit
+ <idle>-0 3dN.2 10us : rcu_eqs_enter_common.isra.45 <-rcu_irq_exit
+ <idle>-0 3dN.2 10us : sub_preempt_count <-irq_exit
+ <idle>-0 3.N.1 11us : rcu_idle_exit <-cpu_idle
+ <idle>-0 3dN.1 11us : rcu_eqs_exit_common.isra.43 <-rcu_idle_exit
+ <idle>-0 3.N.1 11us : tick_nohz_idle_exit <-cpu_idle
+ <idle>-0 3dN.1 12us : menu_hrtimer_cancel <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 12us : ktime_get <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 12us : tick_do_update_jiffies64 <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 13us : update_cpu_load_nohz <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 13us : _raw_spin_lock <-update_cpu_load_nohz
+ <idle>-0 3dN.1 13us : add_preempt_count <-_raw_spin_lock
+ <idle>-0 3dN.2 13us : __update_cpu_load <-update_cpu_load_nohz
+ <idle>-0 3dN.2 14us : sched_avg_update <-__update_cpu_load
+ <idle>-0 3dN.2 14us : _raw_spin_unlock <-update_cpu_load_nohz
+ <idle>-0 3dN.2 14us : sub_preempt_count <-_raw_spin_unlock
+ <idle>-0 3dN.1 15us : calc_load_exit_idle <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 15us : hrtimer_cancel <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 15us : hrtimer_try_to_cancel <-hrtimer_cancel
+ <idle>-0 3dN.1 16us : lock_hrtimer_base.isra.18 <-hrtimer_try_to_cancel
+ <idle>-0 3dN.1 16us : _raw_spin_lock_irqsave <-lock_hrtimer_base.isra.18
+ <idle>-0 3dN.1 16us : add_preempt_count <-_raw_spin_lock_irqsave
+ <idle>-0 3dN.2 17us : __remove_hrtimer <-remove_hrtimer.part.16
+ <idle>-0 3dN.2 17us : hrtimer_force_reprogram <-__remove_hrtimer
+ <idle>-0 3dN.2 17us : tick_program_event <-hrtimer_force_reprogram
+ <idle>-0 3dN.2 18us : clockevents_program_event <-tick_program_event
+ <idle>-0 3dN.2 18us : ktime_get <-clockevents_program_event
+ <idle>-0 3dN.2 18us : lapic_next_event <-clockevents_program_event
+ <idle>-0 3dN.2 19us : _raw_spin_unlock_irqrestore <-hrtimer_try_to_cancel
+ <idle>-0 3dN.2 19us : sub_preempt_count <-_raw_spin_unlock_irqrestore
+ <idle>-0 3dN.1 19us : hrtimer_forward <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 20us : ktime_add_safe <-hrtimer_forward
+ <idle>-0 3dN.1 20us : ktime_add_safe <-hrtimer_forward
+ <idle>-0 3dN.1 20us : hrtimer_start_range_ns <-hrtimer_start_expires.constprop.11
+ <idle>-0 3dN.1 20us : __hrtimer_start_range_ns <-hrtimer_start_range_ns
+ <idle>-0 3dN.1 21us : lock_hrtimer_base.isra.18 <-__hrtimer_start_range_ns
+ <idle>-0 3dN.1 21us : _raw_spin_lock_irqsave <-lock_hrtimer_base.isra.18
+ <idle>-0 3dN.1 21us : add_preempt_count <-_raw_spin_lock_irqsave
+ <idle>-0 3dN.2 22us : ktime_add_safe <-__hrtimer_start_range_ns
+ <idle>-0 3dN.2 22us : enqueue_hrtimer <-__hrtimer_start_range_ns
+ <idle>-0 3dN.2 22us : tick_program_event <-__hrtimer_start_range_ns
+ <idle>-0 3dN.2 23us : clockevents_program_event <-tick_program_event
+ <idle>-0 3dN.2 23us : ktime_get <-clockevents_program_event
+ <idle>-0 3dN.2 23us : lapic_next_event <-clockevents_program_event
+ <idle>-0 3dN.2 24us : _raw_spin_unlock_irqrestore <-__hrtimer_start_range_ns
+ <idle>-0 3dN.2 24us : sub_preempt_count <-_raw_spin_unlock_irqrestore
+ <idle>-0 3dN.1 24us : account_idle_ticks <-tick_nohz_idle_exit
+ <idle>-0 3dN.1 24us : account_idle_time <-account_idle_ticks
+ <idle>-0 3.N.1 25us : sub_preempt_count <-cpu_idle
+ <idle>-0 3.N.. 25us : schedule <-cpu_idle
+ <idle>-0 3.N.. 25us : __schedule <-preempt_schedule
+ <idle>-0 3.N.. 26us : add_preempt_count <-__schedule
+ <idle>-0 3.N.1 26us : rcu_note_context_switch <-__schedule
+ <idle>-0 3.N.1 26us : rcu_sched_qs <-rcu_note_context_switch
+ <idle>-0 3dN.1 27us : rcu_preempt_qs <-rcu_note_context_switch
+ <idle>-0 3.N.1 27us : _raw_spin_lock_irq <-__schedule
+ <idle>-0 3dN.1 27us : add_preempt_count <-_raw_spin_lock_irq
+ <idle>-0 3dN.2 28us : put_prev_task_idle <-__schedule
+ <idle>-0 3dN.2 28us : pick_next_task_stop <-pick_next_task
+ <idle>-0 3dN.2 28us : pick_next_task_rt <-pick_next_task
+ <idle>-0 3dN.2 29us : dequeue_pushable_task <-pick_next_task_rt
+ <idle>-0 3d..3 29us : __schedule <-preempt_schedule
+ <idle>-0 3d..3 30us : 0:120:R ==> [003] 2448: 94:R sleep
+
+This isn't that big of a trace, even with function tracing enabled,
+so I included the entire trace.
+
+The interrupt went off while the system was idle. Somewhere
+before task_woken_rt() was called, the NEED_RESCHED flag was set;
+this is indicated by the first occurrence of the 'N' flag.
+
+Latency tracing and events
+--------------------------
+Function tracing can induce a much larger latency, but without
+seeing what happens within the latency it is hard to know what
+caused it. There is a middle ground, and that is to enable
+events.
+
+ # echo 0 > options/function-trace
+ # echo wakeup_rt > current_tracer
+ # echo 1 > events/enable
+ # echo 1 > tracing_on
+ # echo 0 > tracing_max_latency
+ # chrt -f 5 sleep 1
+ # echo 0 > tracing_on
+ # cat trace
+# tracer: wakeup_rt
+#
+# wakeup_rt latency trace v1.1.5 on 3.8.0-test+
+# --------------------------------------------------------------------
+# latency: 6 us, #12/12, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
+# -----------------
+# | task: sleep-5882 (uid:0 nice:0 policy:1 rt_prio:5)
+# -----------------
+#
+# _------=> CPU#
+# / _-----=> irqs-off
+# | / _----=> need-resched
+# || / _---=> hardirq/softirq
+# ||| / _--=> preempt-depth
+# |||| / delay
+# cmd pid ||||| time | caller
+# \ / ||||| \ | /
+ <idle>-0 2d.h4 0us : 0:120:R + [002] 5882: 94:R sleep
+ <idle>-0 2d.h4 0us : ttwu_do_activate.constprop.87 <-try_to_wake_up
+ <idle>-0 2d.h4 1us : sched_wakeup: comm=sleep pid=5882 prio=94 success=1 target_cpu=002
+ <idle>-0 2dNh2 1us : hrtimer_expire_exit: hrtimer=ffff88007796feb8
+ <idle>-0 2.N.2 2us : power_end: cpu_id=2
+ <idle>-0 2.N.2 3us : cpu_idle: state=4294967295 cpu_id=2
+ <idle>-0 2dN.3 4us : hrtimer_cancel: hrtimer=ffff88007d50d5e0
+ <idle>-0 2dN.3 4us : hrtimer_start: hrtimer=ffff88007d50d5e0 function=tick_sched_timer expires=34311211000000 softexpires=34311211000000
+ <idle>-0 2.N.2 5us : rcu_utilization: Start context switch
+ <idle>-0 2.N.2 5us : rcu_utilization: End context switch
+ <idle>-0 2d..3 6us : __schedule <-schedule
+ <idle>-0 2d..3 6us : 0:120:R ==> [002] 5882: 94:R sleep
+
function
--------
@@ -1039,6 +1653,7 @@ function
This tracer is the function tracer. Enabling the function tracer
can be done from the debug file system. Make sure the
ftrace_enabled is set; otherwise this tracer is a nop.
+See the "ftrace_enabled" section below.
# sysctl kernel.ftrace_enabled=1
# echo function > current_tracer
@@ -1048,23 +1663,23 @@ ftrace_enabled is set; otherwise this tracer is a nop.
# cat trace
# tracer: function
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- bash-4003 [00] 123.638713: finish_task_switch <-schedule
- bash-4003 [00] 123.638714: _spin_unlock_irq <-finish_task_switch
- bash-4003 [00] 123.638714: sub_preempt_count <-_spin_unlock_irq
- bash-4003 [00] 123.638715: hrtick_set <-schedule
- bash-4003 [00] 123.638715: _spin_lock_irqsave <-hrtick_set
- bash-4003 [00] 123.638716: add_preempt_count <-_spin_lock_irqsave
- bash-4003 [00] 123.638716: _spin_unlock_irqrestore <-hrtick_set
- bash-4003 [00] 123.638717: sub_preempt_count <-_spin_unlock_irqrestore
- bash-4003 [00] 123.638717: hrtick_clear <-hrtick_set
- bash-4003 [00] 123.638718: sub_preempt_count <-schedule
- bash-4003 [00] 123.638718: sub_preempt_count <-preempt_schedule
- bash-4003 [00] 123.638719: wait_for_completion <-__stop_machine_run
- bash-4003 [00] 123.638719: wait_for_common <-wait_for_completion
- bash-4003 [00] 123.638720: _spin_lock_irq <-wait_for_common
- bash-4003 [00] 123.638720: add_preempt_count <-_spin_lock_irq
+# entries-in-buffer/entries-written: 24799/24799 #P:4
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ bash-1994 [002] .... 3082.063030: mutex_unlock <-rb_simple_write
+ bash-1994 [002] .... 3082.063031: __mutex_unlock_slowpath <-mutex_unlock
+ bash-1994 [002] .... 3082.063031: __fsnotify_parent <-fsnotify_modify
+ bash-1994 [002] .... 3082.063032: fsnotify <-fsnotify_modify
+ bash-1994 [002] .... 3082.063032: __srcu_read_lock <-fsnotify
+ bash-1994 [002] .... 3082.063032: add_preempt_count <-__srcu_read_lock
+ bash-1994 [002] ...1 3082.063032: sub_preempt_count <-__srcu_read_lock
+ bash-1994 [002] .... 3082.063033: __srcu_read_unlock <-fsnotify
[...]
@@ -1214,79 +1829,19 @@ int main (int argc, char **argv)
return 0;
}
+Or this simple script!
-hw-branch-tracer (x86 only)
----------------------------
-
-This tracer uses the x86 last branch tracing hardware feature to
-collect a branch trace on all cpus with relatively low overhead.
-
-The tracer uses a fixed-size circular buffer per cpu and only
-traces ring 0 branches. The trace file dumps that buffer in the
-following format:
-
-# tracer: hw-branch-tracer
-#
-# CPU# TO <- FROM
- 0 scheduler_tick+0xb5/0x1bf <- task_tick_idle+0x5/0x6
- 2 run_posix_cpu_timers+0x2b/0x72a <- run_posix_cpu_timers+0x25/0x72a
- 0 scheduler_tick+0x139/0x1bf <- scheduler_tick+0xed/0x1bf
- 0 scheduler_tick+0x17c/0x1bf <- scheduler_tick+0x148/0x1bf
- 2 run_posix_cpu_timers+0x9e/0x72a <- run_posix_cpu_timers+0x5e/0x72a
- 0 scheduler_tick+0x1b6/0x1bf <- scheduler_tick+0x1aa/0x1bf
-
-
-The tracer may be used to dump the trace for the oops'ing cpu on
-a kernel oops into the system log. To enable this,
-ftrace_dump_on_oops must be set. To set ftrace_dump_on_oops, one
-can either use the sysctl function or set it via the proc system
-interface.
-
- sysctl kernel.ftrace_dump_on_oops=n
-
-or
-
- echo n > /proc/sys/kernel/ftrace_dump_on_oops
-
-If n = 1, ftrace will dump buffers of all CPUs, if n = 2 ftrace will
-only dump the buffer of the CPU that triggered the oops.
-
-Here's an example of such a dump after a null pointer
-dereference in a kernel module:
-
-[57848.105921] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
-[57848.106019] IP: [<ffffffffa0000006>] open+0x6/0x14 [oops]
-[57848.106019] PGD 2354e9067 PUD 2375e7067 PMD 0
-[57848.106019] Oops: 0002 [#1] SMP
-[57848.106019] last sysfs file: /sys/devices/pci0000:00/0000:00:1e.0/0000:20:05.0/local_cpus
-[57848.106019] Dumping ftrace buffer:
-[57848.106019] ---------------------------------
-[...]
-[57848.106019] 0 chrdev_open+0xe6/0x165 <- cdev_put+0x23/0x24
-[57848.106019] 0 chrdev_open+0x117/0x165 <- chrdev_open+0xfa/0x165
-[57848.106019] 0 chrdev_open+0x120/0x165 <- chrdev_open+0x11c/0x165
-[57848.106019] 0 chrdev_open+0x134/0x165 <- chrdev_open+0x12b/0x165
-[57848.106019] 0 open+0x0/0x14 [oops] <- chrdev_open+0x144/0x165
-[57848.106019] 0 page_fault+0x0/0x30 <- open+0x6/0x14 [oops]
-[57848.106019] 0 error_entry+0x0/0x5b <- page_fault+0x4/0x30
-[57848.106019] 0 error_kernelspace+0x0/0x31 <- error_entry+0x59/0x5b
-[57848.106019] 0 error_sti+0x0/0x1 <- error_kernelspace+0x2d/0x31
-[57848.106019] 0 page_fault+0x9/0x30 <- error_sti+0x0/0x1
-[57848.106019] 0 do_page_fault+0x0/0x881 <- page_fault+0x1a/0x30
-[...]
-[57848.106019] 0 do_page_fault+0x66b/0x881 <- is_prefetch+0x1ee/0x1f2
-[57848.106019] 0 do_page_fault+0x6e0/0x881 <- do_page_fault+0x67a/0x881
-[57848.106019] 0 oops_begin+0x0/0x96 <- do_page_fault+0x6e0/0x881
-[57848.106019] 0 trace_hw_branch_oops+0x0/0x2d <- oops_begin+0x9/0x96
-[...]
-[57848.106019] 0 ds_suspend_bts+0x2a/0xe3 <- ds_suspend_bts+0x1a/0xe3
-[57848.106019] ---------------------------------
-[57848.106019] CPU 0
-[57848.106019] Modules linked in: oops
-[57848.106019] Pid: 5542, comm: cat Tainted: G W 2.6.28 #23
-[57848.106019] RIP: 0010:[<ffffffffa0000006>] [<ffffffffa0000006>] open+0x6/0x14 [oops]
-[57848.106019] RSP: 0018:ffff880235457d48 EFLAGS: 00010246
-[...]
+------
+#!/bin/bash
+
+debugfs=`sed -ne 's/^debugfs \(.*\) debugfs.*/\1/p' /proc/mounts`
+echo nop > $debugfs/tracing/current_tracer
+echo 0 > $debugfs/tracing/tracing_on
+echo $$ > $debugfs/tracing/set_ftrace_pid
+echo function > $debugfs/tracing/current_tracer
+echo 1 > $debugfs/tracing/tracing_on
+exec "$@"
+------
function graph tracer
@@ -1473,16 +2028,18 @@ starts of pointing to a simple return. (Enabling FTRACE will
include the -pg switch in the compiling of the kernel.)
At compile time every C file object is run through the
-recordmcount.pl script (located in the scripts directory). This
-script will process the C object using objdump to find all the
-locations in the .text section that call mcount. (Note, only the
-.text section is processed, since processing other sections like
-.init.text may cause races due to those sections being freed).
+recordmcount program (located in the scripts directory). This
+program will parse the ELF headers in the C object to find all
+the locations in the .text section that call mcount. (Note, only
+whitelisted .text sections are processed, since processing other
+sections like .init.text may cause races due to those sections
+being freed unexpectedly).
A new section called "__mcount_loc" is created that holds
references to all the mcount call sites in the .text section.
-This section is compiled back into the original object. The
-final linker will add all these references into a single table.
+The recordmcount program re-links this section back into the
+original object. The final linking stage of the kernel will add all these
+references into a single table.
On boot up, before SMP is initialized, the dynamic ftrace code
scans this table and updates all the locations into nops. It
@@ -1493,13 +2050,25 @@ unloaded, it also removes its functions from the ftrace function
list. This is automatic in the module unload code, and the
module author does not need to worry about it.
-When tracing is enabled, kstop_machine is called to prevent
-races with the CPUS executing code being modified (which can
-cause the CPU to do undesirable things), and the nops are
+When tracing is enabled, the process of modifying the function
+tracepoints is dependent on architecture. The old method is to use
+kstop_machine to prevent races with the CPUs executing code being
+modified (which can cause the CPU to do undesirable things, especially
+if the modified code crosses cache (or page) boundaries), and the nops are
patched back to calls. But this time, they do not call mcount
(which is just a function stub). They now call into the ftrace
infrastructure.
+The new method of modifying the function tracepoints is to place
+a breakpoint at the location to be modified, sync all CPUs, and
+then modify the rest of the instruction not covered by the
+breakpoint. The CPUs are synced again, and then the breakpoint is
+replaced with the finished version of the ftrace call site.
+
+Some archs do not even need to monkey around with the synchronization,
+and can just slap the new code on top of the old without any
+problems with other CPUs executing it at the same time.
+
One special side-effect to the recording of the functions being
traced is that we can now selectively choose which functions we
wish to trace and which ones we want the mcount calls to remain
@@ -1530,20 +2099,28 @@ mutex_lock
If I am only interested in sys_nanosleep and hrtimer_interrupt:
- # echo sys_nanosleep hrtimer_interrupt \
- > set_ftrace_filter
+ # echo sys_nanosleep hrtimer_interrupt > set_ftrace_filter
# echo function > current_tracer
# echo 1 > tracing_on
# usleep 1
# echo 0 > tracing_on
# cat trace
-# tracer: ftrace
+# tracer: function
+#
+# entries-in-buffer/entries-written: 5/5 #P:4
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- usleep-4134 [00] 1317.070017: hrtimer_interrupt <-smp_apic_timer_interrupt
- usleep-4134 [00] 1317.070111: sys_nanosleep <-syscall_call
- <idle>-0 [00] 1317.070115: hrtimer_interrupt <-smp_apic_timer_interrupt
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ usleep-2665 [001] .... 4186.475355: sys_nanosleep <-system_call_fastpath
+ <idle>-0 [001] d.h1 4186.475409: hrtimer_interrupt <-smp_apic_timer_interrupt
+ usleep-2665 [001] d.h1 4186.475426: hrtimer_interrupt <-smp_apic_timer_interrupt
+ <idle>-0 [003] d.h1 4186.475426: hrtimer_interrupt <-smp_apic_timer_interrupt
+ <idle>-0 [002] d.h1 4186.475427: hrtimer_interrupt <-smp_apic_timer_interrupt
To see which functions are being traced, you can cat the file:
@@ -1571,20 +2148,25 @@ Note: It is better to use quotes to enclose the wild cards,
Produces:
-# tracer: ftrace
+# tracer: function
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- bash-4003 [00] 1480.611794: hrtimer_init <-copy_process
- bash-4003 [00] 1480.611941: hrtimer_start <-hrtick_set
- bash-4003 [00] 1480.611956: hrtimer_cancel <-hrtick_clear
- bash-4003 [00] 1480.611956: hrtimer_try_to_cancel <-hrtimer_cancel
- <idle>-0 [00] 1480.612019: hrtimer_get_next_event <-get_next_timer_interrupt
- <idle>-0 [00] 1480.612025: hrtimer_get_next_event <-get_next_timer_interrupt
- <idle>-0 [00] 1480.612032: hrtimer_get_next_event <-get_next_timer_interrupt
- <idle>-0 [00] 1480.612037: hrtimer_get_next_event <-get_next_timer_interrupt
- <idle>-0 [00] 1480.612382: hrtimer_get_next_event <-get_next_timer_interrupt
-
+# entries-in-buffer/entries-written: 897/897 #P:4
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ <idle>-0 [003] dN.1 4228.547803: hrtimer_cancel <-tick_nohz_idle_exit
+ <idle>-0 [003] dN.1 4228.547804: hrtimer_try_to_cancel <-hrtimer_cancel
+ <idle>-0 [003] dN.2 4228.547805: hrtimer_force_reprogram <-__remove_hrtimer
+ <idle>-0 [003] dN.1 4228.547805: hrtimer_forward <-tick_nohz_idle_exit
+ <idle>-0 [003] dN.1 4228.547805: hrtimer_start_range_ns <-hrtimer_start_expires.constprop.11
+ <idle>-0 [003] d..1 4228.547858: hrtimer_get_next_event <-get_next_timer_interrupt
+ <idle>-0 [003] d..1 4228.547859: hrtimer_start <-__tick_nohz_idle_enter
+ <idle>-0 [003] d..2 4228.547860: hrtimer_force_reprogram <-__rem
Notice that we lost the sys_nanosleep.
@@ -1651,19 +2233,29 @@ traced.
Produces:
-# tracer: ftrace
+# tracer: function
+#
+# entries-in-buffer/entries-written: 39608/39608 #P:4
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
- bash-4043 [01] 115.281644: finish_task_switch <-schedule
- bash-4043 [01] 115.281645: hrtick_set <-schedule
- bash-4043 [01] 115.281645: hrtick_clear <-hrtick_set
- bash-4043 [01] 115.281646: wait_for_completion <-__stop_machine_run
- bash-4043 [01] 115.281647: wait_for_common <-wait_for_completion
- bash-4043 [01] 115.281647: kthread_stop <-stop_machine_run
- bash-4043 [01] 115.281648: init_waitqueue_head <-kthread_stop
- bash-4043 [01] 115.281648: wake_up_process <-kthread_stop
- bash-4043 [01] 115.281649: try_to_wake_up <-wake_up_process
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ bash-1994 [000] .... 4342.324896: file_ra_state_init <-do_dentry_open
+ bash-1994 [000] .... 4342.324897: open_check_o_direct <-do_last
+ bash-1994 [000] .... 4342.324897: ima_file_check <-do_last
+ bash-1994 [000] .... 4342.324898: process_measurement <-ima_file_check
+ bash-1994 [000] .... 4342.324898: ima_get_action <-process_measurement
+ bash-1994 [000] .... 4342.324898: ima_match_policy <-ima_get_action
+ bash-1994 [000] .... 4342.324899: do_truncate <-do_last
+ bash-1994 [000] .... 4342.324899: should_remove_suid <-do_truncate
+ bash-1994 [000] .... 4342.324899: notify_change <-do_truncate
+ bash-1994 [000] .... 4342.324900: current_fs_time <-notify_change
+ bash-1994 [000] .... 4342.324900: current_kernel_time <-current_fs_time
+ bash-1994 [000] .... 4342.324900: timespec_trunc <-current_fs_time
We can see that there's no more lock or preempt tracing.
@@ -1729,6 +2321,28 @@ this special filter via:
echo > set_graph_function
+ftrace_enabled
+--------------
+
+Note, the proc sysctl ftrace_enabled is a big on/off switch for the
+function tracer. By default it is enabled (when function tracing is
+enabled in the kernel). If it is disabled, all function tracing is
+disabled. This includes not only the function tracers for ftrace, but
+also for any other uses (perf, kprobes, stack tracing, profiling, etc).
+
+Please disable this with care.
+
+This can be disabled (and enabled) with:
+
+ sysctl kernel.ftrace_enabled=0
+ sysctl kernel.ftrace_enabled=1
+
+ or
+
+ echo 0 > /proc/sys/kernel/ftrace_enabled
+ echo 1 > /proc/sys/kernel/ftrace_enabled
+
+
Filter commands
---------------
@@ -1763,12 +2377,58 @@ The following commands are supported:
echo '__schedule_bug:traceoff:5' > set_ftrace_filter
+ To always disable tracing when __schedule_bug is hit:
+
+ echo '__schedule_bug:traceoff' > set_ftrace_filter
+
These commands are cumulative whether or not they are appended
to set_ftrace_filter. To remove a command, prepend it by '!'
and drop the parameter:
+ echo '!__schedule_bug:traceoff:0' > set_ftrace_filter
+
+ The above removes the traceoff command for __schedule_bug
+ that has a counter. To remove commands without counters:
+
echo '!__schedule_bug:traceoff' > set_ftrace_filter
+- snapshot
+ Will cause a snapshot to be triggered when the function is hit.
+
+ echo 'native_flush_tlb_others:snapshot' > set_ftrace_filter
+
+ To only snapshot once:
+
+ echo 'native_flush_tlb_others:snapshot:1' > set_ftrace_filter
+
+ To remove the above commands:
+
+ echo '!native_flush_tlb_others:snapshot' > set_ftrace_filter
+ echo '!native_flush_tlb_others:snapshot:0' > set_ftrace_filter
+
+- enable_event/disable_event
+ These commands can enable or disable a trace event. Note, because
+ function tracing callbacks are very sensitive, when these commands
+ are registered, the trace point is activated, but disabled in
+ a "soft" mode. That is, the tracepoint will be called, but
+ just will not be traced. The event tracepoint stays in this mode
+ as long as there's a command that triggers it.
+
+ echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > \
+ set_ftrace_filter
+
+ The format is:
+
+ <function>:enable_event:<system>:<event>[:count]
+ <function>:disable_event:<system>:<event>[:count]
+
+ To remove the event commands:
+
+ echo '!try_to_wake_up:enable_event:sched:sched_switch:0' > \
+ set_ftrace_filter
+ echo '!schedule:disable_event:sched:sched_switch' > \
+ set_ftrace_filter
trace_pipe
----------
@@ -1787,28 +2447,31 @@ different. The trace is live.
# cat trace
# tracer: function
#
-# TASK-PID CPU# TIMESTAMP FUNCTION
-# | | | | |
+# entries-in-buffer/entries-written: 0/0 #P:4
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
#
# cat /tmp/trace.out
- bash-4043 [00] 41.267106: finish_task_switch <-schedule
- bash-4043 [00] 41.267106: hrtick_set <-schedule
- bash-4043 [00] 41.267107: hrtick_clear <-hrtick_set
- bash-4043 [00] 41.267108: wait_for_completion <-__stop_machine_run
- bash-4043 [00] 41.267108: wait_for_common <-wait_for_completion
- bash-4043 [00] 41.267109: kthread_stop <-stop_machine_run
- bash-4043 [00] 41.267109: init_waitqueue_head <-kthread_stop
- bash-4043 [00] 41.267110: wake_up_process <-kthread_stop
- bash-4043 [00] 41.267110: try_to_wake_up <-wake_up_process
- bash-4043 [00] 41.267111: select_task_rq_rt <-try_to_wake_up
+ bash-1994 [000] .... 5281.568961: mutex_unlock <-rb_simple_write
+ bash-1994 [000] .... 5281.568963: __mutex_unlock_slowpath <-mutex_unlock
+ bash-1994 [000] .... 5281.568963: __fsnotify_parent <-fsnotify_modify
+ bash-1994 [000] .... 5281.568964: fsnotify <-fsnotify_modify
+ bash-1994 [000] .... 5281.568964: __srcu_read_lock <-fsnotify
+ bash-1994 [000] .... 5281.568964: add_preempt_count <-__srcu_read_lock
+ bash-1994 [000] ...1 5281.568965: sub_preempt_count <-__srcu_read_lock
+ bash-1994 [000] .... 5281.568965: __srcu_read_unlock <-fsnotify
+ bash-1994 [000] .... 5281.568967: sys_dup2 <-system_call_fastpath
Note, reading the trace_pipe file will block until more input is
-added. By changing the tracer, trace_pipe will issue an EOF. We
-needed to set the function tracer _before_ we "cat" the
-trace_pipe file.
-
+added.
trace entries
-------------
@@ -1817,31 +2480,50 @@ Having too much or not enough data can be troublesome in
diagnosing an issue in the kernel. The file buffer_size_kb is
used to modify the size of the internal trace buffers. The
number listed is the number of entries that can be recorded per
-CPU. To know the full size, multiply the number of possible CPUS
+CPU. To know the full size, multiply the number of possible CPUs
with the number of entries.
# cat buffer_size_kb
1408 (units kilobytes)
-Note, to modify this, you must have tracing completely disabled.
-To do that, echo "nop" into the current_tracer. If the
-current_tracer is not set to "nop", an EINVAL error will be
-returned.
+Or simply read buffer_total_size_kb
+
+ # cat buffer_total_size_kb
+5632
+
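+With four CPUs (the "#P:4" shown in the trace headers) and 1408
+kilobytes per CPU, the total is 4 * 1408 = 5632 kilobytes, which
+matches the buffer_total_size_kb value above.
+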
+To modify the buffer, simply echo in a number (in 1024 byte segments).
- # echo nop > current_tracer
# echo 10000 > buffer_size_kb
# cat buffer_size_kb
10000 (units kilobytes)
-The number of pages which will be allocated is limited to a
-percentage of available memory. Allocating too much will produce
-an error.
+It will try to allocate as much as possible. If you allocate too
+much, it can trigger the Out-Of-Memory killer.
# echo 1000000000000 > buffer_size_kb
-bash: echo: write error: Cannot allocate memory
# cat buffer_size_kb
85
+The per_cpu buffers can be changed individually as well:
+
+ # echo 10000 > per_cpu/cpu0/buffer_size_kb
+ # echo 100 > per_cpu/cpu1/buffer_size_kb
+
+When the per_cpu buffers are not the same, the buffer_size_kb
+at the top level will just show an X:
+
+ # cat buffer_size_kb
+X
+
+This is where the buffer_total_size_kb is useful:
+
+ # cat buffer_total_size_kb
+12916
+
+Writing to the top level buffer_size_kb will reset all the buffers
+to be the same again.
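+
+For example, writing a single value at the top level brings the
+per_cpu buffers back in sync (the value here is only illustrative):
+
+ # echo 1408 > buffer_size_kb
+ # cat buffer_size_kb
+1408 (units kilobytes)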
+
Snapshot
--------
CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
@@ -1925,7 +2607,188 @@ bash: echo: write error: Device or resource busy
# cat snapshot
cat: snapshot: Device or resource busy
+
+Instances
+---------
+In the debugfs tracing directory is a directory called "instances".
+New directories can be created inside it with mkdir, and removed
+with rmdir. A directory created with mkdir here will already
+contain files and other directories after it is created.
+
+ # mkdir instances/foo
+ # ls instances/foo
+buffer_size_kb buffer_total_size_kb events free_buffer per_cpu
+set_event snapshot trace trace_clock trace_marker trace_options
+trace_pipe tracing_on
+
+As you can see, the new directory looks similar to the tracing directory
+itself. In fact, it is very similar, except that the buffer and
+events are independent of the main directory, and of any other
+instances that are created.
+
+The files in the new directory work just like the files with the
+same name in the tracing directory except the buffer that is used
+is a separate and new buffer. The files affect that buffer but do not
+affect the main buffer with the exception of trace_options. Currently,
+the trace_options affect all instances and the top level buffer
+the same, but this may change in future releases. That is, options
+may become specific to the instance they reside in.
+
+Notice that none of the function tracer files are there, nor are
+current_tracer and available_tracers. This is because the buffers
+can currently only have events enabled for them.
+
+ # mkdir instances/foo
+ # mkdir instances/bar
+ # mkdir instances/zoot
+ # echo 100000 > buffer_size_kb
+ # echo 1000 > instances/foo/buffer_size_kb
+ # echo 5000 > instances/bar/per_cpu/cpu1/buffer_size_kb
+ # echo function > current_trace
+ # echo 1 > instances/foo/events/sched/sched_wakeup/enable
+ # echo 1 > instances/foo/events/sched/sched_wakeup_new/enable
+ # echo 1 > instances/foo/events/sched/sched_switch/enable
+ # echo 1 > instances/bar/events/irq/enable
+ # echo 1 > instances/zoot/events/syscalls/enable
+ # cat trace_pipe
+CPU:2 [LOST 11745 EVENTS]
+ bash-2044 [002] .... 10594.481032: _raw_spin_lock_irqsave <-get_page_from_freelist
+ bash-2044 [002] d... 10594.481032: add_preempt_count <-_raw_spin_lock_irqsave
+ bash-2044 [002] d..1 10594.481032: __rmqueue <-get_page_from_freelist
+ bash-2044 [002] d..1 10594.481033: _raw_spin_unlock <-get_page_from_freelist
+ bash-2044 [002] d..1 10594.481033: sub_preempt_count <-_raw_spin_unlock
+ bash-2044 [002] d... 10594.481033: get_pageblock_flags_group <-get_pageblock_migratetype
+ bash-2044 [002] d... 10594.481034: __mod_zone_page_state <-get_page_from_freelist
+ bash-2044 [002] d... 10594.481034: zone_statistics <-get_page_from_freelist
+ bash-2044 [002] d... 10594.481034: __inc_zone_state <-zone_statistics
+ bash-2044 [002] d... 10594.481034: __inc_zone_state <-zone_statistics
+ bash-2044 [002] .... 10594.481035: arch_dup_task_struct <-copy_process
+[...]
+
+ # cat instances/foo/trace_pipe
+ bash-1998 [000] d..4 136.676759: sched_wakeup: comm=kworker/0:1 pid=59 prio=120 success=1 target_cpu=000
+ bash-1998 [000] dN.4 136.676760: sched_wakeup: comm=bash pid=1998 prio=120 success=1 target_cpu=000
+ <idle>-0 [003] d.h3 136.676906: sched_wakeup: comm=rcu_preempt pid=9 prio=120 success=1 target_cpu=003
+ <idle>-0 [003] d..3 136.676909: sched_switch: prev_comm=swapper/3 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=rcu_preempt next_pid=9 next_prio=120
+ rcu_preempt-9 [003] d..3 136.676916: sched_switch: prev_comm=rcu_preempt prev_pid=9 prev_prio=120 prev_state=S ==> next_comm=swapper/3 next_pid=0 next_prio=120
+ bash-1998 [000] d..4 136.677014: sched_wakeup: comm=kworker/0:1 pid=59 prio=120 success=1 target_cpu=000
+ bash-1998 [000] dN.4 136.677016: sched_wakeup: comm=bash pid=1998 prio=120 success=1 target_cpu=000
+ bash-1998 [000] d..3 136.677018: sched_switch: prev_comm=bash prev_pid=1998 prev_prio=120 prev_state=R+ ==> next_comm=kworker/0:1 next_pid=59 next_prio=120
+ kworker/0:1-59 [000] d..4 136.677022: sched_wakeup: comm=sshd pid=1995 prio=120 success=1 target_cpu=001
+ kworker/0:1-59 [000] d..3 136.677025: sched_switch: prev_comm=kworker/0:1 prev_pid=59 prev_prio=120 prev_state=S ==> next_comm=bash next_pid=1998 next_prio=120
+[...]
+
+ # cat instances/bar/trace_pipe
+ migration/1-14 [001] d.h3 138.732674: softirq_raise: vec=3 [action=NET_RX]
+ <idle>-0 [001] dNh3 138.732725: softirq_raise: vec=3 [action=NET_RX]
+ bash-1998 [000] d.h1 138.733101: softirq_raise: vec=1 [action=TIMER]
+ bash-1998 [000] d.h1 138.733102: softirq_raise: vec=9 [action=RCU]
+ bash-1998 [000] ..s2 138.733105: softirq_entry: vec=1 [action=TIMER]
+ bash-1998 [000] ..s2 138.733106: softirq_exit: vec=1 [action=TIMER]
+ bash-1998 [000] ..s2 138.733106: softirq_entry: vec=9 [action=RCU]
+ bash-1998 [000] ..s2 138.733109: softirq_exit: vec=9 [action=RCU]
+ sshd-1995 [001] d.h1 138.733278: irq_handler_entry: irq=21 name=uhci_hcd:usb4
+ sshd-1995 [001] d.h1 138.733280: irq_handler_exit: irq=21 ret=unhandled
+ sshd-1995 [001] d.h1 138.733281: irq_handler_entry: irq=21 name=eth0
+ sshd-1995 [001] d.h1 138.733283: irq_handler_exit: irq=21 ret=handled
+[...]
+
+ # cat instances/zoot/trace
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 18996/18996 #P:4
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ bash-1998 [000] d... 140.733501: sys_write -> 0x2
+ bash-1998 [000] d... 140.733504: sys_dup2(oldfd: a, newfd: 1)
+ bash-1998 [000] d... 140.733506: sys_dup2 -> 0x1
+ bash-1998 [000] d... 140.733508: sys_fcntl(fd: a, cmd: 1, arg: 0)
+ bash-1998 [000] d... 140.733509: sys_fcntl -> 0x1
+ bash-1998 [000] d... 140.733510: sys_close(fd: a)
+ bash-1998 [000] d... 140.733510: sys_close -> 0x0
+ bash-1998 [000] d... 140.733514: sys_rt_sigprocmask(how: 0, nset: 0, oset: 6e2768, sigsetsize: 8)
+ bash-1998 [000] d... 140.733515: sys_rt_sigprocmask -> 0x0
+ bash-1998 [000] d... 140.733516: sys_rt_sigaction(sig: 2, act: 7fff718846f0, oact: 7fff71884650, sigsetsize: 8)
+ bash-1998 [000] d... 140.733516: sys_rt_sigaction -> 0x0
+
+You can see that the trace of the topmost trace buffer shows only
+the function tracing. The foo instance displays wakeups and task
+switches.
+
+To remove the instances, simply delete their directories:
+
+ # rmdir instances/foo
+ # rmdir instances/bar
+ # rmdir instances/zoot
+
+Note, if a process has a trace file open in one of the instance
+directories, the rmdir will fail with EBUSY.
+
+
+Stack trace
-----------
+Since the kernel has a fixed-size stack, it is important not to
+waste it in functions. A kernel developer must be conscious of
+what they allocate on the stack. If they add too much, the system
+can be in danger of a stack overflow, and corruption will occur,
+usually leading to a system panic.
+
+There are some tools that check this, usually with interrupts
+periodically checking usage. But if you can perform a check
+at every function call, that becomes far more useful. Since ftrace
+provides a function tracer, it is convenient to check the stack size
+at every function call. This is enabled via the stack tracer.
+
+CONFIG_STACK_TRACER enables the ftrace stack tracing functionality.
+To enable it, write a '1' into /proc/sys/kernel/stack_tracer_enabled.
+
+ # echo 1 > /proc/sys/kernel/stack_tracer_enabled
+
+You can also enable it from the kernel command line to trace
+the stack size of the kernel during boot up, by adding "stacktrace"
+to the kernel command line.
+
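+For example, a boot loader entry might append it to the existing
+kernel parameters (the image path and other options below are only
+illustrative):
+
+  linux /boot/vmlinuz-3.8.0 root=/dev/sda1 ro quiet stacktrace
+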
+After running it for a few minutes, the output looks like:
+
+ # cat stack_max_size
+2928
+
+ # cat stack_trace
+ Depth Size Location (18 entries)
+ ----- ---- --------
+ 0) 2928 224 update_sd_lb_stats+0xbc/0x4ac
+ 1) 2704 160 find_busiest_group+0x31/0x1f1
+ 2) 2544 256 load_balance+0xd9/0x662
+ 3) 2288 80 idle_balance+0xbb/0x130
+ 4) 2208 128 __schedule+0x26e/0x5b9
+ 5) 2080 16 schedule+0x64/0x66
+ 6) 2064 128 schedule_timeout+0x34/0xe0
+ 7) 1936 112 wait_for_common+0x97/0xf1
+ 8) 1824 16 wait_for_completion+0x1d/0x1f
+ 9) 1808 128 flush_work+0xfe/0x119
+ 10) 1680 16 tty_flush_to_ldisc+0x1e/0x20
+ 11) 1664 48 input_available_p+0x1d/0x5c
+ 12) 1616 48 n_tty_poll+0x6d/0x134
+ 13) 1568 64 tty_poll+0x64/0x7f
+ 14) 1504 880 do_select+0x31e/0x511
+ 15) 624 400 core_sys_select+0x177/0x216
+ 16) 224 96 sys_select+0x91/0xb9
+ 17) 128 128 system_call_fastpath+0x16/0x1b
+
+Note, if -mfentry is being used by gcc, functions get traced before
+they set up the stack frame. This means that leaf level functions
+are not tested by the stack tracer when -mfentry is used.
+
+Currently, -mfentry is used by gcc 4.6.0 and above on x86 only.
+
+---------
More details can be found in the source code, in the
kernel/trace/*.c files.
diff --git a/MAINTAINERS b/MAINTAINERS
index 84a9a9ea220..e8f8c354968 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3515,7 +3515,7 @@ F: drivers/isdn/gigaset/
F: include/uapi/linux/gigaset_dev.h
GPIO SUBSYSTEM
-M: Grant Likely <grant.likely@secretlab.ca>
+M: Grant Likely <grant.likely@linaro.org>
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -4348,7 +4348,7 @@ F: drivers/irqchip/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-M: Grant Likely <grant.likely@secretlab.ca>
+M: Grant Likely <grant.likely@linaro.org>
T: git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
S: Maintained
F: Documentation/IRQ-domain.txt
@@ -4835,11 +4835,8 @@ F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
-M: Grant Likely <grant.likely@secretlab.ca>
-W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
L: linuxppc-dev@lists.ozlabs.org
-T: git git://git.secretlab.ca/git/linux-2.6.git
-S: Maintained
+S: Unmaintained
F: arch/powerpc/*/*virtex*
F: arch/powerpc/*/*/*virtex*
@@ -5857,7 +5854,7 @@ F: Documentation/i2c/busses/i2c-ocores
F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
-M: Grant Likely <grant.likely@secretlab.ca>
+M: Grant Likely <grant.likely@linaro.org>
M: Rob Herring <rob.herring@calxeda.com>
L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
W: http://fdt.secretlab.ca
@@ -7481,11 +7478,11 @@ S: Maintained
F: drivers/clk/spear/
SPI SUBSYSTEM
-M: Grant Likely <grant.likely@secretlab.ca>
M: Mark Brown <broonie@kernel.org>
+M: Grant Likely <grant.likely@linaro.org>
L: spi-devel-general@lists.sourceforge.net
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
-T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
F: Documentation/spi/
F: drivers/spi/
@@ -8734,7 +8731,7 @@ F: drivers/scsi/vmw_pvscsi.c
F: drivers/scsi/vmw_pvscsi.h
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
-M: Liam Girdwood <lrg@ti.com>
+M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
@@ -8998,9 +8995,7 @@ S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
XILINX SYSTEMACE DRIVER
-M: Grant Likely <grant.likely@secretlab.ca>
-W: http://www.secretlab.ca/
-S: Maintained
+S: Unmaintained
F: drivers/block/xsysace.c
XILINX UARTLITE SERIAL DRIVER
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index cb7bcc51608..39253b9aedd 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -322,6 +322,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 0 0x1 0x0 /* PA0 periph A SPI0_MISO pin */
+ 0 1 0x1 0x0 /* PA1 periph A SPI0_MOSI pin */
+ 0 2 0x1 0x0>; /* PA2 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 0 0x1 0x0 /* PB0 periph A SPI1_MISO pin */
+ 1 1 0x1 0x0 /* PB1 periph A SPI1_MOSI pin */
+ 1 2 0x1 0x0>; /* PB2 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -471,6 +489,28 @@
status = "disabled";
};
+ spi0: spi@fffc8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffc8000 0x200>;
+ interrupts = <12 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffcc000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffcc000 0x200>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
+
adc0: adc@fffe0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffe0000 0x100>;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 271d4de026e..94b58ab2cc0 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -303,6 +303,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 0 0x2 0x0 /* PA0 periph B SPI0_MISO pin */
+ 0 1 0x2 0x0 /* PA1 periph B SPI0_MOSI pin */
+ 0 2 0x2 0x0>; /* PA2 periph B SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 12 0x1 0x0 /* PB12 periph A SPI1_MISO pin */
+ 1 13 0x1 0x0 /* PB13 periph A SPI1_MOSI pin */
+ 1 14 0x1 0x0>; /* PB14 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -462,6 +480,28 @@
reg = <0xfffffd40 0x10>;
status = "disabled";
};
+
+ spi0: spi@fffa4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa4000 0x200>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffa8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa8000 0x200>;
+ interrupts = <15 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 1eb08728f52..a14e424b2e8 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -79,6 +79,16 @@
};
};
};
+
+ spi0: spi@fffa4000 {
+ status = "okay";
+ cs-gpios = <&pioA 5 0>, <0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index da15e83e7f1..23d1f468f27 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -96,6 +96,16 @@
status = "okay";
pinctrl-0 = <&pinctrl_ssc0_tx>;
};
+
+ spi0: spi@fffc8000 {
+ status = "okay";
+ cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <50000000>;
+ reg = <1>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 6b1d4cab24c..cfdf429578b 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -322,6 +322,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <1 0 0x1 0x0 /* PB0 periph A SPI0_MISO pin */
+ 1 1 0x1 0x0 /* PB1 periph A SPI0_MOSI pin */
+ 1 2 0x1 0x0>; /* PB2 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <1 14 0x1 0x0 /* PB14 periph A SPI1_MISO pin */
+ 1 15 0x1 0x0 /* PB15 periph A SPI1_MOSI pin */
+ 1 16 0x1 0x0>; /* PB16 periph A SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x200>;
@@ -531,6 +549,28 @@
reg = <0xfffffd40 0x10>;
status = "disabled";
};
+
+ spi0: spi@fffa4000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa4000 0x200>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@fffa8000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xfffa8000 0x200>;
+ interrupts = <15 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 20c31913c27..92c52a7d70b 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -102,6 +102,16 @@
};
};
};
+
+ spi0: spi@fffa4000{
+ status = "okay";
+ cs-gpios = <&pioB 3 0>, <0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <13000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 7750f98dd76..b2961f1ea51 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -261,6 +261,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
+ 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
+ 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
+ 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
+ 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -373,6 +391,28 @@
#size-cells = <0>;
status = "disabled";
};
+
+ spi0: spi@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0000000 0x100>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@f0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0004000 0x100>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d400f8de438..34c842b1efb 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -67,6 +67,16 @@
};
};
};
+
+ spi0: spi@f0000000 {
+ status = "okay";
+ cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
+ m25p80@0 {
+ compatible = "atmel,at25df321a";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index a98c0d50fbb..347b438d47f 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -343,6 +343,24 @@
};
};
+ spi0 {
+ pinctrl_spi0: spi0-0 {
+ atmel,pins =
+ <0 11 0x1 0x0 /* PA11 periph A SPI0_MISO pin */
+ 0 12 0x1 0x0 /* PA12 periph A SPI0_MOSI pin */
+ 0 13 0x1 0x0>; /* PA13 periph A SPI0_SPCK pin */
+ };
+ };
+
+ spi1 {
+ pinctrl_spi1: spi1-0 {
+ atmel,pins =
+ <0 21 0x2 0x0 /* PA21 periph B SPI1_MISO pin */
+ 0 22 0x2 0x0 /* PA22 periph B SPI1_MOSI pin */
+ 0 23 0x2 0x0>; /* PA23 periph B SPI1_SPCK pin */
+ };
+ };
+
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
@@ -529,6 +547,28 @@
trigger-value = <0x6>;
};
};
+
+ spi0: spi@f0000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0000000 0x100>;
+ interrupts = <13 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
+ status = "disabled";
+ };
+
+ spi1: spi@f0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0xf0004000 0x100>;
+ interrupts = <14 4 3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1>;
+ status = "disabled";
+ };
};
nand0: nand@40000000 {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 8a7cf1d9cf5..09f5e667ca7 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -84,6 +84,16 @@
};
};
};
+
+ spi0: spi@f0000000 {
+ status = "okay";
+ cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
+ m25p80@0 {
+ compatible = "atmel,at25df321a";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
+ };
};
usb0: ohci@00600000 {
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b67cd537411..44199bc2c66 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -232,6 +232,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk),
CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk),
CLKDEV_CON_DEV_ID("mci_clk", "fffa8000.mmc", &mmc_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffc8000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffcc000.spi", &spi1_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index d3addee43d8..2ec5efea3f0 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -262,6 +262,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("mci_clk", "fffd0000.mmc", &mmc1_clk),
CLKDEV_CON_DEV_ID(NULL, "fff84000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "fff88000.i2c", &twi1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffa4000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "fffa8000.spi", &spi1_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &uhphs_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff200.gpio", &pioA_clk),
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index 5dfc8fd8710..ccd078355ee 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -172,6 +172,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("dma_clk", "ffffec00.dma-controller", &dma_clk),
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 44a9a62dcc1..a200d8a1712 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -237,6 +237,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID(NULL, "f8010000.i2c", &twi0_clk),
CLKDEV_CON_DEV_ID(NULL, "f8014000.i2c", &twi1_clk),
CLKDEV_CON_DEV_ID(NULL, "f8018000.i2c", &twi2_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0000000.spi", &spi0_clk),
+ CLKDEV_CON_DEV_ID("spi_clk", "f0004000.spi", &spi1_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff400.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff600.gpio", &pioAB_clk),
CLKDEV_CON_DEV_ID(NULL, "fffff800.gpio", &pioCD_clk),
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 497fcb793dc..d28c7fbaba2 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -97,6 +97,19 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
static struct regulator_consumer_supply max8952_consumer =
REGULATOR_SUPPLY("vdd_arm", NULL);
+static struct regulator_init_data universal_max8952_reg_data = {
+ .constraints = {
+ .name = "VARM_1.2V",
+ .min_uV = 770000,
+ .max_uV = 1400000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max8952_consumer,
+};
+
static struct max8952_platform_data universal_max8952_pdata __initdata = {
.gpio_vid0 = EXYNOS4_GPX0(3),
.gpio_vid1 = EXYNOS4_GPX0(4),
@@ -105,19 +118,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
.dvs_mode = { 48, 32, 28, 18 }, /* 1.25, 1.20, 1.05, 0.95V */
.sync_freq = 0, /* default: fastest */
.ramp_speed = 0, /* default: fastest */
-
- .reg_data = {
- .constraints = {
- .name = "VARM_1.2V",
- .min_uV = 770000,
- .max_uV = 1400000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
- .always_on = 1,
- .boot_on = 1,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &max8952_consumer,
- },
+ .reg_data = &universal_max8952_reg_data,
};
static struct regulator_consumer_supply lp3974_buck1_consumer =
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
index 1ab91b5209e..85b728cc27a 100644
--- a/arch/arm/mach-imx/clk-busy.c
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -169,7 +169,7 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
busy->mux.reg = reg;
busy->mux.shift = shift;
- busy->mux.width = width;
+ busy->mux.mask = BIT(width) - 1;
busy->mux.lock = &imx_ccm_lock;
busy->mux_ops = &clk_mux_ops;
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 2a17bc506cf..ff3c9f01659 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -5,6 +5,7 @@
*
* Authors: Sundar Iyer <sundar.iyer@stericsson.com>
* Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Daniel Willerud <daniel.willerud@stericsson.com>
*
* MOP500 board specific initialization for regulators
*/
@@ -12,6 +13,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
#include "board-mop500-regulators.h"
+#include "id.h"
static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
@@ -53,21 +55,37 @@ struct regulator_init_data tps61052_regulator = {
};
static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* External displays, connector on board 2v5 power supply */
- REGULATOR_SUPPLY("vaux12v5", "mcde.0"),
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
/* SFH7741 proximity sensor */
REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
/* BH1780GLS ambient light sensor */
REGULATOR_SUPPLY("vcc", "2-0029"),
/* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "3-0018"),
+ REGULATOR_SUPPLY("vdd", "2-0018"),
+ /* lsm303dlhc accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0019"),
/* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "3-001e"),
+ REGULATOR_SUPPLY("vdd", "2-001e"),
/* Rohm BU21013 Touchscreen devices */
REGULATOR_SUPPLY("avdd", "3-005c"),
REGULATOR_SUPPLY("avdd", "3-005d"),
/* Synaptics RMI4 Touchscreen device */
REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "2-0068"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Pressure sensor device */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
};
static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
@@ -75,18 +93,50 @@ static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sdi4"),
/* AB8500 audio codec */
REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
};
static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ REGULATOR_SUPPLY("v-SD-STM", "stm"),
/* External MMC slot power */
REGULATOR_SUPPLY("vmmc", "sdi0"),
};
+static struct regulator_consumer_supply ab8505_vaux4_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux5_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux6_consumers[] = {
+};
+
+static struct regulator_consumer_supply ab8505_vaux8_consumers[] = {
+ /* AB8500 audio codec device */
+ REGULATOR_SUPPLY("v-aux8", NULL),
+};
+
+static struct regulator_consumer_supply ab8505_vadc_consumers[] = {
+ /* Internal general-purpose ADC */
+ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+};
+
static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
/* TV-out DENC supply */
REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
/* Internal general-purpose ADC */
REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
};
static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
@@ -114,77 +164,90 @@ static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
REGULATOR_SUPPLY("v-intcore", NULL),
/* USB Transceiver */
REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
+ /* Handled by abx500 clk driver */
+ REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
+};
+
+static struct regulator_consumer_supply ab8505_usb_consumers[] = {
+ /* HS USB OTG physical interface */
+ REGULATOR_SUPPLY("v-ape", NULL),
};
static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* External displays, connector on board, 1v8 power supply */
- REGULATOR_SUPPLY("vsmps2", "mcde.0"),
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
};
/* ab8500 regulator register initialization */
-struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
/*
* VanaRequestCtrl = HP/LP depending on VxRequest
* VextSupply1RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
/*
* VextSupply2RequestCtrl = HP/LP depending on VxRequest
* VextSupply3RequestCtrl = HP/LP depending on VxRequest
* Vaux1RequestCtrl = HP/LP depending on VxRequest
* Vaux2RequestCtrl = HP/LP depending on VxRequest
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
/*
* Vaux3RequestCtrl = HP/LP depending on VxRequest
* SwHPReq = Control through SWValid disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
/*
* VanaSysClkReq1HPValid = disabled
* Vaux1SysClkReq1HPValid = disabled
* Vaux2SysClkReq1HPValid = disabled
* Vaux3SysClkReq1HPValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
/*
* VextSupply1SysClkReq1HPValid = disabled
* VextSupply2SysClkReq1HPValid = disabled
* VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x40),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
/*
* VanaHwHPReq1Valid = disabled
* Vaux1HwHPreq1Valid = disabled
* Vaux2HwHPReq1Valid = disabled
* Vaux3HwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq1Valid = disabled
* VextSupply2HwHPReq1Valid = disabled
* VextSupply3HwHPReq1Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
/*
* VanaHwHPReq2Valid = disabled
* Vaux1HwHPReq2Valid = disabled
* Vaux2HwHPReq2Valid = disabled
* Vaux3HwHPReq2Valid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
/*
* VextSupply1HwHPReq2Valid = disabled
* VextSupply2HwHPReq2Valid = disabled
* VextSupply3HwHPReq2Valid = HWReq2 controlled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x04),
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
/*
* VanaSwHPReqValid = disabled
* Vaux1SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
/*
* Vaux2SwHPReqValid = disabled
* Vaux3SwHPReqValid = disabled
@@ -192,7 +255,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VextSupply2SwHPReqValid = disabled
* VextSupply3SwHPReqValid = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
/*
* SysClkReq2Valid1 = SysClkReq2 controlled
* SysClkReq3Valid1 = disabled
@@ -202,7 +265,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid1 = disabled
* SysClkReq8Valid1 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
/*
* SysClkReq2Valid2 = disabled
* SysClkReq3Valid2 = disabled
@@ -212,7 +275,7 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* SysClkReq7Valid2 = disabled
* SysClkReq8Valid2 = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0x20),
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
/*
* VTVoutEna = disabled
* Vintcore12Ena = disabled
@@ -220,66 +283,62 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* Vintcore12LP = inactive (HP)
* VTVoutLP = inactive (HP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x10),
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
/*
* VaudioEna = disabled
* VdmicEna = disabled
* Vamic1Ena = disabled
* Vamic2Ena = disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
/*
* Vamic1_dzout = high-Z when Vamic1 is disabled
* Vamic2_dzout = high-Z when Vamic2 is disabled
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
/*
- * VPll = Hw controlled
+ * VPll = Hw controlled (NOTE! PRCMU bits)
* VanaRegu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x02),
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
/*
* VrefDDREna = disabled
* VrefDDRSleepMode = inactive (no pulldown)
*/
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
/*
- * VextSupply1Regu = HW control
- * VextSupply2Regu = HW control
- * VextSupply3Regu = HW control
+ * VextSupply1Regu = force LP
+ * VextSupply2Regu = force OFF
+ * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
* ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
* ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
*/
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0x2a),
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
/*
* Vaux1Regu = force HP
* Vaux2Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x01),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
/*
- * Vaux3regu = force off
+ * Vaux3Regu = force off
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
/*
- * Vsmps1 = 1.15V
+ * Vaux1Sel = 2.8 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VSMPS1SEL1, 0x24),
- /*
- * Vaux1Sel = 2.5 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x08),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
/*
* Vaux2Sel = 2.9 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0d),
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
/*
* Vaux3Sel = 2.91 V
*/
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07),
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
/*
* VextSupply12LP = disabled (no LP)
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
/*
* Vaux1Disch = short discharge time
* Vaux2Disch = short discharge time
@@ -288,33 +347,26 @@ ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS] = {
* VTVoutDisch = short discharge time
* VaudioDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
/*
* VanaDisch = short discharge time
* VdmicPullDownEna = pulldown disabled when Vdmic is disabled
* VdmicDisch = short discharge time
*/
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x00),
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
};
/* AB8500 regulators */
-struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
/* supplies to the display/camera */
[AB8500_LDO_AUX1] = {
.constraints = {
.name = "V-DISPLAY",
- .min_uV = 2500000,
- .max_uV = 2900000,
+ .min_uV = 2800000,
+ .max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1, /* display is on at boot */
- /*
- * This voltage cannot be disabled right now because
- * it is somehow affecting the external MMC
- * functionality, though that typically will use
- * AUX3.
- */
- .always_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
.consumer_supplies = ab8500_vaux1_consumers,
@@ -326,7 +378,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
.consumer_supplies = ab8500_vaux2_consumers,
@@ -338,7 +393,10 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
.min_uV = 1100000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
.consumer_supplies = ab8500_vaux3_consumers,
@@ -392,18 +450,614 @@ struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
[AB8500_LDO_INTCORE] = {
.constraints = {
.name = "V-INTCORE",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
.consumer_supplies = ab8500_vintcore_consumers,
},
- /* supply for U8500 CSI/DSI, VANA LDO */
+ /* supply for U8500 CSI-DSI, VANA LDO */
[AB8500_LDO_ANA] = {
.constraints = {
- .name = "V-CSI/DSI",
+ .name = "V-CSI-DSI",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
.consumer_supplies = ab8500_vana_consumers,
},
};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
+static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
+ .hwreq = true,
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS1_EXT_1V8 */
+ [AB8500_EXT_SUPPLY1] = {
+ .constraints = {
+ .name = "ab8500-ext-supply1",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .initial_mode = REGULATOR_MODE_IDLE,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ },
+ /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
+ [AB8500_EXT_SUPPLY2] = {
+ .constraints = {
+ .name = "ab8500-ext-supply2",
+ .min_uV = 1360000,
+ .max_uV = 1360000,
+ },
+ },
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+/* ab8505 regulator register initialization */
+static struct ab8500_regulator_reg_init ab8505_reg_init[] = {
+ /*
+ * VarmRequestCtrl
+ * VsmpsCRequestCtrl
+ * VsmpsARequestCtrl
+ * VsmpsBRequestCtrl
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00),
+ /*
+ * VsafeRequestCtrl
+ * VpllRequestCtrl
+ * VanaRequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00),
+ /*
+ * Vaux1RequestCtrl = HP/LP depending on VxRequest
+ * Vaux2RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00),
+ /*
+ * Vaux3RequestCtrl = HP/LP depending on VxRequest
+ * SwHPReq = Control through SWValid disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00),
+ /*
+ * VsmpsASysClkReq1HPValid
+ * VsmpsBSysClkReq1HPValid
+ * VsafeSysClkReq1HPValid
+ * VanaSysClkReq1HPValid = disabled
+ * VpllSysClkReq1HPValid
+ * Vaux1SysClkReq1HPValid = disabled
+ * Vaux2SysClkReq1HPValid = disabled
+ * Vaux3SysClkReq1HPValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
+ /*
+ * VsmpsCSysClkReq1HPValid
+ * VarmSysClkReq1HPValid
+ * VbbSysClkReq1HPValid
+ * VsmpsMSysClkReq1HPValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00),
+ /*
+ * VsmpsAHwHPReq1Valid
+ * VsmpsBHwHPReq1Valid
+ * VsafeHwHPReq1Valid
+ * VanaHwHPReq1Valid = disabled
+ * VpllHwHPReq1Valid
+ * Vaux1HwHPreq1Valid = disabled
+ * Vaux2HwHPReq1Valid = disabled
+ * Vaux3HwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00),
+ /*
+ * VsmpsMHwHPReq1Valid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00),
+ /*
+ * VsmpsAHwHPReq2Valid
+ * VsmpsBHwHPReq2Valid
+ * VsafeHwHPReq2Valid
+ * VanaHwHPReq2Valid = disabled
+ * VpllHwHPReq2Valid
+ * Vaux1HwHPReq2Valid = disabled
+ * Vaux2HwHPReq2Valid = disabled
+ * Vaux3HwHPReq2Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00),
+ /*
+ * VsmpsMHwHPReq2Valid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00),
+ /*
+ * VsmpsCSwHPReqValid
+ * VarmSwHPReqValid
+ * VsmpsASwHPReqValid
+ * VsmpsBSwHPReqValid
+ * VsafeSwHPReqValid
+ * VanaSwHPReqValid = disabled
+ * VpllSwHPReqValid
+ * Vaux1SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00),
+ /*
+ * Vaux2SwHPReqValid = disabled
+ * Vaux3SwHPReqValid = disabled
+ * VsmpsMSwHPReqValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00),
+ /*
+ * SysClkReq2Valid1 = SysClkReq2 controlled
+ * SysClkReq3Valid1 = disabled
+ * SysClkReq4Valid1 = SysClkReq4 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a),
+ /*
+ * SysClkReq2Valid2 = disabled
+ * SysClkReq3Valid2 = disabled
+ * SysClkReq4Valid2 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00),
+ /*
+ * Vaux4SwHPReqValid
+ * Vaux4HwHPReq2Valid
+ * Vaux4HwHPReq1Valid
+ * Vaux4SysClkReq1HPValid
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00),
+ /*
+ * VadcEna = disabled
+ * VintCore12Ena = disabled
+ * VintCore12Sel = 1.25 V
+ * VintCore12LP = inactive (HP)
+ * VadcLP = inactive (HP)
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10),
+ /*
+ * VaudioEna = disabled
+ * Vaux8Ena = disabled
+ * Vamic1Ena = disabled
+ * Vamic2Ena = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00),
+ /*
+ * Vamic1_dzout = high-Z when Vamic1 is disabled
+ * Vamic2_dzout = high-Z when Vamic2 is disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00),
+ /*
+ * VsmpsARegu
+ * VsmpsASelCtrl
+ * VsmpsAAutoMode
+ * VsmpsAPWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00),
+ /*
+ * VsmpsBRegu
+ * VsmpsBSelCtrl
+ * VsmpsBAutoMode
+ * VsmpsBPWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00),
+ /*
+ * VsafeRegu
+ * VsafeSelCtrl
+ * VsafeAutoMode
+ * VsafePWMMode
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00),
+ /*
+ * VPll = Hw controlled (NOTE! PRCMU bits)
+ * VanaRegu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02),
+ /*
+ * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
+ * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
+ * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0)
+ * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
+ * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
+ */
+ INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30),
+ /*
+ * Vaux1Regu = force HP
+ * Vaux2Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01),
+ /*
+ * Vaux3Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00),
+ /*
+ * VsmpsASel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00),
+ /*
+ * VsmpsASel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00),
+ /*
+ * VsmpsASel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00),
+ /*
+ * VsmpsBSel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00),
+ /*
+ * VsmpsBSel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00),
+ /*
+ * VsmpsBSel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00),
+ /*
+ * VsafeSel1
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00),
+ /*
+ * VsafeSel2
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00),
+ /*
+ * VsafeSel3
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00),
+ /*
+ * Vaux1Sel = 2.8 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C),
+ /*
+ * Vaux2Sel = 2.9 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d),
+ /*
+ * Vaux3Sel = 2.91 V
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07),
+ /*
+ * Vaux4RequestCtrl
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00),
+ /*
+ * Vaux4Regu
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00),
+ /*
+ * Vaux4Sel
+ */
+ INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00),
+ /*
+ * Vaux1Disch = short discharge time
+ * Vaux2Disch = short discharge time
+ * Vaux3Disch = short discharge time
+ * Vintcore12Disch = short discharge time
+ * VTVoutDisch = short discharge time
+ * VaudioDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00),
+ /*
+ * VanaDisch = short discharge time
+ * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled
+ * Vaux8Disch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00),
+ /*
+ * Vaux4Disch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00),
+ /*
+ * Vaux5Sel
+ * Vaux5LP
+ * Vaux5Ena
+ * Vaux5Disch
+ * Vaux5DisSfst
+ * Vaux5DisPulld
+ */
+ INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00),
+ /*
+ * Vaux6Sel
+ * Vaux6LP
+ * Vaux6Ena
+ * Vaux6DisPulld
+ */
+ INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00),
+};
+
+struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = {
+ /* supplies to the display/camera */
+ [AB8505_LDO_AUX1] = {
+ .constraints = {
+ .name = "V-DISPLAY",
+ .min_uV = 2800000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1, /* display is on at boot */
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
+ .consumer_supplies = ab8500_vaux1_consumers,
+ },
+ /* supplies to the on-board eMMC */
+ [AB8505_LDO_AUX2] = {
+ .constraints = {
+ .name = "V-eMMC1",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
+ .consumer_supplies = ab8500_vaux2_consumers,
+ },
+ /* supply for VAUX3, supplies to SDcard slots */
+ [AB8505_LDO_AUX3] = {
+ .constraints = {
+ .name = "V-MMC-SD",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
+ .consumer_supplies = ab8500_vaux3_consumers,
+ },
+ /* supply for VAUX4, supplies to NFC and standalone secure element */
+ [AB8505_LDO_AUX4] = {
+ .constraints = {
+ .name = "V-NFC-SE",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers),
+ .consumer_supplies = ab8505_vaux4_consumers,
+ },
+ /* supply for VAUX5, supplies to TBD */
+ [AB8505_LDO_AUX5] = {
+ .constraints = {
+ .name = "V-AUX5",
+ .min_uV = 1050000,
+ .max_uV = 2790000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers),
+ .consumer_supplies = ab8505_vaux5_consumers,
+ },
+ /* supply for VAUX6, supplies to TBD */
+ [AB8505_LDO_AUX6] = {
+ .constraints = {
+ .name = "V-AUX6",
+ .min_uV = 1050000,
+ .max_uV = 2790000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers),
+ .consumer_supplies = ab8505_vaux6_consumers,
+ },
+ /* supply for gpadc, ADC LDO */
+ [AB8505_LDO_ADC] = {
+ .constraints = {
+ .name = "V-ADC",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers),
+ .consumer_supplies = ab8505_vadc_consumers,
+ },
+ /* supply for ab8500-vaudio, VAUDIO LDO */
+ [AB8505_LDO_AUDIO] = {
+ .constraints = {
+ .name = "V-AUD",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
+ .consumer_supplies = ab8500_vaud_consumers,
+ },
+ /* supply for v-anamic1 VAMic1-LDO */
+ [AB8505_LDO_ANAMIC1] = {
+ .constraints = {
+ .name = "V-AMIC1",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
+ },
+ /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+ [AB8505_LDO_ANAMIC2] = {
+ .constraints = {
+ .name = "V-AMIC2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
+ },
+ /* supply for v-aux8, VAUX8 LDO */
+ [AB8505_LDO_AUX8] = {
+ .constraints = {
+ .name = "V-AUX8",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers),
+ .consumer_supplies = ab8505_vaux8_consumers,
+ },
+ /* supply for v-intcore12, VINTCORE12 LDO */
+ [AB8505_LDO_INTCORE] = {
+ .constraints = {
+ .name = "V-INTCORE",
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
+ .consumer_supplies = ab8500_vintcore_consumers,
+ },
+ /* supply for LDO USB */
+ [AB8505_LDO_USB] = {
+ .constraints = {
+ .name = "V-USB",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers),
+ .consumer_supplies = ab8505_usb_consumers,
+ },
+ /* supply for U8500 CSI-DSI, VANA LDO */
+ [AB8505_LDO_ANA] = {
+ .constraints = {
+ .name = "V-CSI-DSI",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
+ .consumer_supplies = ab8500_vana_consumers,
+ },
+};
+
+struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
+};
+
+/* Use the AB8500 init settings for AB8505 as they are the same right now */
+struct ab8500_regulator_platform_data ab8505_regulator_plat_data = {
+ .reg_init = ab8505_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8505_reg_init),
+ .regulator = ab8505_regulators,
+ .num_regulator = ARRAY_SIZE(ab8505_regulators),
+};
+
+static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
+{
+ int i;
+
+ if (cpu_is_u8520()) {
+ for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) {
+ if (ab8505_reg_init[i].id == id) {
+ u8 initval = ab8505_reg_init[i].value;
+ initval = (initval & ~mask) | (value & mask);
+ ab8505_reg_init[i].value = initval;
+
+ BUG_ON(mask & ~ab8505_reg_init[i].mask);
+ return;
+ }
+ }
+ } else {
+ for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
+ if (ab8500_reg_init[i].id == id) {
+ u8 initval = ab8500_reg_init[i].value;
+ initval = (initval & ~mask) | (value & mask);
+ ab8500_reg_init[i].value = initval;
+
+ BUG_ON(mask & ~ab8500_reg_init[i].mask);
+ return;
+ }
+ }
+ }
+
+ BUG_ON(1);
+}
+
+void mop500_regulator_init(void)
+{
+ struct regulator_init_data *regulator;
+
+ /*
+ * Temporarily turn on Vaux2 on 8520 machine
+ */
+ if (cpu_is_u8520()) {
+ /* Vaux2 initialized to be on */
+ ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05);
+ }
+
+ /*
+ * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
+ * all HREFP_V20 boards)
+ */
+ if (cpu_is_u8500v20()) {
+ /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
+ ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
+
+ /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
+ ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
+ 0x20, 0x20);
+
+ /* VextSupply2 = force HP at initialization */
+ ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
+
+ /* enable VextSupply2 during platform active */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.always_on = 1;
+
+ /* disable VextSupply2 in suspend */
+ regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
+ regulator->constraints.state_mem.disabled = 1;
+ regulator->constraints.state_standby.disabled = 1;
+
+ /* enable VextSupply2 HW control (used in suspend) */
+ regulator->driver_data = (void *)&ab8500_ext_supply2;
+ }
+}
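
For reference, the mask/value pair that INIT_REGULATOR_REGISTER() now takes is applied as a masked read-modify-write, the same update rule ab8500_modify_reg_init() uses above: only the bits selected by the mask are replaced, the rest of the cached init value is preserved. A minimal user-space sketch of that rule (not part of the patch, names and values are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t apply_masked_init(uint8_t initval, uint8_t mask, uint8_t value)
{
	/* clear the masked bits, then merge in the new value (masked) */
	return (uint8_t)((initval & ~mask) | (value & mask));
}

int main(void)
{
	/* e.g. VAUX12REGU on u8520: mask 0x0f, value 0x05 (Vaux2 on) */
	uint8_t reg = 0x01;	/* previous init value: Vaux1Regu = force HP */

	reg = apply_masked_init(reg, 0x0f, 0x05);
	printf("0x%02x\n", reg);	/* prints 0x05 */
	assert(reg == 0x05);
	return 0;
}
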
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
index 78a0642a220..9bece38fe93 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -14,10 +14,11 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
-extern struct ab8500_regulator_reg_init
-ab8500_regulator_reg_init[AB8500_NUM_REGULATOR_REGISTERS];
-extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
+extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data;
extern struct regulator_init_data tps61052_regulator;
extern struct regulator_init_data gpio_en_3v3_regulator;
+void mop500_regulator_init(void);
+
#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 87d2d7b38ce..ce672378a83 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -199,10 +199,7 @@ static struct platform_device snowball_sbnet_dev = {
struct ab8500_platform_data ab8500_platdata = {
.irq_base = MOP500_AB8500_IRQ_BASE,
- .regulator_reg_init = ab8500_regulator_reg_init,
- .num_regulator_reg_init = ARRAY_SIZE(ab8500_regulator_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .regulator = &ab8500_regulator_plat_data,
.gpio = &ab8500_gpio_pdata,
.codec = &ab8500_codec_pdata,
};
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 915683cb67d..c5e20b52e3b 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -21,6 +21,8 @@
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/vexpress.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#include <asm/arch_timer.h>
#include <asm/mach-types.h>
@@ -433,7 +435,7 @@ static void __init v2m_dt_timer_init(void)
{
struct device_node *node = NULL;
- vexpress_clk_of_init();
+ of_clk_init(NULL);
do {
node = of_find_compatible_node(node, NULL, "arm,sp804");
@@ -441,6 +443,10 @@ static void __init v2m_dt_timer_init(void)
if (node) {
pr_info("Using SP804 '%s' as a clock & events source\n",
node->full_name);
+ WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
+ "timclken1"), "v2m-timer0", "sp804"));
+ WARN_ON(clk_register_clkdev(of_clk_get_by_name(node,
+ "timclken2"), "v2m-timer1", "sp804"));
v2m_sp804_init(of_iomap(node, 0),
irq_of_parse_and_map(node, 0));
}
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 51afedda9ab..03db14d8ace 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/amba/pl330.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
@@ -1552,6 +1553,9 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
}
@@ -1590,6 +1594,9 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
}
@@ -1628,6 +1635,9 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
pd.num_cs = num_cs;
pd.src_clk_nr = src_clk_nr;
pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
+#ifdef CONFIG_PL330_DMA
+ pd.filter = pl330_filter;
+#endif
s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
}
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index f1c9c3e2f67..e97fd60e92e 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -85,20 +85,9 @@ static struct platform_device bcm63xx_spi_device = {
int __init bcm63xx_spi_register(void)
{
- struct clk *periph_clk;
-
if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
return -ENODEV;
- periph_clk = clk_get(NULL, "periph");
- if (IS_ERR(periph_clk)) {
- pr_err("unable to get periph clock\n");
- return -ENODEV;
- }
-
- /* Set bus frequency */
- spi_pdata.speed_hz = clk_get_rate(periph_clk);
-
spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
spi_resources[0].end = spi_resources[0].start;
spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index c9bae136260..b0184cf0257 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -13,7 +13,6 @@ struct bcm63xx_spi_pdata {
unsigned int msg_ctl_width;
int bus_num;
int num_chipselect;
- u32 speed_hz;
};
enum bcm63xx_regs_spi {
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b8020dc7b71..fa33c546e77 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -22,6 +22,7 @@
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
+#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index c2c85f6cd73..a162a7f86b2 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -35,7 +35,7 @@ static unsigned int nr_ports;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
- async_func_ptr *port_init_hw;
+ async_func_t port_init_hw;
} *sh7786_pcie_hwops;
static struct resource sh7786_pci0_resources[] = {
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index f2b292925cc..4e595ee8c91 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -247,9 +247,7 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
int ret;
- rcu_read_lock();
ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
- rcu_read_unlock();
if (ret)
strncpy(buf, "<unavailable>", buflen);
return ret;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 6ee17bb391a..b8bdfe61daa 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -101,6 +101,8 @@ static inline int hypervisor_init(void) { return 0; }
extern int platform_bus_init(void);
extern void cpu_dev_init(void);
+struct kobject *virtual_device_parent(struct device *dev);
+
extern int bus_add_device(struct device *dev);
extern void bus_probe_device(struct device *dev);
extern void bus_remove_device(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 8a00dec574d..1a68f947ded 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1205,26 +1205,10 @@ static void system_root_device_release(struct device *dev)
{
kfree(dev);
}
-/**
- * subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys: system subsystem
- * @groups: default attributes for the root device
- *
- * All 'system' subsystems have a /sys/devices/system/<name> root device
- * with the name of the subsystem. The root device can carry subsystem-
- * wide attributes. All registered devices are below this single root
- * device and are named after the subsystem with a simple enumeration
- * number appended. The registered devices are not explicitely named;
- * only 'id' in the device needs to be set.
- *
- * Do not use this interface for anything new, it exists for compatibility
- * with bad ideas only. New subsystems should use plain subsystems; and
- * add the subsystem-wide attributes should be added to the subsystem
- * directory itself and not some create fake root-device placed in
- * /sys/devices/system/<name>.
- */
-int subsys_system_register(struct bus_type *subsys,
- const struct attribute_group **groups)
+
+static int subsys_register(struct bus_type *subsys,
+ const struct attribute_group **groups,
+ struct kobject *parent_of_root)
{
struct device *dev;
int err;
@@ -1243,7 +1227,7 @@ int subsys_system_register(struct bus_type *subsys,
if (err < 0)
goto err_name;
- dev->kobj.parent = &system_kset->kobj;
+ dev->kobj.parent = parent_of_root;
dev->groups = groups;
dev->release = system_root_device_release;
@@ -1263,8 +1247,55 @@ err_dev:
bus_unregister(subsys);
return err;
}
+
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+ * with bad ideas only. New subsystems should use plain subsystems, and
+ * subsystem-wide attributes should be added to the subsystem directory
+ * itself rather than to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ return subsys_register(subsys, groups, &system_kset->kobj);
+}
EXPORT_SYMBOL_GPL(subsys_system_register);
+/**
+ * subsys_virtual_register - register a subsystem at /sys/devices/virtual/
+ * @subsys: virtual subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-wide
+ * attributes. All registered devices are below this single root device.
+ * There's no restriction on device naming. This is for kernel software
+ * constructs which need a sysfs interface.
+ */
+int subsys_virtual_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ struct kobject *virtual_dir;
+
+ virtual_dir = virtual_device_parent(NULL);
+ if (!virtual_dir)
+ return -ENOMEM;
+
+ return subsys_register(subsys, groups, virtual_dir);
+}
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
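
A hedged sketch of how a built-in caller could use the new subsys_virtual_register() (not taken from this patch: the "example" bus_type and the initcall are hypothetical, the function is not exported so the caller must be built into the kernel, and a matching declaration is assumed to be visible from <linux/device.h>):

#include <linux/device.h>
#include <linux/init.h>

static struct bus_type example_subsys = {
	.name = "example",
	.dev_name = "example",
};

static int __init example_subsys_init(void)
{
	/* the root device shows up under /sys/devices/virtual/example */
	return subsys_virtual_register(&example_subsys, NULL);
}
core_initcall(example_subsys_init);
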
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f88d9e259a3..01631243757 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -703,7 +703,7 @@ void device_initialize(struct device *dev)
set_dev_node(dev, -1);
}
-static struct kobject *virtual_device_parent(struct device *dev)
+struct kobject *virtual_device_parent(struct device *dev)
{
static struct kobject *virtual_dir = NULL;
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 5a22bd33ce3..c130536e0ab 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -38,7 +38,8 @@ struct regmap_format {
unsigned int reg, unsigned int val);
void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
void (*format_val)(void *buf, unsigned int val, unsigned int shift);
- unsigned int (*parse_val)(void *buf);
+ unsigned int (*parse_val)(const void *buf);
+ void (*parse_inplace)(void *buf);
};
struct regmap_async {
@@ -76,6 +77,7 @@ struct regmap {
unsigned int debugfs_tot_len;
struct list_head debugfs_off_cache;
+ struct mutex cache_lock;
#endif
unsigned int max_register;
@@ -125,6 +127,9 @@ struct regmap {
void *cache;
u32 cache_dirty;
+ unsigned long *cache_present;
+ unsigned int cache_present_nbits;
+
struct reg_default *patch;
int patch_regs;
@@ -187,12 +192,35 @@ int regcache_read(struct regmap *map,
int regcache_write(struct regmap *map,
unsigned int reg, unsigned int value);
int regcache_sync(struct regmap *map);
-
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size);
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+ const void *base,
+ unsigned int idx)
+{
+ return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_set_reg_present(struct regmap *map, unsigned int reg);
+
+static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
+{
+ if (!map->cache_present)
+ return true;
+ if (reg > map->cache_present_nbits)
+ return false;
+ return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
+}
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool async);
void regmap_async_complete_cb(struct regmap_async *async, int ret);
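
The new cache_present bitmap tracks which registers have actually been written to the cache, one bit per register number, stored in an array of longs and indexed BIT_WORD()/BIT_MASK() style. A standalone illustration of the lookup (plain C, not kernel code; note that this sketch bounds-checks with >= where the helper above uses >):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define DEMO_BIT_WORD(nr)	((nr) / DEMO_BITS_PER_LONG)
#define DEMO_BIT_MASK(nr)	(1UL << ((nr) % DEMO_BITS_PER_LONG))

static bool reg_present(const unsigned long *bitmap, unsigned int nbits,
			unsigned int reg)
{
	if (!bitmap)
		return true;	/* no bitmap allocated yet: assume present */
	if (reg >= nbits)
		return false;
	return bitmap[DEMO_BIT_WORD(reg)] & DEMO_BIT_MASK(reg);
}

int main(void)
{
	unsigned long present[2] = { 0 };

	present[DEMO_BIT_WORD(5)] |= DEMO_BIT_MASK(5);	/* mark register 5 */
	printf("reg 5: %d, reg 6: %d\n",
	       reg_present(present, 128, 5), reg_present(present, 128, 6));
	return 0;
}
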
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index afd6aa91a0d..e210a6d1406 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
ret = regcache_lzo_decompress_cache_block(map, lzo_block);
if (ret >= 0)
/* fetch the value from the cache */
- *value = regcache_get_val(lzo_block->dst, blkpos,
- map->cache_word_size);
+ *value = regcache_get_val(map, lzo_block->dst, blkpos);
kfree(lzo_block->dst);
/* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
}
/* write the new value to the cache */
- if (regcache_set_val(lzo_block->dst, blkpos, value,
- map->cache_word_size)) {
+ if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
kfree(lzo_block->dst);
goto out;
}
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 79f4fca9877..aa0875f6f1b 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -47,22 +47,21 @@ static inline void regcache_rbtree_get_base_top_reg(
*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
-static unsigned int regcache_rbtree_get_register(
- struct regcache_rbtree_node *rbnode, unsigned int idx,
- unsigned int word_size)
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode, unsigned int idx)
{
- return regcache_get_val(rbnode->block, idx, word_size);
+ return regcache_get_val(map, rbnode->block, idx);
}
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
- unsigned int idx, unsigned int val,
- unsigned int word_size)
+static void regcache_rbtree_set_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
+ unsigned int idx, unsigned int val)
{
- regcache_set_val(rbnode->block, idx, val, word_size);
+ regcache_set_val(map, rbnode->block, idx, val);
}
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
- unsigned int reg)
+ unsigned int reg)
{
struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
struct rb_node *node;
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
struct regcache_rbtree_node *n;
struct rb_node *node;
unsigned int base, top;
+ size_t mem_size;
int nodes = 0;
int registers = 0;
int this_registers, average;
map->lock(map);
+ mem_size = sizeof(*rbtree_ctx);
+ mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
+
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = container_of(node, struct regcache_rbtree_node, node);
+ mem_size += sizeof(*n);
+ mem_size += (n->blklen * map->cache_word_size);
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
else
average = 0;
- seq_printf(s, "%d nodes, %d registers, average %d registers\n",
- nodes, registers, average);
+ seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+ nodes, registers, average, mem_size);
map->unlock(map);
@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- *value = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
+ if (!regcache_reg_present(map, reg))
+ return -ENOENT;
+ *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
} else {
return -ENOENT;
}
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
}
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
unsigned int pos, unsigned int reg,
- unsigned int value, unsigned int word_size)
+ unsigned int value)
{
u8 *blk;
blk = krealloc(rbnode->block,
- (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+ (rbnode->blklen + 1) * map->cache_word_size,
+ GFP_KERNEL);
if (!blk)
return -ENOMEM;
/* insert the register value in the correct place in the rbnode block */
- memmove(blk + (pos + 1) * word_size,
- blk + pos * word_size,
- (rbnode->blklen - pos) * word_size);
+ memmove(blk + (pos + 1) * map->cache_word_size,
+ blk + pos * map->cache_word_size,
+ (rbnode->blklen - pos) * map->cache_word_size);
/* update the rbnode block, its size and the base register */
rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
if (!pos)
rbnode->base_reg = reg;
- regcache_rbtree_set_register(rbnode, pos, value, word_size);
+ regcache_rbtree_set_register(map, rbnode, pos, value);
return 0;
}
@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
struct regcache_rbtree_ctx *rbtree_ctx;
struct regcache_rbtree_node *rbnode, *rbnode_tmp;
struct rb_node *node;
- unsigned int val;
unsigned int reg_tmp;
unsigned int pos;
int i;
int ret;
rbtree_ctx = map->cache;
+ /* update the reg_present bitmap, make space if necessary */
+ ret = regcache_set_reg_present(map, reg);
+ if (ret < 0)
+ return ret;
+
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it.
*/
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- val = regcache_rbtree_get_register(rbnode, reg_tmp,
- map->cache_word_size);
- if (val == value)
- return 0;
- regcache_rbtree_set_register(rbnode, reg_tmp, value,
- map->cache_word_size);
+ regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
} else {
/* look for an adjacent register to the one we are about to add */
for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
pos = i + 1;
else
pos = i;
- ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
- reg, value,
- map->cache_word_size);
+ ret = regcache_rbtree_insert_to_block(map,
+ rbnode_tmp,
+ pos, reg,
+ value);
if (ret)
return ret;
rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
if (!rbnode)
return -ENOMEM;
- rbnode->blklen = 1;
+ rbnode->blklen = sizeof(*rbnode);
rbnode->base_reg = reg;
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
kfree(rbnode);
return -ENOMEM;
}
- regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
+ regcache_rbtree_set_register(map, rbnode, 0, value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
- unsigned int regtmp;
- unsigned int val;
int ret;
- int i, base, end;
+ int base, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -402,27 +408,13 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
else
end = rbnode->blklen;
- for (i = base; i < end; i++) {
- regtmp = rbnode->base_reg + (i * map->reg_stride);
- val = regcache_rbtree_get_register(rbnode, i,
- map->cache_word_size);
-
- /* Is this the hardware default? If so skip. */
- ret = regcache_lookup_reg(map, regtmp);
- if (ret >= 0 && val == map->reg_defaults[ret].def)
- continue;
-
- map->cache_bypass = 1;
- ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
- if (ret)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- regtmp, val);
- }
+ ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
+ base, end);
+ if (ret != 0)
+ return ret;
}
- return 0;
+ return regmap_async_complete(map);
}
struct regcache_ops regcache_rbtree_ops = {
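
The rbtree cache keeps each node's register values in one contiguous block; regcache_rbtree_insert_to_block() above grows that block with krealloc() and opens a hole with memmove() before writing the new value. A toy user-space model of the same insertion (not kernel code, error handling kept minimal):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int insert_to_block(unsigned int **blockp, unsigned int *blklen,
			   unsigned int pos, unsigned int value)
{
	unsigned int *blk = realloc(*blockp, (*blklen + 1) * sizeof(*blk));

	if (!blk)
		return -1;
	/* shift the tail up one slot to open a hole at 'pos' */
	memmove(blk + pos + 1, blk + pos, (*blklen - pos) * sizeof(*blk));
	blk[pos] = value;
	*blockp = blk;
	(*blklen)++;
	return 0;
}

int main(void)
{
	unsigned int *block = malloc(3 * sizeof(*block));
	unsigned int blklen = 3, i;

	if (!block)
		return 1;
	block[0] = 10; block[1] = 30; block[2] = 40;
	insert_to_block(&block, &blklen, 1, 20);	/* insert 20 at index 1 */
	for (i = 0; i < blklen; i++)
		printf("%u ", block[i]);	/* prints: 10 20 30 40 */
	printf("\n");
	free(block);
	return 0;
}
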
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69ff3e4742..75923f2396b 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -45,8 +45,8 @@ static int regcache_hw_init(struct regmap *map)
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf)
return -EINVAL;
- ret = regmap_bulk_read(map, 0, tmp_buf,
- map->num_reg_defaults_raw);
+ ret = regmap_raw_read(map, 0, tmp_buf,
+ map->num_reg_defaults_raw);
map->cache_bypass = cache_bypass;
if (ret < 0) {
kfree(tmp_buf);
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)
/* calculate the size of reg_defaults */
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
if (regmap_volatile(map, i * map->reg_stride))
continue;
count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
/* fill the reg_defaults */
map->num_reg_defaults = count;
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
- val = regcache_get_val(map->reg_defaults_raw,
- i, map->cache_word_size);
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
if (regmap_volatile(map, i * map->reg_stride))
continue;
map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->reg_defaults_raw = config->reg_defaults_raw;
map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+ map->cache_present = NULL;
+ map->cache_present_nbits = 0;
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)
BUG_ON(!map->cache_ops);
+ kfree(map->cache_present);
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-bool regcache_set_val(void *base, unsigned int idx,
- unsigned int val, unsigned int word_size)
+int regcache_set_reg_present(struct regmap *map, unsigned int reg)
{
- switch (word_size) {
+ unsigned long *cache_present;
+ unsigned int cache_present_size;
+ unsigned int nregs;
+ int i;
+
+ nregs = reg + 1;
+ cache_present_size = BITS_TO_LONGS(nregs);
+ cache_present_size *= sizeof(long);
+
+ if (!map->cache_present) {
+ cache_present = kmalloc(cache_present_size, GFP_KERNEL);
+ if (!cache_present)
+ return -ENOMEM;
+ bitmap_zero(cache_present, nregs);
+ map->cache_present = cache_present;
+ map->cache_present_nbits = nregs;
+ }
+
+ if (nregs > map->cache_present_nbits) {
+ cache_present = krealloc(map->cache_present,
+ cache_present_size, GFP_KERNEL);
+ if (!cache_present)
+ return -ENOMEM;
+ for (i = 0; i < nregs; i++)
+ if (i >= map->cache_present_nbits)
+ clear_bit(i, cache_present);
+ map->cache_present = cache_present;
+ map->cache_present_nbits = nregs;
+ }
+
+ set_bit(reg, map->cache_present);
+ return 0;
+}
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val)
+{
+ if (regcache_get_val(map, base, idx) == val)
+ return true;
+
+ /* Use device native format if possible */
+ if (map->format.format_val) {
+ map->format.format_val(base + (map->cache_word_size * idx),
+ val, 0);
+ return false;
+ }
+
+ switch (map->cache_word_size) {
case 1: {
u8 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
case 2: {
u16 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
case 4: {
u32 *cache = base;
- if (cache[idx] == val)
- return true;
cache[idx] = val;
break;
}
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
return false;
}
-unsigned int regcache_get_val(const void *base, unsigned int idx,
- unsigned int word_size)
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx)
{
if (!base)
return -EINVAL;
- switch (word_size) {
+ /* Use device native format if possible */
+ if (map->format.parse_val)
+ return map->format.parse_val(regcache_get_val_addr(map, base,
+ idx));
+
+ switch (map->cache_word_size) {
case 1: {
const u8 *cache = base;
return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
else
return -ENOENT;
}
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+ unsigned int block_base,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i, regtmp, val;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(map, regtmp))
+ continue;
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = 1;
+
+ ret = _regmap_write(map, regtmp, val);
+
+ map->cache_bypass = 0;
+ if (ret != 0)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ regtmp, val);
+ }
+
+ return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+ unsigned int base, unsigned int cur)
+{
+ size_t val_bytes = map->format.val_bytes;
+ int ret, count;
+
+ if (*data == NULL)
+ return 0;
+
+ count = cur - base;
+
+ dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+ count * val_bytes, count, base, cur - 1);
+
+ map->cache_bypass = 1;
+
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes,
+ false);
+
+ map->cache_bypass = 0;
+
+ *data = NULL;
+
+ return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ unsigned int i, val;
+ unsigned int regtmp = 0;
+ unsigned int base = 0;
+ const void *data = NULL;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(map, regtmp)) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ val = regcache_get_val(map, block, i);
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, regtmp);
+ if (ret >= 0 && val == map->reg_defaults[ret].def) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ if (!data) {
+ data = regcache_get_val_addr(map, block, i);
+ base = regtmp;
+ }
+ }
+
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ if (regmap_can_raw_write(map))
+ return regcache_sync_block_raw(map, block, block_base,
+ start, end);
+ else
+ return regcache_sync_block_single(map, block, block_base,
+ start, end);
+}
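
regcache_sync_block_raw() above coalesces consecutive registers that still need syncing into one raw write, and flushes the pending run whenever a register can be skipped (not present in the cache, or already at its hardware default). A toy model of that batching (plain C, not the kernel code; the register contents are made up and 0 stands in for the hardware default):

#include <stdio.h>

/* pretend registers 0..7 hold these cached values; the default is 0 */
static const int cache[8] = { 0, 3, 4, 0, 7, 8, 9, 0 };

static void flush(int base, int count)
{
	if (count)
		printf("raw write of %d register(s) starting at %d\n",
		       count, base);
}

int main(void)
{
	int base = 0, count = 0;
	int reg;

	for (reg = 0; reg < 8; reg++) {
		if (cache[reg] == 0) {	/* hardware default: skip and flush */
			flush(base, count);
			count = 0;
			continue;
		}
		if (!count)
			base = reg;	/* start a new run */
		count++;
	}
	flush(base, count);		/* flush any trailing run */
	return 0;
}
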
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 81d6f605c92..23b701f5fd2 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -88,16 +88,16 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
* If we don't have a cache build one so we don't have to do a
* linear scan each time.
*/
+ mutex_lock(&map->cache_lock);
+ i = base;
if (list_empty(&map->debugfs_off_cache)) {
- for (i = base; i <= map->max_register; i += map->reg_stride) {
+ for (; i <= map->max_register; i += map->reg_stride) {
/* Skip unprinted registers, closing off cache entry */
if (!regmap_readable(map, i) ||
regmap_precious(map, i)) {
if (c) {
c->max = p - 1;
- fpos_offset = c->max - c->min;
- reg_offset = fpos_offset / map->debugfs_tot_len;
- c->max_reg = c->base_reg + reg_offset;
+ c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
@@ -111,6 +111,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
return base;
}
c->min = p;
@@ -124,9 +125,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
/* Close the last entry off if we didn't scan beyond it */
if (c) {
c->max = p - 1;
- fpos_offset = c->max - c->min;
- reg_offset = fpos_offset / map->debugfs_tot_len;
- c->max_reg = c->base_reg + reg_offset;
+ c->max_reg = i - map->reg_stride;
list_add_tail(&c->list,
&map->debugfs_off_cache);
}
@@ -145,12 +144,14 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
fpos_offset = from - c->min;
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
+ mutex_unlock(&map->cache_lock);
return c->base_reg + reg_offset;
}
*pos = c->max;
ret = c->max_reg;
}
+ mutex_unlock(&map->cache_lock);
return ret;
}
@@ -311,6 +312,79 @@ static const struct file_operations regmap_range_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ struct regmap_debugfs_off_cache *c;
+ loff_t p = 0;
+ size_t buf_pos = 0;
+ char *buf;
+ char *entry;
+ int ret;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!entry) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* While we are at it, build the register dump cache
+ * now so the read() operation on the `registers' file
+ * can benefit from using the cache. We do not care
+ * about the file position information that is contained
+ * in the cache, just about the actual register blocks */
+ regmap_calc_tot_len(map, buf, count);
+ regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+ /* Reset file pointer as the fixed-format of the `registers'
+ * file is not compatible with the `range' file */
+ p = 0;
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ snprintf(entry, PAGE_SIZE, "%x-%x",
+ c->base_reg, c->max_reg);
+ if (p >= *ppos) {
+ if (buf_pos + 1 + strlen(entry) > count)
+ break;
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%s", entry);
+ buf_pos += strlen(entry);
+ buf[buf_pos] = '\n';
+ buf_pos++;
+ }
+ p += strlen(entry) + 1;
+ }
+ mutex_unlock(&map->cache_lock);
+
+ kfree(entry);
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out_buf;
+ }
+
+ *ppos += buf_pos;
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+ .open = simple_open,
+ .read = regmap_reg_ranges_read_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_access_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
@@ -385,6 +459,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
INIT_LIST_HEAD(&map->debugfs_off_cache);
+ mutex_init(&map->cache_lock);
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
@@ -403,6 +478,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_file("name", 0400, map->debugfs,
map, &regmap_name_fops);
+ debugfs_create_file("range", 0400, map->debugfs,
+ map, &regmap_reg_ranges_fops);
+
if (map->max_register) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
@@ -435,7 +513,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
kfree(map->debugfs_name);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 020ea2b9fd2..1643e889baf 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -460,7 +460,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
chip->name, d);
if (ret != 0) {
- dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+ dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+ irq, chip->name, ret);
goto err_domain;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 58cfb323242..a941dcfe759 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -228,30 +228,39 @@ static void regmap_format_32_native(void *buf, unsigned int val,
*(u32 *)buf = val << shift;
}
-static unsigned int regmap_parse_8(void *buf)
+static void regmap_parse_inplace_noop(void *buf)
{
- u8 *b = buf;
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+ const u8 *b = buf;
return b[0];
}
-static unsigned int regmap_parse_16_be(void *buf)
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+ const __be16 *b = buf;
+
+ return be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
{
__be16 *b = buf;
b[0] = be16_to_cpu(b[0]);
-
- return b[0];
}
-static unsigned int regmap_parse_16_native(void *buf)
+static unsigned int regmap_parse_16_native(const void *buf)
{
return *(u16 *)buf;
}
-static unsigned int regmap_parse_24(void *buf)
+static unsigned int regmap_parse_24(const void *buf)
{
- u8 *b = buf;
+ const u8 *b = buf;
unsigned int ret = b[2];
ret |= ((unsigned int)b[1]) << 8;
ret |= ((unsigned int)b[0]) << 16;
@@ -259,16 +268,21 @@ static unsigned int regmap_parse_24(void *buf)
return ret;
}
-static unsigned int regmap_parse_32_be(void *buf)
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+ const __be32 *b = buf;
+
+ return be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
{
__be32 *b = buf;
b[0] = be32_to_cpu(b[0]);
-
- return b[0];
}
-static unsigned int regmap_parse_32_native(void *buf)
+static unsigned int regmap_parse_32_native(const void *buf)
{
return *(u32 *)buf;
}
@@ -555,16 +569,21 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ if (val_endian == REGMAP_ENDIAN_NATIVE)
+ map->format.parse_inplace = regmap_parse_inplace_noop;
+
switch (config->val_bits) {
case 8:
map->format.format_val = regmap_format_8;
map->format.parse_val = regmap_parse_8;
+ map->format.parse_inplace = regmap_parse_inplace_noop;
break;
case 16:
switch (val_endian) {
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_16_be;
map->format.parse_val = regmap_parse_16_be;
+ map->format.parse_inplace = regmap_parse_16_be_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_16_native;
@@ -585,6 +604,7 @@ struct regmap *regmap_init(struct device *dev,
case REGMAP_ENDIAN_BIG:
map->format.format_val = regmap_format_32_be;
map->format.parse_val = regmap_parse_32_be;
+ map->format.parse_inplace = regmap_parse_32_be_inplace;
break;
case REGMAP_ENDIAN_NATIVE:
map->format.format_val = regmap_format_32_native;
@@ -917,8 +937,8 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
return 0;
}
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len, bool async)
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool async)
{
struct regmap_range_node *range;
unsigned long flags;
@@ -930,7 +950,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
size_t len;
int i;
- BUG_ON(!map->bus);
+ WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
if (map->writeable_reg)
@@ -943,8 +963,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
unsigned int ival;
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
- memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
- ival = map->format.parse_val(map->work_buf);
+ ival = map->format.parse_val(val + (i * val_bytes));
ret = regcache_write(map, reg + (i * map->reg_stride),
ival);
if (ret) {
@@ -999,6 +1018,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (!async)
return -ENOMEM;
+ trace_regmap_async_write_start(map->dev, reg, val_len);
+
async->work_buf = kzalloc(map->format.buf_size,
GFP_KERNEL | GFP_DMA);
if (!async->work_buf) {
@@ -1079,6 +1100,17 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+ return map->bus && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
unsigned int val)
{
@@ -1086,7 +1118,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
struct regmap_range_node *range;
struct regmap *map = context;
- BUG_ON(!map->bus || !map->format.format_write);
+ WARN_ON(!map->bus || !map->format.format_write);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1112,7 +1144,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
{
struct regmap *map = context;
- BUG_ON(!map->bus || !map->format.format_val);
+ WARN_ON(!map->bus || !map->format.format_val);
map->format.format_val(map->work_buf + map->format.reg_bytes
+ map->format.pad_bytes, val, 0);
@@ -1202,12 +1234,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
- if (!map->bus)
+ if (!regmap_can_raw_write(map))
return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
- if (reg % map->reg_stride)
- return -EINVAL;
map->lock(map->lock_arg);
@@ -1242,7 +1272,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (!map->bus)
return -EINVAL;
- if (!map->format.parse_val)
+ if (!map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
@@ -1260,7 +1290,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
goto out;
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(wval + i);
+ map->format.parse_inplace(wval + i);
}
/*
* Some devices does not support bulk write, for
@@ -1338,7 +1368,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
- BUG_ON(!map->bus);
+ WARN_ON(!map->bus);
range = _regmap_range_lookup(map, reg);
if (range) {
@@ -1393,7 +1423,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
- BUG_ON(!map->reg_read);
+ WARN_ON(!map->reg_read);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
@@ -1521,7 +1551,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (!map->bus)
return -EINVAL;
- if (!map->format.parse_val)
+ if (!map->format.parse_inplace)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
@@ -1548,7 +1578,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
}
for (i = 0; i < val_count * val_bytes; i += val_bytes)
- map->format.parse_val(val + i);
+ map->format.parse_inplace(val + i);
} else {
for (i = 0; i < val_count; i++) {
unsigned int ival;
@@ -1642,6 +1672,8 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
struct regmap *map = async->map;
bool wake;
+ trace_regmap_async_io_complete(map->dev);
+
spin_lock(&map->async_lock);
list_del(&async->list);
@@ -1688,6 +1720,8 @@ int regmap_async_complete(struct regmap *map)
if (!map->bus->async_write)
return 0;
+ trace_regmap_async_complete_start(map->dev);
+
wait_event(map->async_waitq, regmap_async_is_done(map));
spin_lock_irqsave(&map->async_lock, flags);
@@ -1695,6 +1729,8 @@ int regmap_async_complete(struct regmap *map)
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
+ trace_regmap_async_complete_done(map->dev);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index a47e6ee98b8..0357ac44638 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -55,6 +55,16 @@ config COMMON_CLK_MAX77686
---help---
This driver supports Maxim 77686 crystal oscillator clock.
+config COMMON_CLK_SI5351
+ tristate "Clock driver for SiLabs 5351A/B/C"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ select RATIONAL
+ ---help---
+ This driver supports Silicon Labs 5351A/B/C programmable clock
+ generators.
+
config CLK_TWL6040
tristate "External McPDM functional clock from twl6040"
depends on TWL6040_CORE
@@ -63,6 +73,14 @@ config CLK_TWL6040
McPDM. McPDM module is using the external bit clock on the McPDM bus
as functional clock.
+config COMMON_CLK_AXI_CLKGEN
+ tristate "AXI clkgen driver"
+ depends on ARCH_ZYNQ || MICROBLAZE
+ ---help---
+ Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
+ FPGAs. It is commonly used in Analog Devices' reference designs.
+
endmenu
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 300d4775d92..e7f7fe9b2f0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
obj-$(CONFIG_COMMON_CLK) += clk-gate.o
obj-$(CONFIG_COMMON_CLK) += clk-mux.o
+obj-$(CONFIG_COMMON_CLK) += clk-composite.o
# SoCs specific
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
@@ -23,6 +24,7 @@ ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o
+obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_ARCH_ZYNQ) += clk-zynq.o
@@ -31,6 +33,8 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_X86) += x86/
# Chip specific
+obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
+obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
new file mode 100644
index 00000000000..8137327847c
--- /dev/null
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -0,0 +1,331 @@
+/*
+ * AXI clkgen driver
+ *
+ * Copyright 2012-2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/err.h>
+
+#define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04
+#define AXI_CLKGEN_REG_CLK_OUT1 0x08
+#define AXI_CLKGEN_REG_CLK_OUT2 0x0c
+#define AXI_CLKGEN_REG_CLK_DIV 0x10
+#define AXI_CLKGEN_REG_CLK_FB1 0x14
+#define AXI_CLKGEN_REG_CLK_FB2 0x18
+#define AXI_CLKGEN_REG_LOCK1 0x1c
+#define AXI_CLKGEN_REG_LOCK2 0x20
+#define AXI_CLKGEN_REG_LOCK3 0x24
+#define AXI_CLKGEN_REG_FILTER1 0x28
+#define AXI_CLKGEN_REG_FILTER2 0x2c
+
+struct axi_clkgen {
+ void __iomem *base;
+ struct clk_hw clk_hw;
+};
+
+static uint32_t axi_clkgen_lookup_filter(unsigned int m)
+{
+ switch (m) {
+ case 0:
+ return 0x01001990;
+ case 1:
+ return 0x01001190;
+ case 2:
+ return 0x01009890;
+ case 3:
+ return 0x01001890;
+ case 4:
+ return 0x01008890;
+ case 5 ... 8:
+ return 0x01009090;
+ case 9 ... 11:
+ return 0x01000890;
+ case 12:
+ return 0x08009090;
+ case 13 ... 22:
+ return 0x01001090;
+ case 23 ... 36:
+ return 0x01008090;
+ case 37 ... 46:
+ return 0x08001090;
+ default:
+ return 0x08008090;
+ }
+}
+
+static const uint32_t axi_clkgen_lock_table[] = {
+ 0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8,
+ 0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8,
+ 0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339,
+ 0x1f1f02ee, 0x1f1f02bc, 0x1f1f028a, 0x1f1f0271,
+ 0x1f1f023f, 0x1f1f0226, 0x1f1f020d, 0x1f1f01f4,
+ 0x1f1f01db, 0x1f1f01c2, 0x1f1f01a9, 0x1f1f0190,
+ 0x1f1f0190, 0x1f1f0177, 0x1f1f015e, 0x1f1f015e,
+ 0x1f1f0145, 0x1f1f0145, 0x1f1f012c, 0x1f1f012c,
+ 0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113,
+};
+
+static uint32_t axi_clkgen_lookup_lock(unsigned int m)
+{
+ if (m < ARRAY_SIZE(axi_clkgen_lock_table))
+ return axi_clkgen_lock_table[m];
+ return 0x1f1f00fa;
+}
+
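+/*
+ * Editor's note: these limits are in kHz; axi_clkgen_calc_params() below
+ * divides its Hz arguments by 1000 before comparing against them.
+ */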
+static const unsigned int fpfd_min = 10000;
+static const unsigned int fpfd_max = 300000;
+static const unsigned int fvco_min = 600000;
+static const unsigned int fvco_max = 1200000;
+
+static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
+ unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
+{
+ unsigned long d, d_min, d_max, _d_min, _d_max;
+ unsigned long m, m_min, m_max;
+ unsigned long f, dout, best_f, fvco;
+
+ fin /= 1000;
+ fout /= 1000;
+
+ best_f = ULONG_MAX;
+ *best_d = 0;
+ *best_m = 0;
+ *best_dout = 0;
+
+ d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
+ d_max = min_t(unsigned long, fin / fpfd_min, 80);
+
+ m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min, fin) * d_min, 1);
+ m_max = min_t(unsigned long, fvco_max * d_max / fin, 64);
+
+ for (m = m_min; m <= m_max; m++) {
+ _d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max));
+ _d_max = min(d_max, fin * m / fvco_min);
+
+ for (d = _d_min; d <= _d_max; d++) {
+ fvco = fin * m / d;
+
+ dout = DIV_ROUND_CLOSEST(fvco, fout);
+ dout = clamp_t(unsigned long, dout, 1, 128);
+ f = fvco / dout;
+ if (abs(f - fout) < abs(best_f - fout)) {
+ best_f = f;
+ *best_d = d;
+ *best_m = m;
+ *best_dout = dout;
+ if (best_f == fout)
+ return;
+ }
+ }
+ }
+}
+
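+/*
+ * Editor's note: for example, a divider of 5 gives high = 2, low = 3,
+ * edge = 1 and nocount = 0, while a divider of 1 gives nocount = 1.
+ */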
+static void axi_clkgen_calc_clk_params(unsigned int divider, unsigned int *low,
+ unsigned int *high, unsigned int *edge, unsigned int *nocount)
+{
+ if (divider == 1)
+ *nocount = 1;
+ else
+ *nocount = 0;
+
+ *high = divider / 2;
+ *edge = divider % 2;
+ *low = divider - *high;
+}
+
+static void axi_clkgen_write(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int val)
+{
+ writel(val, axi_clkgen->base + reg);
+}
+
+static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
+ unsigned int reg, unsigned int *val)
+{
+ *val = readl(axi_clkgen->base + reg);
+}
+
+static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
+{
+ return container_of(clk_hw, struct axi_clkgen, clk_hw);
+}
+
+static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int d, m, dout;
+ unsigned int nocount;
+ unsigned int high;
+ unsigned int edge;
+ unsigned int low;
+ uint32_t filter;
+ uint32_t lock;
+
+ if (parent_rate == 0 || rate == 0)
+ return -EINVAL;
+
+ axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout);
+
+ if (d == 0 || dout == 0 || m == 0)
+ return -EINVAL;
+
+ filter = axi_clkgen_lookup_filter(m - 1);
+ lock = axi_clkgen_lookup_lock(m - 1);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0);
+
+ axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1,
+ (high << 6) | low);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2,
+ (edge << 7) | (nocount << 6));
+
+ axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV,
+ (edge << 13) | (nocount << 12) | (high << 6) | low);
+
+ axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1,
+ (high << 6) | low);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2,
+ (edge << 7) | (nocount << 6));
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2,
+ (((lock >> 16) & 0x1f) << 10) | 0x1);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3,
+ (((lock >> 24) & 0x1f) << 10) | 0x3e9);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16);
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1);
+
+ return 0;
+}
+
+static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int d, m, dout;
+
+ axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
+
+ if (d == 0 || dout == 0 || m == 0)
+ return -EINVAL;
+
+ return *parent_rate / d * m / dout;
+}
+
+static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
+ unsigned long parent_rate)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int d, m, dout;
+ unsigned int reg;
+ unsigned long long tmp;
+
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, &reg);
+ dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, &reg);
+ d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, &reg);
+ m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
+
+ if (d == 0 || dout == 0)
+ return 0;
+
+ tmp = (unsigned long long)(parent_rate / d) * m;
+ do_div(tmp, dout);
+
+ if (tmp > ULONG_MAX)
+ return ULONG_MAX;
+
+ return tmp;
+}
+
+static const struct clk_ops axi_clkgen_ops = {
+ .recalc_rate = axi_clkgen_recalc_rate,
+ .round_rate = axi_clkgen_round_rate,
+ .set_rate = axi_clkgen_set_rate,
+};
+
+static int axi_clkgen_probe(struct platform_device *pdev)
+{
+ struct axi_clkgen *axi_clkgen;
+ struct clk_init_data init;
+ const char *parent_name;
+ const char *clk_name;
+ struct resource *mem;
+ struct clk *clk;
+
+ axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
+ if (!axi_clkgen)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(axi_clkgen->base))
+ return PTR_ERR(axi_clkgen->base);
+
+ parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
+ if (!parent_name)
+ return -EINVAL;
+
+ clk_name = pdev->dev.of_node->name;
+ of_property_read_string(pdev->dev.of_node, "clock-output-names",
+ &clk_name);
+
+ init.name = clk_name;
+ init.ops = &axi_clkgen_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ axi_clkgen->clk_hw.init = &init;
+ clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ return of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
+ clk);
+}
+
+static int axi_clkgen_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id axi_clkgen_ids[] = {
+ { .compatible = "adi,axi-clkgen-1.00.a" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
+
+static struct platform_driver axi_clkgen_driver = {
+ .driver = {
+ .name = "adi-axi-clkgen",
+ .owner = THIS_MODULE,
+ .of_match_table = axi_clkgen_ids,
+ },
+ .probe = axi_clkgen_probe,
+ .remove = axi_clkgen_remove,
+};
+module_platform_driver(axi_clkgen_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Driver for the Analog Devices' AXI clkgen pcore clock generator");
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
new file mode 100644
index 00000000000..a33f46f20a4
--- /dev/null
+++ b/drivers/clk/clk-composite.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
+
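+/*
+ * Editor's note: each pass-through op below points the component clk_hw's
+ * ->clk at the composite's struct clk before delegating, so the component's
+ * ops operate on the clock they are embedded in when they use the __clk_*
+ * helpers.
+ */
+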
+static u8 clk_composite_get_parent(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->get_parent(mux_hw);
+}
+
+static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *mux_hw = composite->mux_hw;
+
+ mux_hw->clk = hw->clk;
+
+ return mux_ops->set_parent(mux_hw, index);
+}
+
+static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->recalc_rate(rate_hw, parent_rate);
+}
+
+static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->round_rate(rate_hw, rate, prate);
+}
+
+static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+
+ rate_hw->clk = hw->clk;
+
+ return rate_ops->set_rate(rate_hw, rate, parent_rate);
+}
+
+static int clk_composite_is_enabled(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->is_enabled(gate_hw);
+}
+
+static int clk_composite_enable(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ return gate_ops->enable(gate_hw);
+}
+
+static void clk_composite_disable(struct clk_hw *hw)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *gate_ops = composite->gate_ops;
+ struct clk_hw *gate_hw = composite->gate_hw;
+
+ gate_hw->clk = hw->clk;
+
+ gate_ops->disable(gate_hw);
+}
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+ const char **parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk *clk;
+ struct clk_init_data init;
+ struct clk_composite *composite;
+ struct clk_ops *clk_composite_ops;
+
+ composite = kzalloc(sizeof(*composite), GFP_KERNEL);
+ if (!composite) {
+ pr_err("%s: could not allocate composite clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.flags = flags | CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clk_composite_ops = &composite->ops;
+
+ if (mux_hw && mux_ops) {
+ if (!mux_ops->get_parent || !mux_ops->set_parent) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ composite->mux_hw = mux_hw;
+ composite->mux_ops = mux_ops;
+ clk_composite_ops->get_parent = clk_composite_get_parent;
+ clk_composite_ops->set_parent = clk_composite_set_parent;
+ }
+
+ if (rate_hw && rate_ops) {
+ if (!rate_ops->recalc_rate) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ /* .round_rate is a prerequisite for .set_rate */
+ if (rate_ops->round_rate) {
+ clk_composite_ops->round_rate = clk_composite_round_rate;
+ if (rate_ops->set_rate) {
+ clk_composite_ops->set_rate = clk_composite_set_rate;
+ }
+ } else {
+ WARN(rate_ops->set_rate,
+ "%s: missing round_rate op is required\n",
+ __func__);
+ }
+
+ composite->rate_hw = rate_hw;
+ composite->rate_ops = rate_ops;
+ clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
+ }
+
+ if (gate_hw && gate_ops) {
+ if (!gate_ops->is_enabled || !gate_ops->enable ||
+ !gate_ops->disable) {
+ clk = ERR_PTR(-EINVAL);
+ goto err;
+ }
+
+ composite->gate_hw = gate_hw;
+ composite->gate_ops = gate_ops;
+ clk_composite_ops->is_enabled = clk_composite_is_enabled;
+ clk_composite_ops->enable = clk_composite_enable;
+ clk_composite_ops->disable = clk_composite_disable;
+ }
+
+ init.ops = clk_composite_ops;
+ composite->hw.init = &init;
+
+ clk = clk_register(dev, &composite->hw);
+ if (IS_ERR(clk))
+ goto err;
+
+ if (composite->mux_hw)
+ composite->mux_hw->clk = clk;
+
+ if (composite->rate_hw)
+ composite->rate_hw->clk = clk;
+
+ if (composite->gate_hw)
+ composite->gate_hw->clk = clk;
+
+ return clk;
+
+err:
+ kfree(composite);
+ return clk;
+}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 68b40210117..6d967416043 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -109,8 +109,9 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
div = _get_div(divider, val);
if (!div) {
- WARN(1, "%s: Invalid divisor for clock %s\n", __func__,
- __clk_get_name(hw->clk));
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ __clk_get_name(hw->clk));
return parent_rate;
}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 1ef271e4759..9ff7d510faa 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/of.h>
/*
* DOC: basic fixed multiplier and divider clock that cannot gate
@@ -96,3 +97,38 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
return clk;
}
+#ifdef CONFIG_OF
+/**
+ * of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
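+ *
+ * Editor's sketch of a matching device tree node (illustrative only, see the
+ * fixed-factor-clock binding for the authoritative format):
+ *
+ *	clock {
+ *		compatible = "fixed-factor-clock";
+ *		clocks = <&parent_clk>;
+ *		#clock-cells = <0>;
+ *		clock-div = <2>;
+ *		clock-mult = <1>;
+ *	};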
+ */
+void __init of_fixed_factor_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ u32 div, mult;
+
+ if (of_property_read_u32(node, "clock-div", &div)) {
+ pr_err("%s Fixed factor clock <%s> must have a clock-div property\n",
+ __func__, node->name);
+ return;
+ }
+
+ if (of_property_read_u32(node, "clock-mult", &mult)) {
+ pr_err("%s Fixed factor clock <%s> must have a clokc-mult property\n",
+ __func__, node->name);
+ return;
+ }
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ parent_name = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0,
+ mult, div);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+EXPORT_SYMBOL_GPL(of_fixed_factor_clk_setup);
+CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
+ of_fixed_factor_clk_setup);
+#endif
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 508c032edce..25b1734560d 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -32,6 +32,7 @@
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
u32 val;
/*
@@ -42,7 +43,16 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
* val = 0x4 really means "bit 2, index starts at bit 0"
*/
val = readl(mux->reg) >> mux->shift;
- val &= (1 << mux->width) - 1;
+ val &= mux->mask;
+
+ if (mux->table) {
+ int i;
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->table[i] == val)
+ return i;
+ return -EINVAL;
+ }
if (val && (mux->flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
@@ -50,7 +60,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
if (val && (mux->flags & CLK_MUX_INDEX_ONE))
val--;
- if (val >= __clk_get_num_parents(hw->clk))
+ if (val >= num_parents)
return -EINVAL;
return val;
@@ -62,17 +72,22 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
u32 val;
unsigned long flags = 0;
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = (1 << ffs(index));
+ if (mux->table)
+ index = mux->table[index];
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+ else {
+ if (mux->flags & CLK_MUX_INDEX_BIT)
+ index = (1 << ffs(index));
+
+ if (mux->flags & CLK_MUX_INDEX_ONE)
+ index++;
+ }
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
val = readl(mux->reg);
- val &= ~(((1 << mux->width) - 1) << mux->shift);
+ val &= ~(mux->mask << mux->shift);
val |= index << mux->shift;
writel(val, mux->reg);
@@ -88,10 +103,10 @@ const struct clk_ops clk_mux_ops = {
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
-struct clk *clk_register_mux(struct device *dev, const char *name,
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock)
+ void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk *clk;
@@ -113,9 +128,10 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
- mux->width = width;
+ mux->mask = mask;
mux->flags = clk_mux_flags;
mux->lock = lock;
+ mux->table = table;
mux->hw.init = &init;
clk = clk_register(dev, &mux->hw);
@@ -125,3 +141,15 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
return clk;
}
+
+struct clk *clk_register_mux(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_mux_flags, spinlock_t *lock)
+{
+ u32 mask = BIT(width) - 1;
+
+ return clk_register_mux_table(dev, name, parent_names, num_parents,
+ flags, reg, shift, mask, clk_mux_flags,
+ NULL, lock);
+}
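+
+/*
+ * Editor's sketch (not part of this change): a mux whose register encodes
+ * parents 0, 1 and 2 as the values 1, 2 and 4 could be registered with
+ *
+ *	static u32 sel_table[] = { 1, 2, 4 };
+ *
+ *	clk_register_mux_table(NULL, "periph_mux", parents, 3, 0,
+ *			       base + MUX_REG, 0, 0x7, 0, sel_table, &lock);
+ *
+ * where "parents", "base", "MUX_REG" and "lock" are hypothetical.
+ */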
diff --git a/drivers/clk/clk-prima2.c b/drivers/clk/clk-prima2.c
index f8e9d0c27be..643ca653fef 100644
--- a/drivers/clk/clk-prima2.c
+++ b/drivers/clk/clk-prima2.c
@@ -1113,7 +1113,7 @@ void __init sirfsoc_of_clk_init(void)
for (i = pll1; i < maxclk; i++) {
prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
- BUG_ON(!prima2_clks[i]);
+ BUG_ON(IS_ERR(prima2_clks[i]));
}
clk_register_clkdev(prima2_clks[cpu], NULL, "cpu");
clk_register_clkdev(prima2_clks[io], NULL, "io");
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
new file mode 100644
index 00000000000..892728412e9
--- /dev/null
+++ b/drivers/clk/clk-si5351.c
@@ -0,0 +1,1510 @@
+/*
+ * clk-si5351.c: Silicon Laboratories Si5351A/B/C I2C Clock Generator
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Rabeeh Khoury <rabeeh@solid-run.com>
+ *
+ * References:
+ * [1] "Si5351A/B/C Data Sheet"
+ * http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf
+ * [2] "Manually Generating an Si5351 Register Map"
+ * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/rational.h>
+#include <linux/i2c.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/si5351.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/div64.h>
+
+#include "clk-si5351.h"
+
+struct si5351_driver_data;
+
+struct si5351_parameters {
+ unsigned long p1;
+ unsigned long p2;
+ unsigned long p3;
+ int valid;
+};
+
+struct si5351_hw_data {
+ struct clk_hw hw;
+ struct si5351_driver_data *drvdata;
+ struct si5351_parameters params;
+ unsigned char num;
+};
+
+struct si5351_driver_data {
+ enum si5351_variant variant;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk_onecell_data onecell;
+
+ struct clk *pxtal;
+ const char *pxtal_name;
+ struct clk_hw xtal;
+ struct clk *pclkin;
+ const char *pclkin_name;
+ struct clk_hw clkin;
+
+ struct si5351_hw_data pll[2];
+ struct si5351_hw_data *msynth;
+ struct si5351_hw_data *clkout;
+};
+
+static const char * const si5351_input_names[] = {
+ "xtal", "clkin"
+};
+static const char * const si5351_pll_names[] = {
+ "plla", "pllb", "vxco"
+};
+static const char * const si5351_msynth_names[] = {
+ "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
+};
+static const char * const si5351_clkout_names[] = {
+ "clk0", "clk1", "clk2", "clk3", "clk4", "clk5", "clk6", "clk7"
+};
+
+/*
+ * Si5351 i2c regmap
+ */
+static inline u8 si5351_reg_read(struct si5351_driver_data *drvdata, u8 reg)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(drvdata->regmap, reg, &val);
+ if (ret) {
+ dev_err(&drvdata->client->dev,
+ "unable to read from reg%02x\n", reg);
+ return 0;
+ }
+
+ return (u8)val;
+}
+
+static inline int si5351_bulk_read(struct si5351_driver_data *drvdata,
+ u8 reg, u8 count, u8 *buf)
+{
+ return regmap_bulk_read(drvdata->regmap, reg, buf, count);
+}
+
+static inline int si5351_reg_write(struct si5351_driver_data *drvdata,
+ u8 reg, u8 val)
+{
+ return regmap_write(drvdata->regmap, reg, val);
+}
+
+static inline int si5351_bulk_write(struct si5351_driver_data *drvdata,
+ u8 reg, u8 count, const u8 *buf)
+{
+ return regmap_raw_write(drvdata->regmap, reg, buf, count);
+}
+
+static inline int si5351_set_bits(struct si5351_driver_data *drvdata,
+ u8 reg, u8 mask, u8 val)
+{
+ return regmap_update_bits(drvdata->regmap, reg, mask, val);
+}
+
+static inline u8 si5351_msynth_params_address(int num)
+{
+ if (num > 5)
+ return SI5351_CLK6_PARAMETERS + (num - 6);
+ return SI5351_CLK0_PARAMETERS + (SI5351_PARAMETERS_LENGTH * num);
+}
+
+static void si5351_read_parameters(struct si5351_driver_data *drvdata,
+ u8 reg, struct si5351_parameters *params)
+{
+ u8 buf[SI5351_PARAMETERS_LENGTH];
+
+ switch (reg) {
+ case SI5351_CLK6_PARAMETERS:
+ case SI5351_CLK7_PARAMETERS:
+ buf[0] = si5351_reg_read(drvdata, reg);
+ params->p1 = buf[0];
+ params->p2 = 0;
+ params->p3 = 1;
+ break;
+ default:
+ si5351_bulk_read(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf);
+ params->p1 = ((buf[2] & 0x03) << 16) | (buf[3] << 8) | buf[4];
+ params->p2 = ((buf[5] & 0x0f) << 16) | (buf[6] << 8) | buf[7];
+ params->p3 = ((buf[5] & 0xf0) << 12) | (buf[0] << 8) | buf[1];
+ }
+ params->valid = 1;
+}
+
+static void si5351_write_parameters(struct si5351_driver_data *drvdata,
+ u8 reg, struct si5351_parameters *params)
+{
+ u8 buf[SI5351_PARAMETERS_LENGTH];
+
+ switch (reg) {
+ case SI5351_CLK6_PARAMETERS:
+ case SI5351_CLK7_PARAMETERS:
+ buf[0] = params->p1 & 0xff;
+ si5351_reg_write(drvdata, reg, buf[0]);
+ break;
+ default:
+ buf[0] = ((params->p3 & 0x0ff00) >> 8) & 0xff;
+ buf[1] = params->p3 & 0xff;
+ /* save rdiv and divby4 */
+ buf[2] = si5351_reg_read(drvdata, reg + 2) & ~0x03;
+ buf[2] |= ((params->p1 & 0x30000) >> 16) & 0x03;
+ buf[3] = ((params->p1 & 0x0ff00) >> 8) & 0xff;
+ buf[4] = params->p1 & 0xff;
+ buf[5] = ((params->p3 & 0xf0000) >> 12) |
+ ((params->p2 & 0xf0000) >> 16);
+ buf[6] = ((params->p2 & 0x0ff00) >> 8) & 0xff;
+ buf[7] = params->p2 & 0xff;
+ si5351_bulk_write(drvdata, reg, SI5351_PARAMETERS_LENGTH, buf);
+ }
+}
+
+static bool si5351_regmap_is_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SI5351_DEVICE_STATUS:
+ case SI5351_INTERRUPT_STATUS:
+ case SI5351_PLL_RESET:
+ return true;
+ }
+ return false;
+}
+
+static bool si5351_regmap_is_writeable(struct device *dev, unsigned int reg)
+{
+ /* reserved registers */
+ if (reg >= 4 && reg <= 8)
+ return false;
+ if (reg >= 10 && reg <= 14)
+ return false;
+ if (reg >= 173 && reg <= 176)
+ return false;
+ if (reg >= 178 && reg <= 182)
+ return false;
+ /* read-only */
+ if (reg == SI5351_DEVICE_STATUS)
+ return false;
+ return true;
+}
+
+static struct regmap_config si5351_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 187,
+ .writeable_reg = si5351_regmap_is_writeable,
+ .volatile_reg = si5351_regmap_is_volatile,
+};
+
+/*
+ * Si5351 xtal clock input
+ */
+static int si5351_xtal_prepare(struct clk_hw *hw)
+{
+ struct si5351_driver_data *drvdata =
+ container_of(hw, struct si5351_driver_data, xtal);
+ si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
+ SI5351_XTAL_ENABLE, SI5351_XTAL_ENABLE);
+ return 0;
+}
+
+static void si5351_xtal_unprepare(struct clk_hw *hw)
+{
+ struct si5351_driver_data *drvdata =
+ container_of(hw, struct si5351_driver_data, xtal);
+ si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
+ SI5351_XTAL_ENABLE, 0);
+}
+
+static const struct clk_ops si5351_xtal_ops = {
+ .prepare = si5351_xtal_prepare,
+ .unprepare = si5351_xtal_unprepare,
+};
+
+/*
+ * Si5351 clkin clock input (Si5351C only)
+ */
+static int si5351_clkin_prepare(struct clk_hw *hw)
+{
+ struct si5351_driver_data *drvdata =
+ container_of(hw, struct si5351_driver_data, clkin);
+ si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
+ SI5351_CLKIN_ENABLE, SI5351_CLKIN_ENABLE);
+ return 0;
+}
+
+static void si5351_clkin_unprepare(struct clk_hw *hw)
+{
+ struct si5351_driver_data *drvdata =
+ container_of(hw, struct si5351_driver_data, clkin);
+ si5351_set_bits(drvdata, SI5351_FANOUT_ENABLE,
+ SI5351_CLKIN_ENABLE, 0);
+}
+
+/*
+ * CMOS clock source constraints:
+ * The input frequency range of the PLL is 10MHz to 40MHz.
+ * If CLKIN is >40MHz, the input divider must be used.
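+ * For example (illustrative only), a 100 MHz CLKIN is divided by 4 in
+ * si5351_clkin_recalc_rate() below, giving a 25 MHz PLL input.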
+ */
+static unsigned long si5351_clkin_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct si5351_driver_data *drvdata =
+ container_of(hw, struct si5351_driver_data, clkin);
+ unsigned long rate;
+ unsigned char idiv;
+
+ rate = parent_rate;
+ if (parent_rate > 160000000) {
+ idiv = SI5351_CLKIN_DIV_8;
+ rate /= 8;
+ } else if (parent_rate > 80000000) {
+ idiv = SI5351_CLKIN_DIV_4;
+ rate /= 4;
+ } else if (parent_rate > 40000000) {
+ idiv = SI5351_CLKIN_DIV_2;
+ rate /= 2;
+ } else {
+ idiv = SI5351_CLKIN_DIV_1;
+ }
+
+ si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE,
+ SI5351_CLKIN_DIV_MASK, idiv);
+
+ dev_dbg(&drvdata->client->dev, "%s - clkin div = %d, rate = %lu\n",
+ __func__, (1 << (idiv >> 6)), rate);
+
+ return rate;
+}
+
+static const struct clk_ops si5351_clkin_ops = {
+ .prepare = si5351_clkin_prepare,
+ .unprepare = si5351_clkin_unprepare,
+ .recalc_rate = si5351_clkin_recalc_rate,
+};
+
+/*
+ * Si5351 vxco clock input (Si5351B only)
+ */
+
+static int si5351_vxco_prepare(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+
+ dev_warn(&hwdata->drvdata->client->dev, "VXCO currently unsupported\n");
+
+ return 0;
+}
+
+static void si5351_vxco_unprepare(struct clk_hw *hw)
+{
+}
+
+static unsigned long si5351_vxco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+static int si5351_vxco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent)
+{
+ return 0;
+}
+
+static const struct clk_ops si5351_vxco_ops = {
+ .prepare = si5351_vxco_prepare,
+ .unprepare = si5351_vxco_unprepare,
+ .recalc_rate = si5351_vxco_recalc_rate,
+ .set_rate = si5351_vxco_set_rate,
+};
+
+/*
+ * Si5351 pll a/b
+ *
+ * Feedback Multisynth Divider Equations [2]
+ *
+ * fVCO = fIN * (a + b/c)
+ *
+ * with 15 + 0/1048575 <= (a + b/c) <= 90 + 0/1048575 and
+ * fIN = fXTAL or fIN = fCLKIN/CLKIN_DIV
+ *
+ * Feedback Multisynth Register Equations
+ *
+ * (1) MSNx_P1[17:0] = 128 * a + floor(128 * b/c) - 512
+ * (2) MSNx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c
+ * (3) MSNx_P3[19:0] = c
+ *
+ * Transposing (2) yields: (4) floor(128 * b/c) = (128 * b - MSNx_P2)/c
+ *
+ * Using (4) on (1) yields:
+ * MSNx_P1 = 128 * a + (128 * b - MSNx_P2)/c - 512
+ * MSNx_P1 + 512 + MSNx_P2/c = 128 * a + 128 * b/c
+ *
+ * a + b/c = (MSNx_P1 + MSNx_P2/MSNx_P3 + 512)/128
+ * = (MSNx_P1*MSNx_P3 + MSNx_P2 + 512*MSNx_P3)/(128*MSNx_P3)
+ *
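+ * Worked example (illustrative only): fIN = 25 MHz with a = 36, b = 0 and
+ * c = 1 gives fVCO = 900 MHz, i.e. MSNx_P1 = 128 * 36 - 512 = 4096,
+ * MSNx_P2 = 0 and MSNx_P3 = 1.
+ *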
+ */
+static int _si5351_pll_reparent(struct si5351_driver_data *drvdata,
+ int num, enum si5351_pll_src parent)
+{
+ u8 mask = (num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE;
+
+ if (parent == SI5351_PLL_SRC_DEFAULT)
+ return 0;
+
+ if (num > 2)
+ return -EINVAL;
+
+ if (drvdata->variant != SI5351_VARIANT_C &&
+ parent != SI5351_PLL_SRC_XTAL)
+ return -EINVAL;
+
+ si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE, mask,
+ (parent == SI5351_PLL_SRC_XTAL) ? 0 : mask);
+ return 0;
+}
+
+static unsigned char si5351_pll_get_parent(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 mask = (hwdata->num == 0) ? SI5351_PLLA_SOURCE : SI5351_PLLB_SOURCE;
+ u8 val;
+
+ val = si5351_reg_read(hwdata->drvdata, SI5351_PLL_INPUT_SOURCE);
+
+ return (val & mask) ? 1 : 0;
+}
+
+static int si5351_pll_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+
+ if (hwdata->drvdata->variant != SI5351_VARIANT_C &&
+ index > 0)
+ return -EPERM;
+
+ if (index > 1)
+ return -EINVAL;
+
+ return _si5351_pll_reparent(hwdata->drvdata, hwdata->num,
+ (index == 0) ? SI5351_PLL_SRC_XTAL :
+ SI5351_PLL_SRC_CLKIN);
+}
+
+static unsigned long si5351_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS :
+ SI5351_PLLB_PARAMETERS;
+ unsigned long long rate;
+
+ if (!hwdata->params.valid)
+ si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params);
+
+ if (hwdata->params.p3 == 0)
+ return parent_rate;
+
+ /* fVCO = fIN * (P1*P3 + 512*P3 + P2)/(128*P3) */
+ rate = hwdata->params.p1 * hwdata->params.p3;
+ rate += 512 * hwdata->params.p3;
+ rate += hwdata->params.p2;
+ rate *= parent_rate;
+ do_div(rate, 128 * hwdata->params.p3);
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk),
+ hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
+ parent_rate, (unsigned long)rate);
+
+ return (unsigned long)rate;
+}
+
+static long si5351_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ unsigned long rfrac, denom, a, b, c;
+ unsigned long long lltmp;
+
+ if (rate < SI5351_PLL_VCO_MIN)
+ rate = SI5351_PLL_VCO_MIN;
+ if (rate > SI5351_PLL_VCO_MAX)
+ rate = SI5351_PLL_VCO_MAX;
+
+ /* determine integer part of feedback equation */
+ a = rate / *parent_rate;
+
+ if (a < SI5351_PLL_A_MIN)
+ rate = *parent_rate * SI5351_PLL_A_MIN;
+ if (a > SI5351_PLL_A_MAX)
+ rate = *parent_rate * SI5351_PLL_A_MAX;
+
+ /* find best approximation for b/c = fVCO mod fIN */
+ denom = 1000 * 1000;
+ lltmp = rate % (*parent_rate);
+ lltmp *= denom;
+ do_div(lltmp, *parent_rate);
+ rfrac = (unsigned long)lltmp;
+
+ b = 0;
+ c = 1;
+ if (rfrac)
+ rational_best_approximation(rfrac, denom,
+ SI5351_PLL_B_MAX, SI5351_PLL_C_MAX, &b, &c);
+
+ /* calculate parameters */
+ hwdata->params.p3 = c;
+ hwdata->params.p2 = (128 * b) % c;
+ hwdata->params.p1 = 128 * a;
+ hwdata->params.p1 += (128 * b / c);
+ hwdata->params.p1 -= 512;
+
+ /* recalculate rate by fIN * (a + b/c) */
+ lltmp = *parent_rate;
+ lltmp *= b;
+ do_div(lltmp, c);
+
+ rate = (unsigned long)lltmp;
+ rate += *parent_rate * a;
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk), a, b, c,
+ *parent_rate, rate);
+
+ return rate;
+}
+
+static int si5351_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 reg = (hwdata->num == 0) ? SI5351_PLLA_PARAMETERS :
+ SI5351_PLLB_PARAMETERS;
+
+ /* write multisynth parameters */
+ si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params);
+
+ /* plla/pllb ctrl is in clk6/clk7 ctrl registers */
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK6_CTRL + hwdata->num,
+ SI5351_CLK_INTEGER_MODE,
+ (hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk),
+ hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
+ parent_rate, rate);
+
+ return 0;
+}
+
+static const struct clk_ops si5351_pll_ops = {
+ .set_parent = si5351_pll_set_parent,
+ .get_parent = si5351_pll_get_parent,
+ .recalc_rate = si5351_pll_recalc_rate,
+ .round_rate = si5351_pll_round_rate,
+ .set_rate = si5351_pll_set_rate,
+};
+
+/*
+ * Si5351 multisync divider
+ *
+ * for fOUT <= 150 MHz:
+ *
+ * fOUT = (fIN * (a + b/c)) / CLKOUTDIV
+ *
+ * with 6 + 0/1048575 <= (a + b/c) <= 1800 + 0/1048575 and
+ * fIN = fVCO0, fVCO1
+ *
+ * Output Clock Multisynth Register Equations
+ *
+ * MSx_P1[17:0] = 128 * a + floor(128 * b/c) - 512
+ * MSx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c
+ * MSx_P3[19:0] = c
+ *
+ * MS[6,7] are integer (P1) divide only, P2 = 0, P3 = 0
+ *
+ * for 150MHz < fOUT <= 160MHz:
+ *
+ * MSx_P1 = 0, MSx_P2 = 0, MSx_P3 = 1, MSx_INT = 1, MSx_DIVBY4 = 11b
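+ *
+ * Worked example (illustrative only): a divide ratio of (a + b/c) = 6 + 0/1
+ * gives MSx_P1 = 128 * 6 - 512 = 256, MSx_P2 = 0 and MSx_P3 = 1.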
+ */
+static int _si5351_msynth_reparent(struct si5351_driver_data *drvdata,
+ int num, enum si5351_multisynth_src parent)
+{
+ if (parent == SI5351_MULTISYNTH_SRC_DEFAULT)
+ return 0;
+
+ if (num > 8)
+ return -EINVAL;
+
+ si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num, SI5351_CLK_PLL_SELECT,
+ (parent == SI5351_MULTISYNTH_SRC_VCO0) ? 0 :
+ SI5351_CLK_PLL_SELECT);
+ return 0;
+}
+
+static unsigned char si5351_msynth_get_parent(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 val;
+
+ val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num);
+
+ return (val & SI5351_CLK_PLL_SELECT) ? 1 : 0;
+}
+
+static int si5351_msynth_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+
+ return _si5351_msynth_reparent(hwdata->drvdata, hwdata->num,
+ (index == 0) ? SI5351_MULTISYNTH_SRC_VCO0 :
+ SI5351_MULTISYNTH_SRC_VCO1);
+}
+
+static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 reg = si5351_msynth_params_address(hwdata->num);
+ unsigned long long rate;
+ unsigned long m;
+
+ if (!hwdata->params.valid)
+ si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params);
+
+ if (hwdata->params.p3 == 0)
+ return parent_rate;
+
+ /*
+ * multisync0-5: fOUT = (128 * P3 * fIN) / (P1*P3 + P2 + 512*P3)
+ * multisync6-7: fOUT = fIN / P1
+ */
+ rate = parent_rate;
+ if (hwdata->num > 5) {
+ m = hwdata->params.p1;
+ } else if ((si5351_reg_read(hwdata->drvdata, reg + 2) &
+ SI5351_OUTPUT_CLK_DIVBY4) == SI5351_OUTPUT_CLK_DIVBY4) {
+ m = 4;
+ } else {
+ rate *= 128 * hwdata->params.p3;
+ m = hwdata->params.p1 * hwdata->params.p3;
+ m += hwdata->params.p2;
+ m += 512 * hwdata->params.p3;
+ }
+
+ if (m == 0)
+ return 0;
+ do_div(rate, m);
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk),
+ hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
+ m, parent_rate, (unsigned long)rate);
+
+ return (unsigned long)rate;
+}
+
+static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ unsigned long long lltmp;
+ unsigned long a, b, c;
+ int divby4;
+
+ /* multisync6-7 can only handle frequencies < 150MHz */
+ if (hwdata->num >= 6 && rate > SI5351_MULTISYNTH67_MAX_FREQ)
+ rate = SI5351_MULTISYNTH67_MAX_FREQ;
+
+ /* multisync frequency is 1MHz .. 160MHz */
+ if (rate > SI5351_MULTISYNTH_MAX_FREQ)
+ rate = SI5351_MULTISYNTH_MAX_FREQ;
+ if (rate < SI5351_MULTISYNTH_MIN_FREQ)
+ rate = SI5351_MULTISYNTH_MIN_FREQ;
+
+ divby4 = 0;
+ if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ)
+ divby4 = 1;
+
+ /* multisync can set pll */
+ if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+ /*
+ * find largest integer divider for max
+ * vco frequency and given target rate
+ */
+ if (divby4 == 0) {
+ lltmp = SI5351_PLL_VCO_MAX;
+ do_div(lltmp, rate);
+ a = (unsigned long)lltmp;
+ } else
+ a = 4;
+
+ b = 0;
+ c = 1;
+
+ *parent_rate = a * rate;
+ } else {
+ unsigned long rfrac, denom;
+
+ /* disable divby4 */
+ if (divby4) {
+ rate = SI5351_MULTISYNTH_DIVBY4_FREQ;
+ divby4 = 0;
+ }
+
+ /* determine integer part of divider equation */
+ a = *parent_rate / rate;
+ if (a < SI5351_MULTISYNTH_A_MIN)
+ a = SI5351_MULTISYNTH_A_MIN;
+ if (hwdata->num >= 6 && a > SI5351_MULTISYNTH67_A_MAX)
+ a = SI5351_MULTISYNTH67_A_MAX;
+ else if (a > SI5351_MULTISYNTH_A_MAX)
+ a = SI5351_MULTISYNTH_A_MAX;
+
+ /* find best approximation for b/c = fVCO mod fOUT */
+ denom = 1000 * 1000;
+ lltmp = (*parent_rate) % rate;
+ lltmp *= denom;
+ do_div(lltmp, rate);
+ rfrac = (unsigned long)lltmp;
+
+ b = 0;
+ c = 1;
+ if (rfrac)
+ rational_best_approximation(rfrac, denom,
+ SI5351_MULTISYNTH_B_MAX, SI5351_MULTISYNTH_C_MAX,
+ &b, &c);
+ }
+
+ /* recalculate rate by fOUT = fIN / (a + b/c) */
+ lltmp = *parent_rate;
+ lltmp *= c;
+ do_div(lltmp, a * c + b);
+ rate = (unsigned long)lltmp;
+
+ /* calculate parameters */
+ if (divby4) {
+ hwdata->params.p3 = 1;
+ hwdata->params.p2 = 0;
+ hwdata->params.p1 = 0;
+ } else {
+ hwdata->params.p3 = c;
+ hwdata->params.p2 = (128 * b) % c;
+ hwdata->params.p1 = 128 * a;
+ hwdata->params.p1 += (128 * b / c);
+ hwdata->params.p1 -= 512;
+ }
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk), a, b, c, divby4,
+ *parent_rate, rate);
+
+ return rate;
+}
+
+static int si5351_msynth_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ u8 reg = si5351_msynth_params_address(hwdata->num);
+ int divby4 = 0;
+
+ /* write multisynth parameters */
+ si5351_write_parameters(hwdata->drvdata, reg, &hwdata->params);
+
+ if (rate > SI5351_MULTISYNTH_DIVBY4_FREQ)
+ divby4 = 1;
+
+ /* enable/disable integer mode and divby4 on multisynth0-5 */
+ if (hwdata->num < 6) {
+ si5351_set_bits(hwdata->drvdata, reg + 2,
+ SI5351_OUTPUT_CLK_DIVBY4,
+ (divby4) ? SI5351_OUTPUT_CLK_DIVBY4 : 0);
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
+ SI5351_CLK_INTEGER_MODE,
+ (hwdata->params.p2 == 0) ? SI5351_CLK_INTEGER_MODE : 0);
+ }
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk),
+ hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
+ divby4, parent_rate, rate);
+
+ return 0;
+}
+
+static const struct clk_ops si5351_msynth_ops = {
+ .set_parent = si5351_msynth_set_parent,
+ .get_parent = si5351_msynth_get_parent,
+ .recalc_rate = si5351_msynth_recalc_rate,
+ .round_rate = si5351_msynth_round_rate,
+ .set_rate = si5351_msynth_set_rate,
+};
+
+/*
+ * Si5351 clkout divider
+ */
+static int _si5351_clkout_reparent(struct si5351_driver_data *drvdata,
+ int num, enum si5351_clkout_src parent)
+{
+ u8 val;
+
+ if (num > 8)
+ return -EINVAL;
+
+ switch (parent) {
+ case SI5351_CLKOUT_SRC_MSYNTH_N:
+ val = SI5351_CLK_INPUT_MULTISYNTH_N;
+ break;
+ case SI5351_CLKOUT_SRC_MSYNTH_0_4:
+ /* clk0/clk4 can only connect to their own multisynth */
+ if (num == 0 || num == 4)
+ val = SI5351_CLK_INPUT_MULTISYNTH_N;
+ else
+ val = SI5351_CLK_INPUT_MULTISYNTH_0_4;
+ break;
+ case SI5351_CLKOUT_SRC_XTAL:
+ val = SI5351_CLK_INPUT_XTAL;
+ break;
+ case SI5351_CLKOUT_SRC_CLKIN:
+ if (drvdata->variant != SI5351_VARIANT_C)
+ return -EINVAL;
+
+ val = SI5351_CLK_INPUT_CLKIN;
+ break;
+ default:
+ return 0;
+ }
+
+ si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num,
+ SI5351_CLK_INPUT_MASK, val);
+ return 0;
+}
+
+static int _si5351_clkout_set_drive_strength(
+ struct si5351_driver_data *drvdata, int num,
+ enum si5351_drive_strength drive)
+{
+ u8 mask;
+
+ if (num > 8)
+ return -EINVAL;
+
+ switch (drive) {
+ case SI5351_DRIVE_2MA:
+ mask = SI5351_CLK_DRIVE_STRENGTH_2MA;
+ break;
+ case SI5351_DRIVE_4MA:
+ mask = SI5351_CLK_DRIVE_STRENGTH_4MA;
+ break;
+ case SI5351_DRIVE_6MA:
+ mask = SI5351_CLK_DRIVE_STRENGTH_6MA;
+ break;
+ case SI5351_DRIVE_8MA:
+ mask = SI5351_CLK_DRIVE_STRENGTH_8MA;
+ break;
+ default:
+ return 0;
+ }
+
+ si5351_set_bits(drvdata, SI5351_CLK0_CTRL + num,
+ SI5351_CLK_DRIVE_STRENGTH_MASK, mask);
+ return 0;
+}
+
+static int si5351_clkout_prepare(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
+ SI5351_CLK_POWERDOWN, 0);
+ si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL,
+ (1 << hwdata->num), 0);
+ return 0;
+}
+
+static void si5351_clkout_unprepare(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
+ SI5351_CLK_POWERDOWN, SI5351_CLK_POWERDOWN);
+ si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL,
+ (1 << hwdata->num), (1 << hwdata->num));
+}
+
+static u8 si5351_clkout_get_parent(struct clk_hw *hw)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ int index = 0;
+ unsigned char val;
+
+ val = si5351_reg_read(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num);
+ switch (val & SI5351_CLK_INPUT_MASK) {
+ case SI5351_CLK_INPUT_MULTISYNTH_N:
+ index = 0;
+ break;
+ case SI5351_CLK_INPUT_MULTISYNTH_0_4:
+ index = 1;
+ break;
+ case SI5351_CLK_INPUT_XTAL:
+ index = 2;
+ break;
+ case SI5351_CLK_INPUT_CLKIN:
+ index = 3;
+ break;
+ }
+
+ return index;
+}
+
+static int si5351_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ enum si5351_clkout_src parent = SI5351_CLKOUT_SRC_DEFAULT;
+
+ switch (index) {
+ case 0:
+ parent = SI5351_CLKOUT_SRC_MSYNTH_N;
+ break;
+ case 1:
+ parent = SI5351_CLKOUT_SRC_MSYNTH_0_4;
+ break;
+ case 2:
+ parent = SI5351_CLKOUT_SRC_XTAL;
+ break;
+ case 3:
+ parent = SI5351_CLKOUT_SRC_CLKIN;
+ break;
+ }
+
+ return _si5351_clkout_reparent(hwdata->drvdata, hwdata->num, parent);
+}
+
+static unsigned long si5351_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ unsigned char reg;
+ unsigned char rdiv;
+
+ if (hwdata->num > 5)
+ reg = si5351_msynth_params_address(hwdata->num) + 2;
+ else
+ reg = SI5351_CLK6_7_OUTPUT_DIVIDER;
+
+ rdiv = si5351_reg_read(hwdata->drvdata, reg);
+ if (hwdata->num == 6) {
+ rdiv &= SI5351_OUTPUT_CLK6_DIV_MASK;
+ } else {
+ rdiv &= SI5351_OUTPUT_CLK_DIV_MASK;
+ rdiv >>= SI5351_OUTPUT_CLK_DIV_SHIFT;
+ }
+
+ return parent_rate >> rdiv;
+}
+
+static long si5351_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ unsigned char rdiv;
+
+ /* clkout6/7 can only handle output frequencies < 150MHz */
+ if (hwdata->num >= 6 && rate > SI5351_CLKOUT67_MAX_FREQ)
+ rate = SI5351_CLKOUT67_MAX_FREQ;
+
+ /* clkout frequency is 8kHz - 160MHz */
+ if (rate > SI5351_CLKOUT_MAX_FREQ)
+ rate = SI5351_CLKOUT_MAX_FREQ;
+ if (rate < SI5351_CLKOUT_MIN_FREQ)
+ rate = SI5351_CLKOUT_MIN_FREQ;
+
+ /* request frequency if multisync master */
+ if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+ /* use r divider for frequencies below 1MHz */
+ rdiv = SI5351_OUTPUT_CLK_DIV_1;
+ while (rate < SI5351_MULTISYNTH_MIN_FREQ &&
+ rdiv < SI5351_OUTPUT_CLK_DIV_128) {
+ rdiv += 1;
+ rate *= 2;
+ }
+ *parent_rate = rate;
+ } else {
+ unsigned long new_rate, new_err, err;
+
+ /* round to closest rdiv */
+ rdiv = SI5351_OUTPUT_CLK_DIV_1;
+ new_rate = *parent_rate;
+ err = abs(new_rate - rate);
+ do {
+ new_rate >>= 1;
+ new_err = abs(new_rate - rate);
+ if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128)
+ break;
+ rdiv++;
+ err = new_err;
+ } while (1);
+ }
+ rate = *parent_rate >> rdiv;
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+ *parent_rate, rate);
+
+ return rate;
+}
+
+static int si5351_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct si5351_hw_data *hwdata =
+ container_of(hw, struct si5351_hw_data, hw);
+ unsigned long new_rate, new_err, err;
+ unsigned char rdiv;
+
+ /* round to closest rdiv */
+ rdiv = SI5351_OUTPUT_CLK_DIV_1;
+ new_rate = parent_rate;
+ err = abs(new_rate - rate);
+ do {
+ new_rate >>= 1;
+ new_err = abs(new_rate - rate);
+ if (new_err > err || rdiv == SI5351_OUTPUT_CLK_DIV_128)
+ break;
+ rdiv++;
+ err = new_err;
+ } while (1);
+
+ /* write output divider */
+ switch (hwdata->num) {
+ case 6:
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER,
+ SI5351_OUTPUT_CLK6_DIV_MASK, rdiv);
+ break;
+ case 7:
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK6_7_OUTPUT_DIVIDER,
+ SI5351_OUTPUT_CLK_DIV_MASK,
+ rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT);
+ break;
+ default:
+ si5351_set_bits(hwdata->drvdata,
+ si5351_msynth_params_address(hwdata->num) + 2,
+ SI5351_OUTPUT_CLK_DIV_MASK,
+ rdiv << SI5351_OUTPUT_CLK_DIV_SHIFT);
+ }
+
+ /* powerup clkout */
+ si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
+ SI5351_CLK_POWERDOWN, 0);
+
+ dev_dbg(&hwdata->drvdata->client->dev,
+ "%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
+ __func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+ parent_rate, rate);
+
+ return 0;
+}
+
+static const struct clk_ops si5351_clkout_ops = {
+ .prepare = si5351_clkout_prepare,
+ .unprepare = si5351_clkout_unprepare,
+ .set_parent = si5351_clkout_set_parent,
+ .get_parent = si5351_clkout_get_parent,
+ .recalc_rate = si5351_clkout_recalc_rate,
+ .round_rate = si5351_clkout_round_rate,
+ .set_rate = si5351_clkout_set_rate,
+};
+
+/*
+ * Si5351 i2c probe and DT
+ */
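+/*
+ * Editor's sketch of a clkout sub-node accepted by si5351_dt_parse() below
+ * (illustrative only, see the silabs,si5351 binding for the full format):
+ *
+ *	clkout@0 {
+ *		reg = <0>;
+ *		silabs,multisynth-source = <0>;
+ *		silabs,clock-source = <0>;
+ *		silabs,drive-strength = <8>;
+ *		silabs,pll-master;
+ *		clock-frequency = <25000000>;
+ *	};
+ */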
+#ifdef CONFIG_OF
+static const struct of_device_id si5351_dt_ids[] = {
+ { .compatible = "silabs,si5351a", .data = (void *)SI5351_VARIANT_A, },
+ { .compatible = "silabs,si5351a-msop",
+ .data = (void *)SI5351_VARIANT_A3, },
+ { .compatible = "silabs,si5351b", .data = (void *)SI5351_VARIANT_B, },
+ { .compatible = "silabs,si5351c", .data = (void *)SI5351_VARIANT_C, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si5351_dt_ids);
+
+static int si5351_dt_parse(struct i2c_client *client)
+{
+ struct device_node *child, *np = client->dev.of_node;
+ struct si5351_platform_data *pdata;
+ const struct of_device_id *match;
+ struct property *prop;
+ const __be32 *p;
+ int num = 0;
+ u32 val;
+
+ if (np == NULL)
+ return 0;
+
+ match = of_match_node(si5351_dt_ids, np);
+ if (match == NULL)
+ return -EINVAL;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->variant = (enum si5351_variant)match->data;
+ pdata->clk_xtal = of_clk_get(np, 0);
+ if (!IS_ERR(pdata->clk_xtal))
+ clk_put(pdata->clk_xtal);
+ pdata->clk_clkin = of_clk_get(np, 1);
+ if (!IS_ERR(pdata->clk_clkin))
+ clk_put(pdata->clk_clkin);
+
+ /*
+ * property silabs,pll-source : <num src>, [<..>]
+ * allow to selectively set pll source
+ */
+ of_property_for_each_u32(np, "silabs,pll-source", prop, p, num) {
+ if (num >= 2) {
+ dev_err(&client->dev,
+ "invalid pll %d on pll-source prop\n", num);
+ return -EINVAL;
+ }
+
+ p = of_prop_next_u32(prop, p, &val);
+ if (!p) {
+ dev_err(&client->dev,
+ "missing pll-source for pll %d\n", num);
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case 0:
+ pdata->pll_src[num] = SI5351_PLL_SRC_XTAL;
+ break;
+ case 1:
+ if (pdata->variant != SI5351_VARIANT_C) {
+ dev_err(&client->dev,
+ "invalid parent %d for pll %d\n",
+ val, num);
+ return -EINVAL;
+ }
+ pdata->pll_src[num] = SI5351_PLL_SRC_CLKIN;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid parent %d for pll %d\n", val, num);
+ return -EINVAL;
+ }
+ }
+
+ /* per clkout properties */
+ for_each_child_of_node(np, child) {
+ if (of_property_read_u32(child, "reg", &num)) {
+ dev_err(&client->dev, "missing reg property of %s\n",
+ child->name);
+ return -EINVAL;
+ }
+
+ if (num >= 8 ||
+ (pdata->variant == SI5351_VARIANT_A3 && num >= 3)) {
+ dev_err(&client->dev, "invalid clkout %d\n", num);
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(child, "silabs,multisynth-source",
+ &val)) {
+ switch (val) {
+ case 0:
+ pdata->clkout[num].multisynth_src =
+ SI5351_MULTISYNTH_SRC_VCO0;
+ break;
+ case 1:
+ pdata->clkout[num].multisynth_src =
+ SI5351_MULTISYNTH_SRC_VCO1;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid parent %d for multisynth %d\n",
+ val, num);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "silabs,clock-source", &val)) {
+ switch (val) {
+ case 0:
+ pdata->clkout[num].clkout_src =
+ SI5351_CLKOUT_SRC_MSYNTH_N;
+ break;
+ case 1:
+ pdata->clkout[num].clkout_src =
+ SI5351_CLKOUT_SRC_MSYNTH_0_4;
+ break;
+ case 2:
+ pdata->clkout[num].clkout_src =
+ SI5351_CLKOUT_SRC_XTAL;
+ break;
+ case 3:
+ if (pdata->variant != SI5351_VARIANT_C) {
+ dev_err(&client->dev,
+ "invalid parent %d for clkout %d\n",
+ val, num);
+ return -EINVAL;
+ }
+ pdata->clkout[num].clkout_src =
+ SI5351_CLKOUT_SRC_CLKIN;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid parent %d for clkout %d\n",
+ val, num);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "silabs,drive-strength",
+ &val)) {
+ switch (val) {
+ case SI5351_DRIVE_2MA:
+ case SI5351_DRIVE_4MA:
+ case SI5351_DRIVE_6MA:
+ case SI5351_DRIVE_8MA:
+ pdata->clkout[num].drive = val;
+ break;
+ default:
+ dev_err(&client->dev,
+ "invalid drive strength %d for clkout %d\n",
+ val, num);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(child, "clock-frequency", &val))
+ pdata->clkout[num].rate = val;
+
+ pdata->clkout[num].pll_master =
+ of_property_read_bool(child, "silabs,pll-master");
+ }
+ client->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static int si5351_dt_parse(struct i2c_client *client)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int si5351_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct si5351_platform_data *pdata;
+ struct si5351_driver_data *drvdata;
+ struct clk_init_data init;
+ struct clk *clk;
+ const char *parent_names[4];
+ u8 num_parents, num_clocks;
+ int ret, n;
+
+ ret = si5351_dt_parse(client);
+ if (ret)
+ return ret;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ drvdata = devm_kzalloc(&client->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ dev_err(&client->dev, "unable to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, drvdata);
+ drvdata->client = client;
+ drvdata->variant = pdata->variant;
+ drvdata->pxtal = pdata->clk_xtal;
+ drvdata->pclkin = pdata->clk_clkin;
+
+ drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
+ if (IS_ERR(drvdata->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(drvdata->regmap);
+ }
+
+ /* Disable interrupts */
+ si5351_reg_write(drvdata, SI5351_INTERRUPT_MASK, 0xf0);
+ /* Set disabled output drivers to drive low */
+ si5351_reg_write(drvdata, SI5351_CLK3_0_DISABLE_STATE, 0x00);
+ si5351_reg_write(drvdata, SI5351_CLK7_4_DISABLE_STATE, 0x00);
+ /* Ensure pll select is on XTAL for Si5351A/B */
+ if (drvdata->variant != SI5351_VARIANT_C)
+ si5351_set_bits(drvdata, SI5351_PLL_INPUT_SOURCE,
+ SI5351_PLLA_SOURCE | SI5351_PLLB_SOURCE, 0);
+
+ /* setup clock configuration */
+ for (n = 0; n < 2; n++) {
+ ret = _si5351_pll_reparent(drvdata, n, pdata->pll_src[n]);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to reparent pll %d to %d\n",
+ n, pdata->pll_src[n]);
+ return ret;
+ }
+ }
+
+ for (n = 0; n < 8; n++) {
+ ret = _si5351_msynth_reparent(drvdata, n,
+ pdata->clkout[n].multisynth_src);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to reparent multisynth %d to %d\n",
+ n, pdata->clkout[n].multisynth_src);
+ return ret;
+ }
+
+ ret = _si5351_clkout_reparent(drvdata, n,
+ pdata->clkout[n].clkout_src);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to reparent clkout %d to %d\n",
+ n, pdata->clkout[n].clkout_src);
+ return ret;
+ }
+
+ ret = _si5351_clkout_set_drive_strength(drvdata, n,
+ pdata->clkout[n].drive);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed set drive strength of clkout%d to %d\n",
+ n, pdata->clkout[n].drive);
+ return ret;
+ }
+ }
+
+ /* register xtal input clock gate */
+ memset(&init, 0, sizeof(init));
+ init.name = si5351_input_names[0];
+ init.ops = &si5351_xtal_ops;
+ init.flags = 0;
+ if (!IS_ERR(drvdata->pxtal)) {
+ drvdata->pxtal_name = __clk_get_name(drvdata->pxtal);
+ init.parent_names = &drvdata->pxtal_name;
+ init.num_parents = 1;
+ }
+ drvdata->xtal.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->xtal);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ return PTR_ERR(clk);
+ }
+
+ /* register clkin input clock gate */
+ if (drvdata->variant == SI5351_VARIANT_C) {
+ memset(&init, 0, sizeof(init));
+ init.name = si5351_input_names[1];
+ init.ops = &si5351_clkin_ops;
+ if (!IS_ERR(drvdata->pclkin)) {
+ drvdata->pclkin_name = __clk_get_name(drvdata->pclkin);
+ init.parent_names = &drvdata->pclkin_name;
+ init.num_parents = 1;
+ }
+ drvdata->clkin.init = &init;
+ clk = devm_clk_register(&client->dev, &drvdata->clkin);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ return PTR_ERR(clk);
+ }
+ }
+
+ /* Si5351C allows muxing either xtal or clkin to the PLL input */
+ num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 2 : 1;
+ parent_names[0] = si5351_input_names[0];
+ parent_names[1] = si5351_input_names[1];
+
+ /* register PLLA */
+ drvdata->pll[0].num = 0;
+ drvdata->pll[0].drvdata = drvdata;
+ drvdata->pll[0].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = si5351_pll_names[0];
+ init.ops = &si5351_pll_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ return -EINVAL;
+ }
+
+ /* register PLLB or VXCO (Si5351B) */
+ drvdata->pll[1].num = 1;
+ drvdata->pll[1].drvdata = drvdata;
+ drvdata->pll[1].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ if (drvdata->variant == SI5351_VARIANT_B) {
+ init.name = si5351_pll_names[2];
+ init.ops = &si5351_vxco_ops;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ } else {
+ init.name = si5351_pll_names[1];
+ init.ops = &si5351_pll_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ }
+ clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n", init.name);
+ return -EINVAL;
+ }
+
+ /* register clk multisync and clk out divider */
+ num_clocks = (drvdata->variant == SI5351_VARIANT_A3) ? 3 : 8;
+ parent_names[0] = si5351_pll_names[0];
+ if (drvdata->variant == SI5351_VARIANT_B)
+ parent_names[1] = si5351_pll_names[2];
+ else
+ parent_names[1] = si5351_pll_names[1];
+
+ drvdata->msynth = devm_kzalloc(&client->dev, num_clocks *
+ sizeof(*drvdata->msynth), GFP_KERNEL);
+ drvdata->clkout = devm_kzalloc(&client->dev, num_clocks *
+ sizeof(*drvdata->clkout), GFP_KERNEL);
+
+ drvdata->onecell.clk_num = num_clocks;
+ drvdata->onecell.clks = devm_kzalloc(&client->dev,
+ num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
+
+ if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
+ !drvdata->onecell.clks))
+ return -ENOMEM;
+
+ for (n = 0; n < num_clocks; n++) {
+ drvdata->msynth[n].num = n;
+ drvdata->msynth[n].drvdata = drvdata;
+ drvdata->msynth[n].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = si5351_msynth_names[n];
+ init.ops = &si5351_msynth_ops;
+ init.flags = 0;
+ if (pdata->clkout[n].pll_master)
+ init.flags |= CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+ clk = devm_clk_register(&client->dev, &drvdata->msynth[n].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ return -EINVAL;
+ }
+ }
+
+ num_parents = (drvdata->variant == SI5351_VARIANT_C) ? 4 : 3;
+ parent_names[2] = si5351_input_names[0];
+ parent_names[3] = si5351_input_names[1];
+ for (n = 0; n < num_clocks; n++) {
+ parent_names[0] = si5351_msynth_names[n];
+ parent_names[1] = (n < 4) ? si5351_msynth_names[0] :
+ si5351_msynth_names[4];
+
+ drvdata->clkout[n].num = n;
+ drvdata->clkout[n].drvdata = drvdata;
+ drvdata->clkout[n].hw.init = &init;
+ memset(&init, 0, sizeof(init));
+ init.name = si5351_clkout_names[n];
+ init.ops = &si5351_clkout_ops;
+ init.flags = 0;
+ if (pdata->clkout[n].clkout_src == SI5351_CLKOUT_SRC_MSYNTH_N)
+ init.flags |= CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+ clk = devm_clk_register(&client->dev, &drvdata->clkout[n].hw);
+ if (IS_ERR(clk)) {
+ dev_err(&client->dev, "unable to register %s\n",
+ init.name);
+ return -EINVAL;
+ }
+ drvdata->onecell.clks[n] = clk;
+ }
+
+ ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+ &drvdata->onecell);
+ if (ret) {
+ dev_err(&client->dev, "unable to add clk provider\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si5351_i2c_ids[] = {
+ { "silabs,si5351", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids);
+
+static struct i2c_driver si5351_driver = {
+ .driver = {
+ .name = "si5351",
+ .of_match_table = of_match_ptr(si5351_dt_ids),
+ },
+ .probe = si5351_i2c_probe,
+ .id_table = si5351_i2c_ids,
+};
+module_i2c_driver(si5351_driver);
+
+MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
+MODULE_DESCRIPTION("Silicon Labs Si5351A/B/C clock generator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-si5351.h b/drivers/clk/clk-si5351.h
new file mode 100644
index 00000000000..af41b5080f4
--- /dev/null
+++ b/drivers/clk/clk-si5351.h
@@ -0,0 +1,155 @@
+/*
+ * clk-si5351.h: Silicon Laboratories Si5351A/B/C I2C Clock Generator
+ *
+ * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
+ * Rabeeh Khoury <rabeeh@solid-run.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _CLK_SI5351_H_
+#define _CLK_SI5351_H_
+
+#define SI5351_BUS_BASE_ADDR 0x60
+
+#define SI5351_PLL_VCO_MIN 600000000
+#define SI5351_PLL_VCO_MAX 900000000
+#define SI5351_MULTISYNTH_MIN_FREQ 1000000
+#define SI5351_MULTISYNTH_DIVBY4_FREQ 150000000
+#define SI5351_MULTISYNTH_MAX_FREQ 160000000
+#define SI5351_MULTISYNTH67_MAX_FREQ SI5351_MULTISYNTH_DIVBY4_FREQ
+#define SI5351_CLKOUT_MIN_FREQ 8000
+#define SI5351_CLKOUT_MAX_FREQ SI5351_MULTISYNTH_MAX_FREQ
+#define SI5351_CLKOUT67_MAX_FREQ SI5351_MULTISYNTH67_MAX_FREQ
+
+#define SI5351_PLL_A_MIN 15
+#define SI5351_PLL_A_MAX 90
+#define SI5351_PLL_B_MAX (SI5351_PLL_C_MAX-1)
+#define SI5351_PLL_C_MAX 1048575
+#define SI5351_MULTISYNTH_A_MIN 6
+#define SI5351_MULTISYNTH_A_MAX 1800
+#define SI5351_MULTISYNTH67_A_MAX 254
+#define SI5351_MULTISYNTH_B_MAX (SI5351_MULTISYNTH_C_MAX-1)
+#define SI5351_MULTISYNTH_C_MAX 1048575
+#define SI5351_MULTISYNTH_P1_MAX ((1<<18)-1)
+#define SI5351_MULTISYNTH_P2_MAX ((1<<20)-1)
+#define SI5351_MULTISYNTH_P3_MAX ((1<<20)-1)
+
+#define SI5351_DEVICE_STATUS 0
+#define SI5351_INTERRUPT_STATUS 1
+#define SI5351_INTERRUPT_MASK 2
+#define SI5351_STATUS_SYS_INIT (1<<7)
+#define SI5351_STATUS_LOL_B (1<<6)
+#define SI5351_STATUS_LOL_A (1<<5)
+#define SI5351_STATUS_LOS (1<<4)
+#define SI5351_OUTPUT_ENABLE_CTRL 3
+#define SI5351_OEB_PIN_ENABLE_CTRL 9
+#define SI5351_PLL_INPUT_SOURCE 15
+#define SI5351_CLKIN_DIV_MASK (3<<6)
+#define SI5351_CLKIN_DIV_1 (0<<6)
+#define SI5351_CLKIN_DIV_2 (1<<6)
+#define SI5351_CLKIN_DIV_4 (2<<6)
+#define SI5351_CLKIN_DIV_8 (3<<6)
+#define SI5351_PLLB_SOURCE (1<<3)
+#define SI5351_PLLA_SOURCE (1<<2)
+
+#define SI5351_CLK0_CTRL 16
+#define SI5351_CLK1_CTRL 17
+#define SI5351_CLK2_CTRL 18
+#define SI5351_CLK3_CTRL 19
+#define SI5351_CLK4_CTRL 20
+#define SI5351_CLK5_CTRL 21
+#define SI5351_CLK6_CTRL 22
+#define SI5351_CLK7_CTRL 23
+#define SI5351_CLK_POWERDOWN (1<<7)
+#define SI5351_CLK_INTEGER_MODE (1<<6)
+#define SI5351_CLK_PLL_SELECT (1<<5)
+#define SI5351_CLK_INVERT (1<<4)
+#define SI5351_CLK_INPUT_MASK (3<<2)
+#define SI5351_CLK_INPUT_XTAL (0<<2)
+#define SI5351_CLK_INPUT_CLKIN (1<<2)
+#define SI5351_CLK_INPUT_MULTISYNTH_0_4 (2<<2)
+#define SI5351_CLK_INPUT_MULTISYNTH_N (3<<2)
+#define SI5351_CLK_DRIVE_STRENGTH_MASK (3<<0)
+#define SI5351_CLK_DRIVE_STRENGTH_2MA (0<<0)
+#define SI5351_CLK_DRIVE_STRENGTH_4MA (1<<0)
+#define SI5351_CLK_DRIVE_STRENGTH_6MA (2<<0)
+#define SI5351_CLK_DRIVE_STRENGTH_8MA (3<<0)
+
+#define SI5351_CLK3_0_DISABLE_STATE 24
+#define SI5351_CLK7_4_DISABLE_STATE 25
+#define SI5351_CLK_DISABLE_STATE_LOW 0
+#define SI5351_CLK_DISABLE_STATE_HIGH 1
+#define SI5351_CLK_DISABLE_STATE_FLOAT 2
+#define SI5351_CLK_DISABLE_STATE_NEVER 3
+
+#define SI5351_PARAMETERS_LENGTH 8
+#define SI5351_PLLA_PARAMETERS 26
+#define SI5351_PLLB_PARAMETERS 34
+#define SI5351_CLK0_PARAMETERS 42
+#define SI5351_CLK1_PARAMETERS 50
+#define SI5351_CLK2_PARAMETERS 58
+#define SI5351_CLK3_PARAMETERS 66
+#define SI5351_CLK4_PARAMETERS 74
+#define SI5351_CLK5_PARAMETERS 82
+#define SI5351_CLK6_PARAMETERS 90
+#define SI5351_CLK7_PARAMETERS 91
+#define SI5351_CLK6_7_OUTPUT_DIVIDER 92
+#define SI5351_OUTPUT_CLK_DIV_MASK (7 << 4)
+#define SI5351_OUTPUT_CLK6_DIV_MASK (7 << 0)
+#define SI5351_OUTPUT_CLK_DIV_SHIFT 4
+#define SI5351_OUTPUT_CLK_DIV6_SHIFT 0
+#define SI5351_OUTPUT_CLK_DIV_1 0
+#define SI5351_OUTPUT_CLK_DIV_2 1
+#define SI5351_OUTPUT_CLK_DIV_4 2
+#define SI5351_OUTPUT_CLK_DIV_8 3
+#define SI5351_OUTPUT_CLK_DIV_16 4
+#define SI5351_OUTPUT_CLK_DIV_32 5
+#define SI5351_OUTPUT_CLK_DIV_64 6
+#define SI5351_OUTPUT_CLK_DIV_128 7
+#define SI5351_OUTPUT_CLK_DIVBY4 (3<<2)
+
+#define SI5351_SSC_PARAM0 149
+#define SI5351_SSC_PARAM1 150
+#define SI5351_SSC_PARAM2 151
+#define SI5351_SSC_PARAM3 152
+#define SI5351_SSC_PARAM4 153
+#define SI5351_SSC_PARAM5 154
+#define SI5351_SSC_PARAM6 155
+#define SI5351_SSC_PARAM7 156
+#define SI5351_SSC_PARAM8 157
+#define SI5351_SSC_PARAM9 158
+#define SI5351_SSC_PARAM10 159
+#define SI5351_SSC_PARAM11 160
+#define SI5351_SSC_PARAM12 161
+
+#define SI5351_VXCO_PARAMETERS_LOW 162
+#define SI5351_VXCO_PARAMETERS_MID 163
+#define SI5351_VXCO_PARAMETERS_HIGH 164
+
+#define SI5351_CLK0_PHASE_OFFSET 165
+#define SI5351_CLK1_PHASE_OFFSET 166
+#define SI5351_CLK2_PHASE_OFFSET 167
+#define SI5351_CLK3_PHASE_OFFSET 168
+#define SI5351_CLK4_PHASE_OFFSET 169
+#define SI5351_CLK5_PHASE_OFFSET 170
+
+#define SI5351_PLL_RESET 177
+#define SI5351_PLL_RESET_B (1<<7)
+#define SI5351_PLL_RESET_A (1<<5)
+
+#define SI5351_CRYSTAL_LOAD 183
+#define SI5351_CRYSTAL_LOAD_MASK (3<<6)
+#define SI5351_CRYSTAL_LOAD_6PF (1<<6)
+#define SI5351_CRYSTAL_LOAD_8PF (2<<6)
+#define SI5351_CRYSTAL_LOAD_10PF (3<<6)
+
+#define SI5351_FANOUT_ENABLE 187
+#define SI5351_CLKIN_ENABLE (1<<7)
+#define SI5351_XTAL_ENABLE (1<<6)
+#define SI5351_MULTISYNTH_ENABLE (1<<4)
+
+#endif
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 09c63315e57..debf688afa8 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -488,6 +488,7 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
case PLL_TYPE_WM8750:
wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
+ break;
default:
pr_err("%s: invalid pll type\n", __func__);
return 0;
@@ -523,6 +524,7 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
case PLL_TYPE_WM8750:
wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ break;
default:
round_rate = 0;
}
diff --git a/drivers/clk/clk-zynq.c b/drivers/clk/clk-zynq.c
index b14a25f3925..32062977f45 100644
--- a/drivers/clk/clk-zynq.c
+++ b/drivers/clk/clk-zynq.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/clk/zynq.h>
static void __iomem *slcr_base;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ed87b240580..934cfd18f72 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,14 +19,77 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/*** locking ***/
+static void clk_prepare_lock(void)
+{
+ if (!mutex_trylock(&prepare_lock)) {
+ if (prepare_owner == current) {
+ prepare_refcnt++;
+ return;
+ }
+ mutex_lock(&prepare_lock);
+ }
+ WARN_ON_ONCE(prepare_owner != NULL);
+ WARN_ON_ONCE(prepare_refcnt != 0);
+ prepare_owner = current;
+ prepare_refcnt = 1;
+}
+
+static void clk_prepare_unlock(void)
+{
+ WARN_ON_ONCE(prepare_owner != current);
+ WARN_ON_ONCE(prepare_refcnt == 0);
+
+ if (--prepare_refcnt)
+ return;
+ prepare_owner = NULL;
+ mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&enable_lock, flags)) {
+ if (enable_owner == current) {
+ enable_refcnt++;
+ return flags;
+ }
+ spin_lock_irqsave(&enable_lock, flags);
+ }
+ WARN_ON_ONCE(enable_owner != NULL);
+ WARN_ON_ONCE(enable_refcnt != 0);
+ enable_owner = current;
+ enable_refcnt = 1;
+ return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+ WARN_ON_ONCE(enable_owner != current);
+ WARN_ON_ONCE(enable_refcnt == 0);
+
+ if (--enable_refcnt)
+ return;
+ enable_owner = NULL;
+ spin_unlock_irqrestore(&enable_lock, flags);
+}
+
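+/*
+ * The helpers above make prepare_lock and enable_lock reentrant for the
+ * task that already owns them: a nested call from the same context only
+ * bumps a refcount instead of deadlocking on the held lock, and the final
+ * unlock drops ownership and releases the lock.
+ */
+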
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +132,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@@ -77,7 +140,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -130,7 +193,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_printf(s, "{");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@@ -144,7 +207,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
seq_printf(s, "}");
return 0;
@@ -280,6 +343,39 @@ out:
}
/**
+ * clk_debug_reparent - reparent clk node in the debugfs clk tree
+ * @clk: the clk being reparented
+ * @new_parent: the new clk parent, may be NULL
+ *
+ * Rename clk entry in the debugfs clk tree if debugfs has been
+ * initialized. Otherwise it bails out early since the debugfs clk tree
+ * will be created lazily by clk_debug_init as part of a late_initcall.
+ *
+ * Caller must hold prepare_lock.
+ */
+static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+ struct dentry *d;
+ struct dentry *new_parent_d;
+
+ if (!inited)
+ return;
+
+ if (new_parent)
+ new_parent_d = new_parent->dentry;
+ else
+ new_parent_d = orphandir;
+
+ d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
+ new_parent_d, clk->name);
+ if (d)
+ clk->dentry = d;
+ else
+ pr_debug("%s: failed to rename debugfs entry for %s\n",
+ __func__, clk->name);
+}
+
+/**
* clk_debug_init - lazily create the debugfs clk tree visualization
*
* clks are often initialized very early during boot before memory can
@@ -316,7 +412,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@@ -326,16 +422,45 @@ static int __init clk_debug_init(void)
inited = 1;
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+{
+}
#endif
/* caller must hold prepare_lock */
+static void clk_unprepare_unused_subtree(struct clk *clk)
+{
+ struct clk *child;
+
+ if (!clk)
+ return;
+
+ hlist_for_each_entry(child, &clk->children, child_node)
+ clk_unprepare_unused_subtree(child);
+
+ if (clk->prepare_count)
+ return;
+
+ if (clk->flags & CLK_IGNORE_UNUSED)
+ return;
+
+ if (__clk_is_prepared(clk)) {
+ if (clk->ops->unprepare_unused)
+ clk->ops->unprepare_unused(clk->hw);
+ else if (clk->ops->unprepare)
+ clk->ops->unprepare(clk->hw);
+ }
+}
+EXPORT_SYMBOL_GPL(__clk_get_flags);
+
+/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
struct clk *child;
@@ -347,7 +472,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
if (clk->enable_count)
goto unlock_out;
@@ -368,17 +493,30 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
unlock_out:
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
out:
return;
}
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+ clk_ignore_unused = true;
+ return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
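+/*
+ * Booting with "clk_ignore_unused" on the kernel command line keeps
+ * otherwise-unused clocks running: clk_disable_unused() below then
+ * returns early without gating or unpreparing anything.
+ */
+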
static int clk_disable_unused(void)
{
struct clk *clk;
- mutex_lock(&prepare_lock);
+ if (clk_ignore_unused) {
+ pr_warn("clk: Not disabling unused clocks\n");
+ return 0;
+ }
+
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@@ -386,7 +524,13 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
- mutex_unlock(&prepare_lock);
+ hlist_for_each_entry(clk, &clk_root_list, child_node)
+ clk_unprepare_unused_subtree(clk);
+
+ hlist_for_each_entry(clk, &clk_orphan_list, child_node)
+ clk_unprepare_unused_subtree(clk);
+
+ clk_prepare_unlock();
return 0;
}
@@ -451,6 +595,27 @@ unsigned long __clk_get_flags(struct clk *clk)
return !clk ? 0 : clk->flags;
}
+bool __clk_is_prepared(struct clk *clk)
+{
+ int ret;
+
+ if (!clk)
+ return false;
+
+ /*
+ * .is_prepared is optional for clocks that can prepare;
+ * fall back to the software usage counter if it is missing
+ */
+ if (!clk->ops->is_prepared) {
+ ret = clk->prepare_count ? 1 : 0;
+ goto out;
+ }
+
+ ret = clk->ops->is_prepared(clk->hw);
+out:
+ return !!ret;
+}
+
bool __clk_is_enabled(struct clk *clk)
{
int ret;
@@ -548,9 +713,9 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
__clk_unprepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -596,9 +761,9 @@ int clk_prepare(struct clk *clk)
{
int ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_prepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -640,9 +805,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
__clk_disable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -693,9 +858,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
ret = __clk_enable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
return ret;
}
@@ -740,9 +905,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -837,13 +1002,13 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return rate;
}
@@ -876,16 +1041,16 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
else
new_rate = parent_rate;
- /* abort the rate change if a driver returns NOTIFY_BAD */
+ /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
if (clk->notifier_count)
ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
goto out;
hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
break;
}
@@ -974,11 +1139,11 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
- return 0;
+ return NULL;
if (clk->notifier_count) {
ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
- if (ret == NOTIFY_BAD)
+ if (ret & NOTIFY_STOP_MASK)
fail_clk = clk;
}
@@ -1048,7 +1213,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
int ret = 0;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk->rate)
@@ -1080,7 +1245,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
clk_change_rate(top);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1096,9 +1261,9 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
parent = __clk_get_parent(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return parent;
}
@@ -1162,16 +1327,8 @@ out:
return ret;
}
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
-#ifdef CONFIG_COMMON_CLK_DEBUG
- struct dentry *d;
- struct dentry *new_parent_d;
-#endif
-
- if (!clk || !new_parent)
- return;
-
hlist_del(&clk->child_node);
if (new_parent)
@@ -1179,39 +1336,20 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
else
hlist_add_head(&clk->child_node, &clk_orphan_list);
-#ifdef CONFIG_COMMON_CLK_DEBUG
- if (!inited)
- goto out;
-
- if (new_parent)
- new_parent_d = new_parent->dentry;
- else
- new_parent_d = orphandir;
-
- d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
- new_parent_d, clk->name);
- if (d)
- clk->dentry = d;
- else
- pr_debug("%s: failed to rename debugfs entry for %s\n",
- __func__, clk->name);
-out:
-#endif
-
clk->parent = new_parent;
+}
+void __clk_reparent(struct clk *clk, struct clk *new_parent)
+{
+ clk_reparent(clk, new_parent);
+ clk_debug_reparent(clk, new_parent);
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
-static int __clk_set_parent(struct clk *clk, struct clk *parent)
+static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
- struct clk *old_parent;
- unsigned long flags;
- int ret = -EINVAL;
u8 i;
- old_parent = clk->parent;
-
if (!clk->parents)
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
@@ -1231,36 +1369,79 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
}
}
- if (i == clk->num_parents) {
- pr_debug("%s: clock %s is not a possible parent of clock %s\n",
- __func__, parent->name, clk->name);
- goto out;
- }
+ return i;
+}
- /* migrate prepare and enable */
+static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct clk *old_parent = clk->parent;
+ bool migrated_enable = false;
+
+ /* migrate prepare */
if (clk->prepare_count)
__clk_prepare(parent);
- /* FIXME replace with clk_is_enabled(clk) someday */
- spin_lock_irqsave(&enable_lock, flags);
- if (clk->enable_count)
+ flags = clk_enable_lock();
+
+ /* migrate enable */
+ if (clk->enable_count) {
__clk_enable(parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ migrated_enable = true;
+ }
+
+ /* update the clk tree topology */
+ clk_reparent(clk, parent);
+
+ clk_enable_unlock(flags);
/* change clock input source */
- ret = clk->ops->set_parent(clk->hw, i);
+ if (parent && clk->ops->set_parent)
+ ret = clk->ops->set_parent(clk->hw, p_index);
- /* clean up old prepare and enable */
- spin_lock_irqsave(&enable_lock, flags);
- if (clk->enable_count)
+ if (ret) {
+ /*
+ * The error handling is tricky because we need to release
+ * the spinlock while issuing the .set_parent callback. This
+ * means the new parent might have been enabled/disabled in
+ * between, which must be considered when doing rollback.
+ */
+ flags = clk_enable_lock();
+
+ clk_reparent(clk, old_parent);
+
+ if (migrated_enable && clk->enable_count) {
+ __clk_disable(parent);
+ } else if (migrated_enable && (clk->enable_count == 0)) {
+ __clk_disable(old_parent);
+ } else if (!migrated_enable && clk->enable_count) {
+ __clk_disable(parent);
+ __clk_enable(old_parent);
+ }
+
+ clk_enable_unlock(flags);
+
+ if (clk->prepare_count)
+ __clk_unprepare(parent);
+
+ return ret;
+ }
+
+ /* clean up enable for old parent if migration was done */
+ if (migrated_enable) {
+ flags = clk_enable_lock();
__clk_disable(old_parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
+ }
+ /* clean up prepare for old parent if migration was done */
if (clk->prepare_count)
__clk_unprepare(old_parent);
-out:
- return ret;
+ /* update debugfs with new clk tree topology */
+ clk_debug_reparent(clk, parent);
+ return 0;
}
/**
@@ -1278,44 +1459,59 @@ out:
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
+ u8 p_index = 0;
+ unsigned long p_rate = 0;
if (!clk || !clk->ops)
return -EINVAL;
- if (!clk->ops->set_parent)
+ /* verify ops for multi-parent clks */
+ if ((clk->num_parents > 1) && (!clk->ops->set_parent))
return -ENOSYS;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk->parent == parent)
goto out;
+ /* check that we are allowed to re-parent if the clock is in use */
+ if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* try finding the new parent index */
+ if (parent) {
+ p_index = clk_fetch_parent_index(clk, parent);
+ p_rate = parent->rate;
+ if (p_index == clk->num_parents) {
+ pr_debug("%s: clk %s can not be parent of clk %s\n",
+ __func__, parent->name, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
/* propagate PRE_RATE_CHANGE notifications */
if (clk->notifier_count)
- ret = __clk_speculate_rates(clk, parent->rate);
+ ret = __clk_speculate_rates(clk, p_rate);
/* abort if a driver objects */
- if (ret == NOTIFY_STOP)
+ if (ret & NOTIFY_STOP_MASK)
goto out;
- /* only re-parent if the clock is not in use */
- if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
- ret = -EBUSY;
- else
- ret = __clk_set_parent(clk, parent);
+ /* do the re-parent */
+ ret = __clk_set_parent(clk, parent, p_index);
- /* propagate ABORT_RATE_CHANGE if .set_parent failed */
- if (ret) {
+ /* propagate rate recalculation accordingly */
+ if (ret)
__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
- goto out;
- }
-
- /* propagate rate recalculation downstream */
- __clk_reparent(clk, parent);
+ else
+ __clk_recalc_rates(clk, POST_RATE_CHANGE);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1338,7 +1534,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (!clk)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
@@ -1462,7 +1658,7 @@ int __clk_init(struct device *dev, struct clk *clk)
clk_debug_register(clk);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1696,7 +1892,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1720,7 +1916,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
clk->notifier_count++;
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1745,7 +1941,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
@@ -1766,7 +1962,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
ret = -ENOENT;
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
index 69056a7479e..2628610c192 100644
--- a/drivers/clk/mvebu/clk-core.c
+++ b/drivers/clk/mvebu/clk-core.c
@@ -156,7 +156,7 @@ static u32 __init armada_370_get_cpu_freq(void __iomem *sar)
cpu_freq_select = ((readl(sar) >> SARL_A370_PCLK_FREQ_OPT) &
SARL_A370_PCLK_FREQ_OPT_MASK);
- if (cpu_freq_select > ARRAY_SIZE(armada_370_cpu_frequencies)) {
+ if (cpu_freq_select >= ARRAY_SIZE(armada_370_cpu_frequencies)) {
pr_err("CPU freq select unsuported %d\n", cpu_freq_select);
cpu_freq = 0;
} else
@@ -278,7 +278,7 @@ static u32 __init armada_xp_get_cpu_freq(void __iomem *sar)
cpu_freq_select |= (((readl(sar+4) >> SARH_AXP_PCLK_FREQ_OPT) &
SARH_AXP_PCLK_FREQ_OPT_MASK)
<< SARH_AXP_PCLK_FREQ_OPT_SHIFT);
- if (cpu_freq_select > ARRAY_SIZE(armada_xp_cpu_frequencies)) {
+ if (cpu_freq_select >= ARRAY_SIZE(armada_xp_cpu_frequencies)) {
pr_err("CPU freq select unsuported: %d\n", cpu_freq_select);
cpu_freq = 0;
} else
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 9dd2551a0a4..b0fbc071549 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
-#include "clk-cpu.h"
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
@@ -173,17 +172,5 @@ clks_out:
kfree(cpuclk);
}
-static const __initconst struct of_device_id clk_cpu_match[] = {
- {
- .compatible = "marvell,armada-xp-cpu-clock",
- .data = of_cpu_clk_setup,
- },
- {
- /* sentinel */
- },
-};
-
-void __init mvebu_cpu_clk_init(void)
-{
- of_clk_init(clk_cpu_match);
-}
+CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
+ of_cpu_clk_setup);
diff --git a/drivers/clk/mvebu/clk-cpu.h b/drivers/clk/mvebu/clk-cpu.h
deleted file mode 100644
index 08e2affba4e..00000000000
--- a/drivers/clk/mvebu/clk-cpu.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Marvell MVEBU CPU clock handling.
- *
- * Copyright (C) 2012 Marvell
- *
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __MVEBU_CLK_CPU_H
-#define __MVEBU_CLK_CPU_H
-
-#ifdef CONFIG_MVEBU_CLK_CPU
-void __init mvebu_cpu_clk_init(void);
-#else
-static inline void mvebu_cpu_clk_init(void) {}
-#endif
-
-#endif
diff --git a/drivers/clk/mvebu/clk.c b/drivers/clk/mvebu/clk.c
index 855681b8a9d..29f10fb3006 100644
--- a/drivers/clk/mvebu/clk.c
+++ b/drivers/clk/mvebu/clk.c
@@ -10,18 +10,14 @@
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/clk/mvebu.h>
#include <linux/of.h>
#include "clk-core.h"
-#include "clk-cpu.h"
#include "clk-gating-ctrl.h"
void __init mvebu_clocks_init(void)
{
mvebu_core_clk_init();
mvebu_gating_clk_init();
- mvebu_cpu_clk_init();
+ of_clk_init(NULL);
}
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
index b24d56067c8..5301bce8957 100644
--- a/drivers/clk/mxs/clk.c
+++ b/drivers/clk/mxs/clk.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
+#include "clk.h"
DEFINE_SPINLOCK(mxs_lock);
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 82abea366b7..35e7e2698e1 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -960,47 +960,47 @@ void __init spear1340_clk_init(void)
SPEAR1340_SPDIF_IN_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0100000.spdif-in");
- clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
+ clk = clk_register_gate(NULL, "acp_clk", "ahb_clk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "acp_clk");
- clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
+ clk = clk_register_gate(NULL, "plgpio_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e2800000.gpio");
- clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
+ clk = clk_register_gate(NULL, "video_dec_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_dec");
- clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
+ clk = clk_register_gate(NULL, "video_enc_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_enc");
- clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
+ clk = clk_register_gate(NULL, "video_in_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_vip");
- clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
+ clk = clk_register_gate(NULL, "cam0_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0200000.cam0");
- clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
+ clk = clk_register_gate(NULL, "cam1_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0300000.cam1");
- clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
+ clk = clk_register_gate(NULL, "cam2_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0400000.cam2");
- clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
+ clk = clk_register_gate(NULL, "cam3_clk", "ahb_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "d0500000.cam3");
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
new file mode 100644
index 00000000000..b5bac917612
--- /dev/null
+++ b/drivers/clk/sunxi/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for sunxi specific clk
+#
+
+obj-y += clk-sunxi.o clk-factors.o
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
new file mode 100644
index 00000000000..88523f91d9b
--- /dev/null
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2013 Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adjustable factor-based clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include <linux/delay.h>
+
+#include "clk-factors.h"
+
+/*
+ * DOC: basic adjustable factor-based clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is adjustable.
+ * clk->rate = (parent->rate * N * (K + 1) >> P) / (M + 1)
+ * parent - fixed parent. No clk_set_parent support
+ */
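+
+/*
+ * Worked example (illustrative): with a 24 MHz parent and factors
+ * N = 16, K = 0, M = 1, P = 1 the formula above gives
+ * (24 MHz * 16 * 1 >> 1) / 2 = 96 MHz.
+ */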
+
+struct clk_factors {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct clk_factors_config *config;
+ void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+ spinlock_t *lock;
+};
+
+#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
+
+#define SETMASK(len, pos) (((-1U) >> (31-len)) << (pos))
+#define CLRMASK(len, pos) (~(SETMASK(len, pos)))
+#define FACTOR_GET(bit, len, reg) (((reg) & SETMASK(len, bit)) >> (bit))
+
+#define FACTOR_SET(bit, len, reg, val) \
+ (((reg) & CLRMASK(len, bit)) | (val << (bit)))
+
+static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u8 n = 1, k = 0, p = 0, m = 0;
+ u32 reg;
+ unsigned long rate;
+ struct clk_factors *factors = to_clk_factors(hw);
+ struct clk_factors_config *config = factors->config;
+
+ /* Fetch the register value */
+ reg = readl(factors->reg);
+
+ /* Get each individual factor if applicable */
+ if (config->nwidth != SUNXI_FACTORS_NOT_APPLICABLE)
+ n = FACTOR_GET(config->nshift, config->nwidth, reg);
+ if (config->kwidth != SUNXI_FACTORS_NOT_APPLICABLE)
+ k = FACTOR_GET(config->kshift, config->kwidth, reg);
+ if (config->mwidth != SUNXI_FACTORS_NOT_APPLICABLE)
+ m = FACTOR_GET(config->mshift, config->mwidth, reg);
+ if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
+ p = FACTOR_GET(config->pshift, config->pwidth, reg);
+
+ /* Calculate the rate */
+ rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
+
+ return rate;
+}
+
+static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_factors *factors = to_clk_factors(hw);
+ factors->get_factors((u32 *)&rate, (u32)*parent_rate,
+ NULL, NULL, NULL, NULL);
+
+ return rate;
+}
+
+static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u8 n, k, m, p;
+ u32 reg;
+ struct clk_factors *factors = to_clk_factors(hw);
+ struct clk_factors_config *config = factors->config;
+ unsigned long flags = 0;
+
+ factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
+
+ if (factors->lock)
+ spin_lock_irqsave(factors->lock, flags);
+
+ /* Fetch the register value */
+ reg = readl(factors->reg);
+
+ /* Set up the new factors - macros do not do anything if width is 0 */
+ reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
+ reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
+ reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
+ reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
+
+ /* Apply them now */
+ writel(reg, factors->reg);
+
+ /* delay 500us so pll stabilizes */
+ __delay((rate >> 20) * 500 / 2);
+
+ if (factors->lock)
+ spin_unlock_irqrestore(factors->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_factors_ops = {
+ .recalc_rate = clk_factors_recalc_rate,
+ .round_rate = clk_factors_round_rate,
+ .set_rate = clk_factors_set_rate,
+};
+
+/**
+ * clk_register_factors - register a factors clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust factors
+ * @config: shift and width of factors n, k, m and p
+ * @get_factors: function to calculate the factors for a given frequency
+ * @lock: shared register lock for this clock
+ */
+struct clk *clk_register_factors(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct clk_factors_config *config,
+ void (*get_factors)(u32 *rate, u32 parent,
+ u8 *n, u8 *k, u8 *m, u8 *p),
+ spinlock_t *lock)
+{
+ struct clk_factors *factors;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the factors */
+ factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
+ if (!factors) {
+ pr_err("%s: could not allocate factors clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clk_factors_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_factors assignments */
+ factors->reg = reg;
+ factors->config = config;
+ factors->lock = lock;
+ factors->hw.init = &init;
+ factors->get_factors = get_factors;
+
+ /* register the clock */
+ clk = clk_register(dev, &factors->hw);
+
+ if (IS_ERR(clk))
+ kfree(factors);
+
+ return clk;
+}
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
new file mode 100644
index 00000000000..f49851cc438
--- /dev/null
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -0,0 +1,27 @@
+#ifndef __MACH_SUNXI_CLK_FACTORS_H
+#define __MACH_SUNXI_CLK_FACTORS_H
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+#define SUNXI_FACTORS_NOT_APPLICABLE (0)
+
+struct clk_factors_config {
+ u8 nshift;
+ u8 nwidth;
+ u8 kshift;
+ u8 kwidth;
+ u8 mshift;
+ u8 mwidth;
+ u8 pshift;
+ u8 pwidth;
+};
+
+struct clk *clk_register_factors(struct device *dev, const char *name,
+ const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct clk_factors_config *config,
+ void (*get_factors) (u32 *rate, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p),
+ spinlock_t *lock);
+#endif
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
new file mode 100644
index 00000000000..8492ad1d536
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2013 Emilio López
+ *
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/sunxi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk-factors.h"
+
+static DEFINE_SPINLOCK(clk_lock);
+
+/**
+ * sunxi_osc_clk_setup() - Setup function for gatable oscillator
+ */
+
+#define SUNXI_OSC24M_GATE 0
+
+static void __init sunxi_osc_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ struct clk_fixed_rate *fixed;
+ struct clk_gate *gate;
+ const char *clk_name = node->name;
+ u32 rate;
+
+ /* allocate fixed-rate and gate clock structs */
+ fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
+ if (!fixed)
+ return;
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(fixed);
+ return;
+ }
+
+ if (of_property_read_u32(node, "clock-frequency", &rate)) {
+ kfree(gate);
+ kfree(fixed);
+ return;
+ }
+
+ /* set up gate and fixed rate properties */
+ gate->reg = of_iomap(node, 0);
+ gate->bit_idx = SUNXI_OSC24M_GATE;
+ gate->lock = &clk_lock;
+ fixed->fixed_rate = rate;
+
+ clk = clk_register_composite(NULL, clk_name,
+ NULL, 0,
+ NULL, NULL,
+ &fixed->hw, &clk_fixed_rate_ops,
+ &gate->hw, &clk_gate_ops,
+ CLK_IS_ROOT);
+
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ }
+}
+
+
+
+/**
+ * sunxi_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
+ * parent_rate is always 24 MHz
+ */
+
+static void sunxi_get_pll1_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 div;
+
+ /* Normalize value to a 6M multiple */
+ div = *freq / 6000000;
+ *freq = 6000000 * div;
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ /* m is always zero for pll1 */
+ *m = 0;
+
+ /* k is 1 only on these cases */
+ if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
+ *k = 1;
+ else
+ *k = 0;
+
+ /* p will be 3 for divs under 10 */
+ if (div < 10)
+ *p = 3;
+
+ /* p will be 2 for divs between 10 - 20 and odd divs under 32 */
+ else if (div < 20 || (div < 32 && (div & 1)))
+ *p = 2;
+
+ /* p will be 1 for even divs under 32, divs under 40 and odd pairs
+ * of divs between 40-62 */
+ else if (div < 40 || (div < 64 && (div & 2)))
+ *p = 1;
+
+ /* any other entries have p = 0 */
+ else
+ *p = 0;
+
+ /* calculate a suitable n based on k and p */
+ div <<= *p;
+ div /= (*k + 1);
+ *n = div / 4;
+}
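+
+/*
+ * Worked example (illustrative): asking for 1008 MHz from the 24 MHz
+ * parent gives div = 168, so k = 1 (rate >= 768 MHz), p = 0, m = 0 and
+ * n = (168 / 2) / 4 = 21; 24 MHz * 21 * 2 = 1008 MHz.
+ */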
+
+
+
+/**
+ * sunxi_get_apb1_factors() - calculates m, p factors for APB1
+ * APB1 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sunxi_get_apb1_factors(u32 *freq, u32 parent_rate,
+ u8 *n, u8 *k, u8 *m, u8 *p)
+{
+ u8 calcm, calcp;
+
+ if (parent_rate < *freq)
+ *freq = parent_rate;
+
+ parent_rate = (parent_rate + (*freq - 1)) / *freq;
+
+ /* Invalid rate! */
+ if (parent_rate > 32)
+ return;
+
+ if (parent_rate <= 4)
+ calcp = 0;
+ else if (parent_rate <= 8)
+ calcp = 1;
+ else if (parent_rate <= 16)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = (parent_rate >> calcp) - 1;
+
+ *freq = (parent_rate >> calcp) / (calcm + 1);
+
+ /* we were called to round the frequency, we can now return */
+ if (n == NULL)
+ return;
+
+ *m = calcm;
+ *p = calcp;
+}
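+
+/*
+ * Worked example (illustrative): asking for 3 MHz from a 24 MHz parent
+ * needs a total divider of 8, so calcp = 1 and calcm = 3, i.e.
+ * (24 MHz >> 1) / (3 + 1) = 3 MHz.
+ */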
+
+
+
+/**
+ * sunxi_factors_clk_setup() - Setup function for factor clocks
+ */
+
+struct factors_data {
+ struct clk_factors_config *table;
+ void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+};
+
+static struct clk_factors_config pll1_config = {
+ .nshift = 8,
+ .nwidth = 5,
+ .kshift = 4,
+ .kwidth = 2,
+ .mshift = 0,
+ .mwidth = 2,
+ .pshift = 16,
+ .pwidth = 2,
+};
+
+static struct clk_factors_config apb1_config = {
+ .mshift = 0,
+ .mwidth = 5,
+ .pshift = 16,
+ .pwidth = 2,
+};
+
+static const __initconst struct factors_data pll1_data = {
+ .table = &pll1_config,
+ .getter = sunxi_get_pll1_factors,
+};
+
+static const __initconst struct factors_data apb1_data = {
+ .table = &apb1_config,
+ .getter = sunxi_get_apb1_factors,
+};
+
+static void __init sunxi_factors_clk_setup(struct device_node *node,
+ struct factors_data *data)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parent;
+ void *reg;
+
+ reg = of_iomap(node, 0);
+
+ parent = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_factors(NULL, clk_name, parent, 0, reg,
+ data->table, data->getter, &clk_lock);
+
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ }
+}
+
+
+
+/**
+ * sunxi_mux_clk_setup() - Setup function for muxes
+ */
+
+#define SUNXI_MUX_GATE_WIDTH 2
+
+struct mux_data {
+ u8 shift;
+};
+
+static const __initconst struct mux_data cpu_data = {
+ .shift = 16,
+};
+
+static const __initconst struct mux_data apb1_mux_data = {
+ .shift = 24,
+};
+
+static void __init sunxi_mux_clk_setup(struct device_node *node,
+ struct mux_data *data)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *parents[5];
+ void *reg;
+ int i = 0;
+
+ reg = of_iomap(node, 0);
+
+ while (i < 5 && (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+ i++;
+
+ clk = clk_register_mux(NULL, clk_name, parents, i, 0, reg,
+ data->shift, SUNXI_MUX_GATE_WIDTH,
+ 0, &clk_lock);
+
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ }
+}
+
+
+
+/**
+ * sunxi_divider_clk_setup() - Setup function for simple divider clocks
+ */
+
+#define SUNXI_DIVISOR_WIDTH 2
+
+struct div_data {
+ u8 shift;
+ u8 pow;
+};
+
+static const __initconst struct div_data axi_data = {
+ .shift = 0,
+ .pow = 0,
+};
+
+static const __initconst struct div_data ahb_data = {
+ .shift = 4,
+ .pow = 1,
+};
+
+static const __initconst struct div_data apb0_data = {
+ .shift = 8,
+ .pow = 1,
+};
+
+static void __init sunxi_divider_clk_setup(struct device_node *node,
+ struct div_data *data)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ const char *clk_parent;
+ void *reg;
+
+ reg = of_iomap(node, 0);
+
+ clk_parent = of_clk_get_parent_name(node, 0);
+
+ clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
+ reg, data->shift, SUNXI_DIVISOR_WIDTH,
+ data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
+ &clk_lock);
+ if (clk) {
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ }
+}
+
+
+
+/**
+ * sunxi_gates_clk_setup() - Setup function for leaf gates on clocks
+ */
+
+#define SUNXI_GATES_MAX_SIZE 64
+
+struct gates_data {
+ DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
+};
+
+static const __initconst struct gates_data axi_gates_data = {
+ .mask = {1},
+};
+
+static const __initconst struct gates_data ahb_gates_data = {
+ .mask = {0x7F77FFF, 0x14FB3F},
+};
+
+static const __initconst struct gates_data apb0_gates_data = {
+ .mask = {0x4EF},
+};
+
+static const __initconst struct gates_data apb1_gates_data = {
+ .mask = {0xFF00F7},
+};
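+
+/*
+ * Each mask above flags which bits of the gate register(s) correspond to
+ * real outputs; e.g. the apb0 mask 0x4EF covers gate bits 0-3, 5-7 and 10.
+ * Every set bit gets its own gate clock registered in the loop below.
+ */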
+
+static void __init sunxi_gates_clk_setup(struct device_node *node,
+ struct gates_data *data)
+{
+ struct clk_onecell_data *clk_data;
+ const char *clk_parent;
+ const char *clk_name;
+ void *reg;
+ int qty;
+ int i = 0;
+ int j = 0;
+ int ignore;
+
+ reg = of_iomap(node, 0);
+
+ clk_parent = of_clk_get_parent_name(node, 0);
+
+ /* Worst-case size approximation and memory allocation */
+ qty = find_last_bit(data->mask, SUNXI_GATES_MAX_SIZE);
+ clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+ if (!clk_data)
+ return;
+ clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL);
+ if (!clk_data->clks) {
+ kfree(clk_data);
+ return;
+ }
+
+ for_each_set_bit(i, data->mask, SUNXI_GATES_MAX_SIZE) {
+ of_property_read_string_index(node, "clock-output-names",
+ j, &clk_name);
+
+ /* No driver claims this clock, but it should not be gated */
+ ignore = !strcmp("ahb_sdram", clk_name) ? CLK_IGNORE_UNUSED : 0;
+
+ clk_data->clks[i] = clk_register_gate(NULL, clk_name,
+ clk_parent, ignore,
+ reg + 4 * (i/32), i % 32,
+ 0, &clk_lock);
+ WARN_ON(IS_ERR(clk_data->clks[i]));
+
+ j++;
+ }
+
+ /* Adjust to the real max */
+ clk_data->clk_num = i;
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+/* Matches for of_clk_init */
+static const __initconst struct of_device_id clk_match[] = {
+ {.compatible = "allwinner,sun4i-osc-clk", .data = sunxi_osc_clk_setup,},
+ {}
+};
+
+/* Matches for factors clocks */
+static const __initconst struct of_device_id clk_factors_match[] = {
+ {.compatible = "allwinner,sun4i-pll1-clk", .data = &pll1_data,},
+ {.compatible = "allwinner,sun4i-apb1-clk", .data = &apb1_data,},
+ {}
+};
+
+/* Matches for divider clocks */
+static const __initconst struct of_device_id clk_div_match[] = {
+ {.compatible = "allwinner,sun4i-axi-clk", .data = &axi_data,},
+ {.compatible = "allwinner,sun4i-ahb-clk", .data = &ahb_data,},
+ {.compatible = "allwinner,sun4i-apb0-clk", .data = &apb0_data,},
+ {}
+};
+
+/* Matches for mux clocks */
+static const __initconst struct of_device_id clk_mux_match[] = {
+ {.compatible = "allwinner,sun4i-cpu-clk", .data = &cpu_data,},
+ {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &apb1_mux_data,},
+ {}
+};
+
+/* Matches for gate clocks */
+static const __initconst struct of_device_id clk_gates_match[] = {
+ {.compatible = "allwinner,sun4i-axi-gates-clk", .data = &axi_gates_data,},
+ {.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &ahb_gates_data,},
+ {.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &apb0_gates_data,},
+ {.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &apb1_gates_data,},
+ {}
+};
+
+static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
+ void *function)
+{
+ struct device_node *np;
+ const struct div_data *data;
+ const struct of_device_id *match;
+ void (*setup_function)(struct device_node *, const void *) = function;
+
+ for_each_matching_node(np, clk_match) {
+ match = of_match_node(clk_match, np);
+ data = match->data;
+ setup_function(np, data);
+ }
+}
+
+void __init sunxi_init_clocks(void)
+{
+ /* Register all the simple sunxi clocks on DT */
+ of_clk_init(clk_match);
+
+ /* Register factor clocks */
+ of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
+
+ /* Register divider clocks */
+ of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
+
+ /* Register mux clocks */
+ of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
+
+ /* Register gate clocks */
+ of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
+}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 0744731c622..a09d7dcaf18 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -355,15 +355,16 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
struct tegra_clk_periph *periph, void __iomem *clk_base,
u32 offset);
-#define TEGRA_CLK_PERIPH(_mux_shift, _mux_width, _mux_flags, \
+#define TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, _mux_flags, \
_div_shift, _div_width, _div_frac_width, \
_div_flags, _clk_num, _enb_refcnt, _regs, \
- _gate_flags) \
+ _gate_flags, _table) \
{ \
.mux = { \
.flags = _mux_flags, \
.shift = _mux_shift, \
- .width = _mux_width, \
+ .mask = _mux_mask, \
+ .table = _table, \
}, \
.divider = { \
.flags = _div_flags, \
@@ -393,26 +394,36 @@ struct tegra_periph_init_data {
const char *dev_id;
};
-#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset, \
- _mux_shift, _mux_width, _mux_flags, _div_shift, \
+#define TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
+ _mux_shift, _mux_mask, _mux_flags, _div_shift, \
_div_width, _div_frac_width, _div_flags, _regs, \
- _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id, _table) \
{ \
.name = _name, \
.clk_id = _clk_id, \
.parent_names = _parent_names, \
.num_parents = ARRAY_SIZE(_parent_names), \
- .periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_width, \
+ .periph = TEGRA_CLK_PERIPH(_mux_shift, _mux_mask, \
_mux_flags, _div_shift, \
_div_width, _div_frac_width, \
_div_flags, _clk_num, \
_enb_refcnt, _regs, \
- _gate_flags), \
+ _gate_flags, _table), \
.offset = _offset, \
.con_id = _con_id, \
.dev_id = _dev_id, \
}
+#define TEGRA_INIT_DATA(_name, _con_id, _dev_id, _parent_names, _offset,\
+ _mux_shift, _mux_width, _mux_flags, _div_shift, \
+ _div_width, _div_frac_width, _div_flags, _regs, \
+ _clk_num, _enb_refcnt, _gate_flags, _clk_id) \
+ TEGRA_INIT_DATA_TABLE(_name, _con_id, _dev_id, _parent_names, _offset,\
+ _mux_shift, BIT(_mux_width) - 1, _mux_flags, \
+ _div_shift, _div_width, _div_frac_width, _div_flags, \
+ _regs, _clk_num, _enb_refcnt, _gate_flags, _clk_id,\
+ NULL)
+
/**
* struct clk_super_mux - super clock
*
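The compatibility wrapper above keeps existing TEGRA_INIT_DATA users working by deriving the new mask argument from the old width; a quick worked expansion (illustrative only):

	/* TEGRA_INIT_DATA with _mux_width = 2 passes
	 *   _mux_mask = BIT(2) - 1 = 0x3
	 * to TEGRA_INIT_DATA_TABLE, i.e. two contiguous bits at _mux_shift,
	 * together with a NULL parent table - intended to select the same
	 * parent field the old .width = 2 form addressed. */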
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index bcc0c11a507..c6a806ed0e8 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -5,6 +5,7 @@
# Clock types
obj-y += clk-prcc.o
obj-y += clk-prcmu.o
+obj-y += clk-sysctrl.o
# Clock definitions
obj-y += u8500_clk.o
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 9f7400d74fa..a0fca004abc 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -12,13 +12,78 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mfd/abx500/ab8500.h>
-
-/* TODO: Add clock implementations here */
-
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include "clk.h"
/* Clock definitions for ab8500 */
static int ab8500_reg_clks(struct device *dev)
{
+ int ret;
+ struct clk *clk;
+
+ const char *intclk_parents[] = {"ab8500_sysclk", "ulpclk"};
+ u16 intclk_reg_sel[] = {0, AB8500_SYSULPCLKCTRL1};
+ u8 intclk_reg_mask[] = {0, AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK};
+ u8 intclk_reg_bits[] = {
+ 0,
+ (1 << AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT)
+ };
+
+ dev_info(dev, "register clocks for ab850x\n");
+
+ /* Enable SWAT */
+ ret = ab8500_sysctrl_set(AB8500_SWATCTRL, AB8500_SWATCTRL_SWATENABLE);
+ if (ret)
+ return ret;
+
+ /* ab8500_sysclk */
+ clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK,
+ CLK_IS_ROOT);
+ clk_register_clkdev(clk, "sysclk", "ab8500-usb.0");
+ clk_register_clkdev(clk, "sysclk", "ab-iddet.0");
+ clk_register_clkdev(clk, "sysclk", "ab85xx-codec.0");
+ clk_register_clkdev(clk, "sysclk", "shrm_bus");
+
+ /* ab8500_sysclk2 */
+ clk = clk_reg_sysctrl_gate(dev, "ab8500_sysclk2", "ab8500_sysclk",
+ AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ,
+ AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ, 0, 0);
+ clk_register_clkdev(clk, "sysclk", "0-0070");
+
+ /* ab8500_sysclk3 */
+ clk = clk_reg_sysctrl_gate(dev, "ab8500_sysclk3", "ab8500_sysclk",
+ AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ,
+ AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ, 0, 0);
+ clk_register_clkdev(clk, "sysclk", "cg1960_core.0");
+
+ /* ab8500_sysclk4 */
+ clk = clk_reg_sysctrl_gate(dev, "ab8500_sysclk4", "ab8500_sysclk",
+ AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ,
+ AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ, 0, 0);
+
+ /* ab_ulpclk */
+ clk = clk_reg_sysctrl_gate_fixed_rate(dev, "ulpclk", NULL,
+ AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
+ AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
+ 38400000, 9000, CLK_IS_ROOT);
+ clk_register_clkdev(clk, "ulpclk", "ab85xx-codec.0");
+
+ /* ab8500_intclk */
+ clk = clk_reg_sysctrl_set_parent(dev, "intclk", intclk_parents, 2,
+ intclk_reg_sel, intclk_reg_mask, intclk_reg_bits, 0);
+ clk_register_clkdev(clk, "intclk", "ab85xx-codec.0");
+ clk_register_clkdev(clk, NULL, "ab8500-pwm.1");
+
+ /* ab8500_audioclk */
+ clk = clk_reg_sysctrl_gate(dev, "audioclk", "intclk",
+ AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_AUDIOCLKENA,
+ AB8500_SYSULPCLKCTRL1_AUDIOCLKENA, 0, 0);
+ clk_register_clkdev(clk, "audioclk", "ab85xx-codec.0");
+
return 0;
}
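The clk_register_clkdev() lookups above are what consumer drivers resolve at runtime; a rough consumer-side sketch (error handling trimmed, the device name comes from the lookups registered above):

	/* e.g. in a driver probed as "ab8500-usb.0": */
	struct clk *sysclk = clk_get(dev, "sysclk");

	if (!IS_ERR(sysclk))
		clk_prepare_enable(sysclk);	/* gating goes through the PRCMU */
	/* ... */
	clk_disable_unprepare(sysclk);
	clk_put(sysclk);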
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 74faa7e3cf5..293a2885441 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -20,15 +20,23 @@
struct clk_prcmu {
struct clk_hw hw;
u8 cg_sel;
+ int is_prepared;
int is_enabled;
+ int opp_requested;
};
/* PRCMU clock operations. */
static int clk_prcmu_prepare(struct clk_hw *hw)
{
+ int ret;
struct clk_prcmu *clk = to_clk_prcmu(hw);
- return prcmu_request_clock(clk->cg_sel, true);
+
+ ret = prcmu_request_clock(clk->cg_sel, true);
+ if (!ret)
+ clk->is_prepared = 1;
+
+ return ret;
}
static void clk_prcmu_unprepare(struct clk_hw *hw)
@@ -36,7 +44,15 @@ static void clk_prcmu_unprepare(struct clk_hw *hw)
struct clk_prcmu *clk = to_clk_prcmu(hw);
if (prcmu_request_clock(clk->cg_sel, false))
pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- hw->init->name);
+ __clk_get_name(hw->clk));
+ else
+ clk->is_prepared = 0;
+}
+
+static int clk_prcmu_is_prepared(struct clk_hw *hw)
+{
+ struct clk_prcmu *clk = to_clk_prcmu(hw);
+ return clk->is_prepared;
}
static int clk_prcmu_enable(struct clk_hw *hw)
@@ -79,58 +95,52 @@ static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
return prcmu_set_clock_rate(clk->cg_sel, rate);
}
-static int request_ape_opp100(bool enable)
-{
- static int reqs;
- int err = 0;
-
- if (enable) {
- if (!reqs)
- err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
- "clock", 100);
- if (!err)
- reqs++;
- } else {
- reqs--;
- if (!reqs)
- prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
- "clock");
- }
- return err;
-}
-
static int clk_prcmu_opp_prepare(struct clk_hw *hw)
{
int err;
struct clk_prcmu *clk = to_clk_prcmu(hw);
- err = request_ape_opp100(true);
- if (err) {
- pr_err("clk_prcmu: %s failed to request APE OPP100 for %s.\n",
- __func__, hw->init->name);
- return err;
+ if (!clk->opp_requested) {
+ err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
+ (char *)__clk_get_name(hw->clk),
+ 100);
+ if (err) {
+ pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
+ __func__, __clk_get_name(hw->clk));
+ return err;
+ }
+ clk->opp_requested = 1;
}
err = prcmu_request_clock(clk->cg_sel, true);
- if (err)
- request_ape_opp100(false);
+ if (err) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)__clk_get_name(hw->clk));
+ clk->opp_requested = 0;
+ return err;
+ }
- return err;
+ clk->is_prepared = 1;
+ return 0;
}
static void clk_prcmu_opp_unprepare(struct clk_hw *hw)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- if (prcmu_request_clock(clk->cg_sel, false))
- goto out_error;
- if (request_ape_opp100(false))
- goto out_error;
- return;
-
-out_error:
- pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- hw->init->name);
+ if (prcmu_request_clock(clk->cg_sel, false)) {
+ pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
+ __clk_get_name(hw->clk));
+ return;
+ }
+
+ if (clk->opp_requested) {
+ prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
+ (char *)__clk_get_name(hw->clk));
+ clk->opp_requested = 0;
+ }
+
+ clk->is_prepared = 0;
}
static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
@@ -138,38 +148,49 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw)
int err;
struct clk_prcmu *clk = to_clk_prcmu(hw);
- err = prcmu_request_ape_opp_100_voltage(true);
- if (err) {
- pr_err("clk_prcmu: %s failed to request APE OPP VOLT for %s.\n",
- __func__, hw->init->name);
- return err;
+ if (!clk->opp_requested) {
+ err = prcmu_request_ape_opp_100_voltage(true);
+ if (err) {
+ pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
+ __func__, __clk_get_name(hw->clk));
+ return err;
+ }
+ clk->opp_requested = 1;
}
err = prcmu_request_clock(clk->cg_sel, true);
- if (err)
+ if (err) {
prcmu_request_ape_opp_100_voltage(false);
+ clk->opp_requested = 0;
+ return err;
+ }
- return err;
+ clk->is_prepared = 1;
+ return 0;
}
static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- if (prcmu_request_clock(clk->cg_sel, false))
- goto out_error;
- if (prcmu_request_ape_opp_100_voltage(false))
- goto out_error;
- return;
-
-out_error:
- pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
- hw->init->name);
+ if (prcmu_request_clock(clk->cg_sel, false)) {
+ pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
+ __clk_get_name(hw->clk));
+ return;
+ }
+
+ if (clk->opp_requested) {
+ prcmu_request_ape_opp_100_voltage(false);
+ clk->opp_requested = 0;
+ }
+
+ clk->is_prepared = 0;
}
static struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
+ .is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@@ -181,6 +202,7 @@ static struct clk_ops clk_prcmu_scalable_ops = {
static struct clk_ops clk_prcmu_gate_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
+ .is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@@ -202,6 +224,7 @@ static struct clk_ops clk_prcmu_rate_ops = {
static struct clk_ops clk_prcmu_opp_gate_ops = {
.prepare = clk_prcmu_opp_prepare,
.unprepare = clk_prcmu_opp_unprepare,
+ .is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@@ -211,6 +234,7 @@ static struct clk_ops clk_prcmu_opp_gate_ops = {
static struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
+ .is_prepared = clk_prcmu_is_prepared,
.enable = clk_prcmu_enable,
.disable = clk_prcmu_disable,
.is_enabled = clk_prcmu_is_enabled,
@@ -242,7 +266,9 @@ static struct clk *clk_reg_prcmu(const char *name,
}
clk->cg_sel = cg_sel;
+ clk->is_prepared = 1;
clk->is_enabled = 1;
+ clk->opp_requested = 0;
/* "rate" can be used for changing the initial frequency */
if (rate)
prcmu_set_clock_rate(cg_sel, rate);
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
new file mode 100644
index 00000000000..bc7e9bde792
--- /dev/null
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -0,0 +1,221 @@
+/*
+ * Sysctrl clock implementation for ux500 platform.
+ *
+ * Copyright (C) 2013 ST-Ericsson SA
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define SYSCTRL_MAX_NUM_PARENTS 4
+
+#define to_clk_sysctrl(_hw) container_of(_hw, struct clk_sysctrl, hw)
+
+struct clk_sysctrl {
+ struct clk_hw hw;
+ struct device *dev;
+ u8 parent_index;
+ u16 reg_sel[SYSCTRL_MAX_NUM_PARENTS];
+ u8 reg_mask[SYSCTRL_MAX_NUM_PARENTS];
+ u8 reg_bits[SYSCTRL_MAX_NUM_PARENTS];
+ unsigned long rate;
+ unsigned long enable_delay_us;
+};
+
+/* Sysctrl clock operations. */
+
+static int clk_sysctrl_prepare(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_sysctrl *clk = to_clk_sysctrl(hw);
+
+ ret = ab8500_sysctrl_write(clk->reg_sel[0], clk->reg_mask[0],
+ clk->reg_bits[0]);
+
+ if (!ret && clk->enable_delay_us)
+ usleep_range(clk->enable_delay_us, clk->enable_delay_us);
+
+ return ret;
+}
+
+static void clk_sysctrl_unprepare(struct clk_hw *hw)
+{
+ struct clk_sysctrl *clk = to_clk_sysctrl(hw);
+ if (ab8500_sysctrl_clear(clk->reg_sel[0], clk->reg_mask[0]))
+ dev_err(clk->dev, "clk_sysctrl: %s fail to clear %s.\n",
+ __func__, __clk_get_name(hw->clk));
+}
+
+static unsigned long clk_sysctrl_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sysctrl *clk = to_clk_sysctrl(hw);
+ return clk->rate;
+}
+
+static int clk_sysctrl_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sysctrl *clk = to_clk_sysctrl(hw);
+ u8 old_index = clk->parent_index;
+ int ret = 0;
+
+ if (clk->reg_sel[old_index]) {
+ ret = ab8500_sysctrl_clear(clk->reg_sel[old_index],
+ clk->reg_mask[old_index]);
+ if (ret)
+ return ret;
+ }
+
+ if (clk->reg_sel[index]) {
+ ret = ab8500_sysctrl_write(clk->reg_sel[index],
+ clk->reg_mask[index],
+ clk->reg_bits[index]);
+ if (ret) {
+ if (clk->reg_sel[old_index])
+ ab8500_sysctrl_write(clk->reg_sel[old_index],
+ clk->reg_mask[old_index],
+ clk->reg_bits[old_index]);
+ return ret;
+ }
+ }
+ clk->parent_index = index;
+
+ return ret;
+}
+
+static u8 clk_sysctrl_get_parent(struct clk_hw *hw)
+{
+ struct clk_sysctrl *clk = to_clk_sysctrl(hw);
+ return clk->parent_index;
+}
+
+static struct clk_ops clk_sysctrl_gate_ops = {
+ .prepare = clk_sysctrl_prepare,
+ .unprepare = clk_sysctrl_unprepare,
+};
+
+static struct clk_ops clk_sysctrl_gate_fixed_rate_ops = {
+ .prepare = clk_sysctrl_prepare,
+ .unprepare = clk_sysctrl_unprepare,
+ .recalc_rate = clk_sysctrl_recalc_rate,
+};
+
+static struct clk_ops clk_sysctrl_set_parent_ops = {
+ .set_parent = clk_sysctrl_set_parent,
+ .get_parent = clk_sysctrl_get_parent,
+};
+
+static struct clk *clk_reg_sysctrl(struct device *dev,
+ const char *name,
+ const char **parent_names,
+ u8 num_parents,
+ u16 *reg_sel,
+ u8 *reg_mask,
+ u8 *reg_bits,
+ unsigned long rate,
+ unsigned long enable_delay_us,
+ unsigned long flags,
+ struct clk_ops *clk_sysctrl_ops)
+{
+ struct clk_sysctrl *clk;
+ struct clk_init_data clk_sysctrl_init;
+ struct clk *clk_reg;
+ int i;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ if (!name || (num_parents > SYSCTRL_MAX_NUM_PARENTS)) {
+ dev_err(dev, "clk_sysctrl: invalid arguments passed\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ clk = devm_kzalloc(dev, sizeof(struct clk_sysctrl), GFP_KERNEL);
+ if (!clk) {
+ dev_err(dev, "clk_sysctrl: could not allocate clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < num_parents; i++) {
+ clk->reg_sel[i] = reg_sel[i];
+ clk->reg_bits[i] = reg_bits[i];
+ clk->reg_mask[i] = reg_mask[i];
+ }
+
+ clk->parent_index = 0;
+ clk->rate = rate;
+ clk->enable_delay_us = enable_delay_us;
+ clk->dev = dev;
+
+ clk_sysctrl_init.name = name;
+ clk_sysctrl_init.ops = clk_sysctrl_ops;
+ clk_sysctrl_init.flags = flags;
+ clk_sysctrl_init.parent_names = parent_names;
+ clk_sysctrl_init.num_parents = num_parents;
+ clk->hw.init = &clk_sysctrl_init;
+
+ clk_reg = devm_clk_register(clk->dev, &clk->hw);
+ if (IS_ERR(clk_reg))
+ dev_err(dev, "clk_sysctrl: clk_register failed\n");
+
+ return clk_reg;
+}
+
+struct clk *clk_reg_sysctrl_gate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ u16 reg_sel,
+ u8 reg_mask,
+ u8 reg_bits,
+ unsigned long enable_delay_us,
+ unsigned long flags)
+{
+ const char **parent_names = (parent_name ? &parent_name : NULL);
+ u8 num_parents = (parent_name ? 1 : 0);
+
+ return clk_reg_sysctrl(dev, name, parent_names, num_parents,
+ &reg_sel, &reg_mask, &reg_bits, 0, enable_delay_us,
+ flags, &clk_sysctrl_gate_ops);
+}
+
+struct clk *clk_reg_sysctrl_gate_fixed_rate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ u16 reg_sel,
+ u8 reg_mask,
+ u8 reg_bits,
+ unsigned long rate,
+ unsigned long enable_delay_us,
+ unsigned long flags)
+{
+ const char **parent_names = (parent_name ? &parent_name : NULL);
+ u8 num_parents = (parent_name ? 1 : 0);
+
+ return clk_reg_sysctrl(dev, name, parent_names, num_parents,
+ &reg_sel, &reg_mask, &reg_bits,
+ rate, enable_delay_us, flags,
+ &clk_sysctrl_gate_fixed_rate_ops);
+}
+
+struct clk *clk_reg_sysctrl_set_parent(struct device *dev,
+ const char *name,
+ const char **parent_names,
+ u8 num_parents,
+ u16 *reg_sel,
+ u8 *reg_mask,
+ u8 *reg_bits,
+ unsigned long flags)
+{
+ return clk_reg_sysctrl(dev, name, parent_names, num_parents,
+ reg_sel, reg_mask, reg_bits, 0, 0, flags,
+ &clk_sysctrl_set_parent_ops);
+}
diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h
index c3e449169a8..a2bb92d85ee 100644
--- a/drivers/clk/ux500/clk.h
+++ b/drivers/clk/ux500/clk.h
@@ -11,16 +11,18 @@
#define __UX500_CLK_H
#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/types.h>
struct clk *clk_reg_prcc_pclk(const char *name,
const char *parent_name,
- unsigned int phy_base,
+ resource_size_t phy_base,
u32 cg_sel,
unsigned long flags);
struct clk *clk_reg_prcc_kclk(const char *name,
const char *parent_name,
- unsigned int phy_base,
+ resource_size_t phy_base,
u32 cg_sel,
unsigned long flags);
@@ -57,4 +59,32 @@ struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name,
unsigned long rate,
unsigned long flags);
+struct clk *clk_reg_sysctrl_gate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ u16 reg_sel,
+ u8 reg_mask,
+ u8 reg_bits,
+ unsigned long enable_delay_us,
+ unsigned long flags);
+
+struct clk *clk_reg_sysctrl_gate_fixed_rate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ u16 reg_sel,
+ u8 reg_mask,
+ u8 reg_bits,
+ unsigned long rate,
+ unsigned long enable_delay_us,
+ unsigned long flags);
+
+struct clk *clk_reg_sysctrl_set_parent(struct device *dev,
+ const char *name,
+ const char **parent_names,
+ u8 num_parents,
+ u16 *reg_sel,
+ u8 *reg_mask,
+ u8 *reg_bits,
+ unsigned long flags);
+
#endif /* __UX500_CLK_H */
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index ec3b88fe3e6..c16ca787170 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ICST) += clk-icst.o
obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
-obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o
+obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o clk-sp810.o
obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
new file mode 100644
index 00000000000..bf9b15a585e
--- /dev/null
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -0,0 +1,188 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2013 ARM Limited
+ */
+
+#include <linux/amba/sp810.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define to_clk_sp810_timerclken(_hw) \
+ container_of(_hw, struct clk_sp810_timerclken, hw)
+
+struct clk_sp810;
+
+struct clk_sp810_timerclken {
+ struct clk_hw hw;
+ struct clk *clk;
+ struct clk_sp810 *sp810;
+ int channel;
+};
+
+struct clk_sp810 {
+ struct device_node *node;
+ int refclk_index, timclk_index;
+ void __iomem *base;
+ spinlock_t lock;
+ struct clk_sp810_timerclken timerclken[4];
+ struct clk *refclk;
+ struct clk *timclk;
+};
+
+static u8 clk_sp810_timerclken_get_parent(struct clk_hw *hw)
+{
+ struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
+ u32 val = readl(timerclken->sp810->base + SCCTRL);
+
+ return !!(val & (1 << SCCTRL_TIMERENnSEL_SHIFT(timerclken->channel)));
+}
+
+static int clk_sp810_timerclken_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
+ struct clk_sp810 *sp810 = timerclken->sp810;
+ u32 val, shift = SCCTRL_TIMERENnSEL_SHIFT(timerclken->channel);
+ unsigned long flags = 0;
+
+ if (WARN_ON(index > 1))
+ return -EINVAL;
+
+ spin_lock_irqsave(&sp810->lock, flags);
+
+ val = readl(sp810->base + SCCTRL);
+ val &= ~(1 << shift);
+ val |= index << shift;
+ writel(val, sp810->base + SCCTRL);
+
+ spin_unlock_irqrestore(&sp810->lock, flags);
+
+ return 0;
+}
+
+/*
+ * FIXME - setting the parent every time .prepare is invoked is inefficient.
+ * This is better handled by a dedicated clock tree configuration mechanism at
+ * init-time. Revisit this later when such a mechanism exists
+ */
+static int clk_sp810_timerclken_prepare(struct clk_hw *hw)
+{
+ struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
+ struct clk_sp810 *sp810 = timerclken->sp810;
+ struct clk *old_parent = __clk_get_parent(hw->clk);
+ struct clk *new_parent;
+
+ if (!sp810->refclk)
+ sp810->refclk = of_clk_get(sp810->node, sp810->refclk_index);
+
+ if (!sp810->timclk)
+ sp810->timclk = of_clk_get(sp810->node, sp810->timclk_index);
+
+ if (WARN_ON(IS_ERR(sp810->refclk) || IS_ERR(sp810->timclk)))
+ return -ENOENT;
+
+ /* Select fastest parent */
+ if (clk_get_rate(sp810->refclk) > clk_get_rate(sp810->timclk))
+ new_parent = sp810->refclk;
+ else
+ new_parent = sp810->timclk;
+
+ /* Switch the parent if necessary */
+ if (old_parent != new_parent) {
+ clk_prepare(new_parent);
+ clk_set_parent(hw->clk, new_parent);
+ clk_unprepare(old_parent);
+ }
+
+ return 0;
+}
+
+static void clk_sp810_timerclken_unprepare(struct clk_hw *hw)
+{
+ struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
+ struct clk_sp810 *sp810 = timerclken->sp810;
+
+ clk_put(sp810->timclk);
+ clk_put(sp810->refclk);
+}
+
+static const struct clk_ops clk_sp810_timerclken_ops = {
+ .prepare = clk_sp810_timerclken_prepare,
+ .unprepare = clk_sp810_timerclken_unprepare,
+ .get_parent = clk_sp810_timerclken_get_parent,
+ .set_parent = clk_sp810_timerclken_set_parent,
+};
+
+struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_sp810 *sp810 = data;
+
+ if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >=
+ ARRAY_SIZE(sp810->timerclken)))
+ return NULL;
+
+ return sp810->timerclken[clkspec->args[0]].clk;
+}
+
+void __init clk_sp810_of_setup(struct device_node *node)
+{
+ struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL);
+ const char *parent_names[2];
+ char name[12];
+ struct clk_init_data init;
+ int i;
+
+ if (!sp810) {
+ pr_err("Failed to allocate memory for SP810!\n");
+ return;
+ }
+
+ sp810->refclk_index = of_property_match_string(node, "clock-names",
+ "refclk");
+ parent_names[0] = of_clk_get_parent_name(node, sp810->refclk_index);
+
+ sp810->timclk_index = of_property_match_string(node, "clock-names",
+ "timclk");
+ parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index);
+
+ if (!parent_names[0] || !parent_names[1]) {
+ pr_warn("Failed to obtain parent clocks for SP810!\n");
+ return;
+ }
+
+ sp810->node = node;
+ sp810->base = of_iomap(node, 0);
+ spin_lock_init(&sp810->lock);
+
+ init.name = name;
+ init.ops = &clk_sp810_timerclken_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = ARRAY_SIZE(parent_names);
+
+ for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+ snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+
+ sp810->timerclken[i].sp810 = sp810;
+ sp810->timerclken[i].channel = i;
+ sp810->timerclken[i].hw.init = &init;
+
+ sp810->timerclken[i].clk = clk_register(NULL,
+ &sp810->timerclken[i].hw);
+ WARN_ON(IS_ERR(sp810->timerclken[i].clk));
+ }
+
+ of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
+}
+CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
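The provider registered with of_clk_add_provider() hands out one of the four timerclken outputs based on a single-cell specifier; a small sketch of how the callback above resolves it (values hypothetical):

	/* A consumer whose clock specifier carries the cell value 2
	 * gets timerclken[2].clk back from the callback. */
	struct of_phandle_args spec = {
		.args_count = 1,
		.args = { 2 },
	};
	struct clk *clk = clk_sp810_timerclken_of_get(&spec, sp810);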
diff --git a/drivers/clk/versatile/clk-vexpress.c b/drivers/clk/versatile/clk-vexpress.c
index 82b45aad8cc..a4a728d0509 100644
--- a/drivers/clk/versatile/clk-vexpress.c
+++ b/drivers/clk/versatile/clk-vexpress.c
@@ -15,8 +15,6 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/vexpress.h>
static struct clk *vexpress_sp810_timerclken[4];
@@ -86,50 +84,3 @@ void __init vexpress_clk_init(void __iomem *sp810_base)
WARN_ON(clk_register_clkdev(vexpress_sp810_timerclken[1],
"v2m-timer1", "sp804"));
}
-
-#if defined(CONFIG_OF)
-
-struct clk *vexpress_sp810_of_get(struct of_phandle_args *clkspec, void *data)
-{
- if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
- ARRAY_SIZE(vexpress_sp810_timerclken)))
- return NULL;
-
- return vexpress_sp810_timerclken[clkspec->args[0]];
-}
-
-void __init vexpress_clk_of_init(void)
-{
- struct device_node *node;
- struct clk *clk;
- struct clk *refclk, *timclk;
-
- of_clk_init(NULL);
-
- node = of_find_compatible_node(NULL, NULL, "arm,sp810");
- vexpress_sp810_init(of_iomap(node, 0));
- of_clk_add_provider(node, vexpress_sp810_of_get, NULL);
-
- /* Select "better" (faster) parent for SP804 timers */
- refclk = of_clk_get_by_name(node, "refclk");
- timclk = of_clk_get_by_name(node, "timclk");
- if (!WARN_ON(IS_ERR(refclk) || IS_ERR(timclk))) {
- int i = 0;
-
- if (clk_get_rate(refclk) > clk_get_rate(timclk))
- clk = refclk;
- else
- clk = timclk;
-
- for (i = 0; i < ARRAY_SIZE(vexpress_sp810_timerclken); i++)
- WARN_ON(clk_set_parent(vexpress_sp810_timerclken[i],
- clk));
- }
-
- WARN_ON(clk_register_clkdev(vexpress_sp810_timerclken[0],
- "v2m-timer0", "sp804"));
- WARN_ON(clk_register_clkdev(vexpress_sp810_timerclken[1],
- "v2m-timer1", "sp804"));
-}
-
-#endif
diff --git a/drivers/clocksource/sunxi_timer.c b/drivers/clocksource/sunxi_timer.c
index 4086b916715..0ce85e29769 100644
--- a/drivers/clocksource/sunxi_timer.c
+++ b/drivers/clocksource/sunxi_timer.c
@@ -23,7 +23,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sunxi_timer.h>
-#include <linux/clk-provider.h>
+#include <linux/clk/sunxi.h>
#define TIMER_CTL_REG 0x00
#define TIMER_CTL_ENABLE (1 << 0)
@@ -123,7 +123,7 @@ void __init sunxi_timer_init(void)
if (irq <= 0)
panic("Can't parse IRQ");
- of_clk_init(NULL);
+ sunxi_init_clocks();
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 2ec7725f4a0..a9bb140bc86 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -753,6 +753,7 @@ static struct mfd_cell ab3100_devs[] = {
},
{
.name = "ab3100-regulators",
+ .of_compatible = "stericsson,ab3100-regulators",
.id = -1,
},
{
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index c79ab843333..493948a38fc 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -220,35 +220,6 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
return ret;
}
-static int pm8606_preg_enable(struct regulator_dev *rdev)
-{
- struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-
- return pm860x_set_bits(info->i2c, rdev->desc->enable_reg,
- 1 << rdev->desc->enable_mask, 0);
-}
-
-static int pm8606_preg_disable(struct regulator_dev *rdev)
-{
- struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-
- return pm860x_set_bits(info->i2c, rdev->desc->enable_reg,
- 1 << rdev->desc->enable_mask,
- 1 << rdev->desc->enable_mask);
-}
-
-static int pm8606_preg_is_enabled(struct regulator_dev *rdev)
-{
- struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
-
- ret = pm860x_reg_read(info->i2c, rdev->desc->enable_reg);
- if (ret < 0)
- return ret;
-
- return !((unsigned char)ret & (1 << rdev->desc->enable_mask));
-}
-
static struct regulator_ops pm8607_regulator_ops = {
.list_voltage = pm8607_list_voltage,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -259,9 +230,9 @@ static struct regulator_ops pm8607_regulator_ops = {
};
static struct regulator_ops pm8606_preg_ops = {
- .enable = pm8606_preg_enable,
- .disable = pm8606_preg_disable,
- .is_enabled = pm8606_preg_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
};
#define PM8606_PREG(ereg, ebit) \
@@ -274,6 +245,7 @@ static struct regulator_ops pm8606_preg_ops = {
.owner = THIS_MODULE, \
.enable_reg = PM8606_##ereg, \
.enable_mask = (ebit), \
+ .enable_is_inverted = true, \
}, \
}
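With .enable_is_inverted set, the generic regmap helpers treat a cleared enable bit as "on", which is the sense the removed hand-rolled ops encoded; roughly (a sketch of the resulting behaviour, not the framework source):

	unsigned int val;
	bool enabled;

	regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
	enabled = !(val & rdev->desc->enable_mask);	/* cleared bit == enabled */
	/* enable() clears the masked bits, disable() sets them */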
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 6e8250382de..47a34ff88f9 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
-obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
+obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o ab8500-ext.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 111ec69a3e9..3be9e46594a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -17,6 +17,8 @@
#include <linux/regulator/driver.h>
#include <linux/mfd/ab3100.h>
#include <linux/mfd/abx500.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
/* LDO registers and some handy masking definitions for AB3100 */
#define AB3100_LDO_A 0x40
@@ -345,7 +347,11 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
{
struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
- return abreg->plfdata->external_voltage;
+ if (abreg->plfdata)
+ return abreg->plfdata->external_voltage;
+ else
+ /* TODO: encode external voltage into device tree */
+ return 0;
}
static struct regulator_ops regulator_ops_fixed = {
@@ -488,16 +494,174 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
},
};
+static int ab3100_regulator_register(struct platform_device *pdev,
+ struct ab3100_platform_data *plfdata,
+ struct regulator_init_data *init_data,
+ struct device_node *np,
+ int id)
+{
+ struct regulator_desc *desc;
+ struct ab3100_regulator *reg;
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int err, i;
+
+ for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
+ desc = &ab3100_regulator_desc[i];
+ if (desc->id == id)
+ break;
+ }
+ if (desc->id != id)
+ return -ENODEV;
+
+ /* Same index used for this array */
+ reg = &ab3100_regulators[i];
+
+ /*
+ * Initialize per-regulator struct.
+ * Inherit platform data, this comes down from the
+ * i2c boarddata, from the machine. So if you want to
+ * see what it looks like for a certain machine, go
+ * into the machine I2C setup.
+ */
+ reg->dev = &pdev->dev;
+ if (plfdata) {
+ reg->plfdata = plfdata;
+ config.init_data = &plfdata->reg_constraints[i];
+ } else if (np) {
+ config.of_node = np;
+ config.init_data = init_data;
+ }
+ config.dev = &pdev->dev;
+ config.driver_data = reg;
+
+ rdev = regulator_register(desc, &config);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ dev_err(&pdev->dev,
+ "%s: failed to register regulator %s err %d\n",
+ __func__, desc->name,
+ err);
+ return err;
+ }
+
+ /* Then set a pointer back to the registered regulator */
+ reg->rdev = rdev;
+ return 0;
+}
+
+static struct of_regulator_match ab3100_regulator_matches[] = {
+ { .name = "ab3100_ldo_a", .driver_data = (void *) AB3100_LDO_A, },
+ { .name = "ab3100_ldo_c", .driver_data = (void *) AB3100_LDO_C, },
+ { .name = "ab3100_ldo_d", .driver_data = (void *) AB3100_LDO_D, },
+ { .name = "ab3100_ldo_e", .driver_data = (void *) AB3100_LDO_E, },
+ { .name = "ab3100_ldo_f", .driver_data = (void *) AB3100_LDO_F },
+ { .name = "ab3100_ldo_g", .driver_data = (void *) AB3100_LDO_G },
+ { .name = "ab3100_ldo_h", .driver_data = (void *) AB3100_LDO_H },
+ { .name = "ab3100_ldo_k", .driver_data = (void *) AB3100_LDO_K },
+ { .name = "ab3100_ext", .driver_data = (void *) AB3100_LDO_EXT },
+ { .name = "ab3100_buck", .driver_data = (void *) AB3100_BUCK },
+};
+
/*
- * NOTE: the following functions are regulators pluralis - it is the
- * binding to the AB3100 core driver and the parent platform device
- * for all the different regulators.
+ * Initial settings of ab3100 registers.
+ * Common to the LDO regulator settings below: bits 7-5 control voltage,
+ * bit 4 turns the regulator ON(1) or OFF(0), bits 3-2 control sleep
+ * enable and bits 1-0 control sleep mode.
*/
+/* LDO_A 0x16: 2.75V, ON, SLEEP_A, SLEEP OFF GND */
+#define LDO_A_SETTING 0x16
+/* LDO_C 0x10: 2.65V, ON, SLEEP_A or B, SLEEP full power */
+#define LDO_C_SETTING 0x10
+/* LDO_D 0x10: 2.65V, ON, sleep mode not used */
+#define LDO_D_SETTING 0x10
+/* LDO_E 0x10: 1.8V, ON, SLEEP_A or B, SLEEP full power */
+#define LDO_E_SETTING 0x10
+/* LDO_E SLEEP 0x00: 1.8V, not used, SLEEP_A or B, not used */
+#define LDO_E_SLEEP_SETTING 0x00
+/* LDO_F 0xD0: 2.5V, ON, SLEEP_A or B, SLEEP full power */
+#define LDO_F_SETTING 0xD0
+/* LDO_G 0x00: 2.85V, OFF, SLEEP_A or B, SLEEP full power */
+#define LDO_G_SETTING 0x00
+/* LDO_H 0x18: 2.75V, ON, SLEEP_B, SLEEP full power */
+#define LDO_H_SETTING 0x18
+/* LDO_K 0x00: 2.75V, OFF, SLEEP_A or B, SLEEP full power */
+#define LDO_K_SETTING 0x00
+/* LDO_EXT 0x00: Voltage not set, OFF, not used, not used */
+#define LDO_EXT_SETTING 0x00
+/* BUCK 0x7D: 1.2V, ON, SLEEP_A and B, SLEEP low power */
+#define BUCK_SETTING 0x7D
+/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */
+#define BUCK_SLEEP_SETTING 0xAC
+
+static const u8 ab3100_reg_initvals[] = {
+ LDO_A_SETTING,
+ LDO_C_SETTING,
+ LDO_E_SETTING,
+ LDO_E_SLEEP_SETTING,
+ LDO_F_SETTING,
+ LDO_G_SETTING,
+ LDO_H_SETTING,
+ LDO_K_SETTING,
+ LDO_EXT_SETTING,
+ BUCK_SETTING,
+ BUCK_SLEEP_SETTING,
+ LDO_D_SETTING,
+};
+
+static int ab3100_regulators_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
+ struct ab3100_regulator *reg = &ab3100_regulators[i];
+
+ regulator_unregister(reg->rdev);
+ reg->rdev = NULL;
+ }
+ return 0;
+}
+
+static int
+ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
+{
+ int err, i;
+
+ /*
+ * Set up the regulator registers, as was previously done with
+ * platform data.
+ */
+ /* Set up regulators */
+ for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
+ err = abx500_set_register_interruptible(&pdev->dev, 0,
+ ab3100_reg_init_order[i],
+ ab3100_reg_initvals[i]);
+ if (err) {
+ dev_err(&pdev->dev, "regulator initialization failed with error %d\n",
+ err);
+ return err;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ab3100_regulator_matches); i++) {
+ err = ab3100_regulator_register(
+ pdev, NULL, ab3100_regulator_matches[i].init_data,
+ ab3100_regulator_matches[i].of_node,
+ (int) ab3100_regulator_matches[i].driver_data);
+ if (err) {
+ ab3100_regulators_remove(pdev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
- struct regulator_config config = { };
+ struct device_node *np = pdev->dev.of_node;
int err = 0;
u8 data;
int i;
@@ -516,6 +680,18 @@ static int ab3100_regulators_probe(struct platform_device *pdev)
dev_notice(&pdev->dev,
"chip is in inactive mode (Cold start)\n");
+ if (np) {
+ err = of_regulator_match(&pdev->dev, np,
+ ab3100_regulator_matches,
+ ARRAY_SIZE(ab3100_regulator_matches));
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Error parsing regulator init data: %d\n", err);
+ return err;
+ }
+ return ab3100_regulator_of_probe(pdev, np);
+ }
+
/* Set up regulators */
for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
err = abx500_set_register_interruptible(&pdev->dev, 0,
@@ -530,59 +706,19 @@ static int ab3100_regulators_probe(struct platform_device *pdev)
/* Register the regulators */
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct ab3100_regulator *reg = &ab3100_regulators[i];
- struct regulator_dev *rdev;
-
- /*
- * Initialize per-regulator struct.
- * Inherit platform data, this comes down from the
- * i2c boarddata, from the machine. So if you want to
- * see what it looks like for a certain machine, go
- * into the machine I2C setup.
- */
- reg->dev = &pdev->dev;
- reg->plfdata = plfdata;
+ struct regulator_desc *desc = &ab3100_regulator_desc[i];
- config.dev = &pdev->dev;
- config.driver_data = reg;
- config.init_data = &plfdata->reg_constraints[i];
-
- /*
- * Register the regulator, pass around
- * the ab3100_regulator struct
- */
- rdev = regulator_register(&ab3100_regulator_desc[i], &config);
- if (IS_ERR(rdev)) {
- err = PTR_ERR(rdev);
- dev_err(&pdev->dev,
- "%s: failed to register regulator %s err %d\n",
- __func__, ab3100_regulator_desc[i].name,
- err);
- /* remove the already registered regulators */
- while (--i >= 0)
- regulator_unregister(ab3100_regulators[i].rdev);
+ err = ab3100_regulator_register(pdev, plfdata, NULL, NULL,
+ desc->id);
+ if (err) {
+ ab3100_regulators_remove(pdev);
return err;
}
-
- /* Then set a pointer back to the registered regulator */
- reg->rdev = rdev;
}
return 0;
}
-static int ab3100_regulators_remove(struct platform_device *pdev)
-{
- int i;
-
- for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
- struct ab3100_regulator *reg = &ab3100_regulators[i];
-
- regulator_unregister(reg->rdev);
- }
- return 0;
-}
-
static struct platform_driver ab3100_regulators_driver = {
.driver = {
.name = "ab3100-regulators",
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
new file mode 100644
index 00000000000..b4d45472aae
--- /dev/null
+++ b/drivers/regulator/ab8500-ext.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ *
+ * This file is based on drivers/regulator/ab8500.c
+ *
+ * AB8500 external regulators
+ *
+ * ab8500-ext supports the following regulators:
+ * - VextSupply3
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/regulator/ab8500.h>
+
+/**
+ * struct ab8500_ext_regulator_info - ab8500 regulator information
+ * @dev: device pointer
+ * @desc: regulator description
+ * @rdev: regulator device
+ * @cfg: regulator configuration (extension of regulator FW configuration)
+ * @update_bank: bank to control on/off
+ * @update_reg: register to control on/off
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_hp: bits to set EN pin active (LPn pin inactive)
+ * normally this means high power mode
+ * @update_val_lp: bits to set EN pin active and LPn pin active
+ * normally this means low power mode
+ * @update_val_hw: bits to set regulator pins in HW control
+ * SysClkReq pins and logic will choose mode
+ */
+struct ab8500_ext_regulator_info {
+ struct device *dev;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct ab8500_ext_regulator_cfg *cfg;
+ u8 update_bank;
+ u8 update_reg;
+ u8 update_mask;
+ u8 update_val;
+ u8 update_val_hp;
+ u8 update_val_lp;
+ u8 update_val_hw;
+};
+
+static int ab8500_ext_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /*
+ * To satisfy both HW high power request and SW request, the regulator
+ * must be on in high power.
+ */
+ if (info->cfg && info->cfg->hwreq)
+ regval = info->update_val_hp;
+ else
+ regval = info->update_val;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set enable bits for regulator\n");
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev),
+ "%s-enable (bank, reg, mask, value): 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return 0;
+}
+
+static int ab8500_ext_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the regulator in HW request mode if configured
+ */
+ if (info->cfg && info->cfg->hwreq)
+ regval = info->update_val_hw;
+ else
+ regval = 0;
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(info->rdev),
+ "couldn't set disable bits for regulator\n");
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ return 0;
+}
+
+static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ int ret;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->update_bank, info->update_reg, &regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read 0x%x register\n", info->update_reg);
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):"
+ " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+
+ if (((regval & info->update_mask) == info->update_val_lp) ||
+ ((regval & info->update_mask) == info->update_val_hp))
+ return 1;
+ else
+ return 0;
+}
+
+static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ regval = info->update_val_hp;
+ break;
+ case REGULATOR_MODE_IDLE:
+ regval = info->update_val_lp;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * If regulator is enabled and info->cfg->hwreq is set, the regulator
+ * must be on in high power, so we don't need to write the register
+ * with the same value.
+ */
+ if (ab8500_ext_regulator_is_enabled(rdev) &&
+ !(info->cfg && info->cfg->hwreq)) {
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "Could not set regulator mode.\n");
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->update_bank, info->update_reg,
+ info->update_mask, regval);
+ }
+
+ info->update_val = regval;
+
+ return 0;
+}
+
+static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->update_val == info->update_val_hp)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (info->update_val == info->update_val_lp)
+ ret = REGULATOR_MODE_IDLE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int ab8500_ext_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct regulation_constraints *regu_constraints = rdev->constraints;
+
+ if (regu_constraints == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator constraints null pointer\n");
+ return -EINVAL;
+ }
+ /* return the uV for the fixed regulators */
+ if (regu_constraints->min_uV && regu_constraints->max_uV) {
+ if (regu_constraints->min_uV == regu_constraints->max_uV)
+ return regu_constraints->min_uV;
+ }
+ return -EINVAL;
+}
+
+static struct regulator_ops ab8500_ext_regulator_ops = {
+ .enable = ab8500_ext_regulator_enable,
+ .disable = ab8500_ext_regulator_disable,
+ .is_enabled = ab8500_ext_regulator_is_enabled,
+ .set_mode = ab8500_ext_regulator_set_mode,
+ .get_mode = ab8500_ext_regulator_get_mode,
+ .list_voltage = ab8500_ext_list_voltage,
+};
+
+static struct ab8500_ext_regulator_info
+ ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = {
+ [AB8500_EXT_SUPPLY1] = {
+ .desc = {
+ .name = "VEXTSUPPLY1",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_hp = 0x01,
+ .update_val_lp = 0x03,
+ .update_val_hw = 0x02,
+ },
+ [AB8500_EXT_SUPPLY2] = {
+ .desc = {
+ .name = "VEXTSUPPLY2",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_hp = 0x04,
+ .update_val_lp = 0x0c,
+ .update_val_hw = 0x08,
+ },
+ [AB8500_EXT_SUPPLY3] = {
+ .desc = {
+ .name = "VEXTSUPPLY3",
+ .ops = &ab8500_ext_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_EXT_SUPPLY3,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ },
+ .update_bank = 0x04,
+ .update_reg = 0x08,
+ .update_mask = 0x30,
+ .update_val = 0x10,
+ .update_val_hp = 0x10,
+ .update_val_lp = 0x30,
+ .update_val_hw = 0x20,
+ },
+};
+
+int ab8500_ext_regulator_init(struct platform_device *pdev)
+{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
+ struct regulator_config config = { };
+ int i, err;
+
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
+ return -EINVAL;
+ }
+
+ pdata = ppdata->regulator;
+ if (!pdata) {
+ dev_err(&pdev->dev, "null pdata\n");
+ return -EINVAL;
+ }
+
+ /* make sure the platform data has the correct size */
+ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
+ dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
+ return -EINVAL;
+ }
+
+ /* check for AB8500 2.x */
+ if (is_ab8500_2p0_or_earlier(ab8500)) {
+ struct ab8500_ext_regulator_info *info;
+
+ /* VextSupply3LPn is inverted on AB8500 2.x */
+ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3];
+ info->update_val = 0x30;
+ info->update_val_hp = 0x30;
+ info->update_val_lp = 0x10;
+ }
+
+ /* register all regulators */
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+
+ /* assign per-regulator data */
+ info = &ab8500_ext_regulator_info[i];
+ info->dev = &pdev->dev;
+ info->cfg = (struct ab8500_ext_regulator_cfg *)
+ pdata->ext_regulator[i].driver_data;
+
+ config.dev = &pdev->dev;
+ config.init_data = &pdata->ext_regulator[i];
+ config.driver_data = info;
+
+ /* register regulator with framework */
+ info->rdev = regulator_register(&info->desc, &config);
+ if (IS_ERR(info->rdev)) {
+ err = PTR_ERR(info->rdev);
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ /* when we fail, un-register all earlier regulators */
+ while (--i >= 0) {
+ info = &ab8500_ext_regulator_info[i];
+ regulator_unregister(info->rdev);
+ }
+ return err;
+ }
+
+ dev_dbg(rdev_get_dev(info->rdev),
+ "%s-probed\n", info->desc.name);
+ }
+
+ return 0;
+}
+
+void ab8500_ext_regulator_exit(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) {
+ struct ab8500_ext_regulator_info *info = NULL;
+ info = &ab8500_ext_regulator_info[i];
+
+ dev_vdbg(rdev_get_dev(info->rdev),
+ "%s-remove\n", info->desc.name);
+
+ regulator_unregister(info->rdev);
+ }
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_DESCRIPTION("AB8500 external regulator driver");
+MODULE_ALIAS("platform:ab8500-ext-regulator");
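Reading the table above for VEXTSUPPLY1 gives the concrete values the enable/disable and set_mode paths write into bits 1:0 of bank 0x04, register 0x08 (a worked trace of the code above, no new behaviour):

	/* VEXTSUPPLY1, update_mask = 0x03:
	 *   REGULATOR_MODE_NORMAL -> 0x01 (update_val_hp, EN active)
	 *   REGULATOR_MODE_IDLE   -> 0x03 (update_val_lp, EN and LPn active)
	 *   disable, cfg->hwreq   -> 0x02 (update_val_hw, SysClkReq logic decides)
	 *   disable, no hwreq     -> 0x00
	 */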
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 09014f38a94..f6656b8c28b 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -5,11 +5,15 @@
*
* Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
* Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
*
* AB8500 peripheral regulators
*
* AB8500 supports the following regulators:
* VAUX1/2/3, VINTCORE, VTVOUT, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
+ *
+ * AB8505 supports the following regulators:
+ * VAUX1/2/3/4/5/6, VINTCORE, VADC, VUSB, VAUDIO, VAMIC1/2, VDMIC, VANA
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -26,33 +30,64 @@
#include <linux/slab.h>
/**
+ * struct ab8500_shared_mode - is used when mode is shared between
+ * two regulators.
+ * @shared_regulator: pointer to the other sharing regulator
+ * @lp_mode_req: low power mode requested by this regulator
+ */
+struct ab8500_shared_mode {
+ struct ab8500_regulator_info *shared_regulator;
+ bool lp_mode_req;
+};
+
+/**
* struct ab8500_regulator_info - ab8500 regulator information
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
+ * @shared_mode: used when mode is shared between two regulators
+ * @load_lp_uA: maximum load in idle (low power) mode
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
- * @update_mask: mask to enable/disable regulator
- * @update_val_enable: bits to enable the regulator in normal (high power) mode
+ * @update_mask: mask to enable/disable and set mode of regulator
+ * @update_val: bits holding the regulator current mode
+ * @update_val_idle: bits to enable the regulator in idle (low power) mode
+ * @update_val_normal: bits to enable the regulator in normal (high power) mode
+ * @mode_bank: bank with location of mode register
+ * @mode_reg: mode register
+ * @mode_mask: mask for setting mode
+ * @mode_val_idle: mode setting for low power
+ * @mode_val_normal: mode setting for normal power
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
- * @voltage_shift: shift to control regulator voltage
- * @delay: startup/set voltage delay in us
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
+ struct ab8500_shared_mode *shared_mode;
+ int load_lp_uA;
u8 update_bank;
u8 update_reg;
u8 update_mask;
- u8 update_val_enable;
+ u8 update_val;
+ u8 update_val_idle;
+ u8 update_val_normal;
+ u8 mode_bank;
+ u8 mode_reg;
+ u8 mode_mask;
+ u8 mode_val_idle;
+ u8 mode_val_normal;
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
- u8 voltage_shift;
- unsigned int delay;
+ struct {
+ u8 voltage_limit;
+ u8 voltage_bank;
+ u8 voltage_reg;
+ u8 voltage_mask;
+ } expand_register;
};
/* voltage tables for the vauxn/vintcore supplies */
@@ -86,6 +121,44 @@ static const unsigned int ldo_vaux3_voltages[] = {
2910000,
};
+static const unsigned int ldo_vaux56_voltages[] = {
+ 1800000,
+ 1050000,
+ 1100000,
+ 1200000,
+ 1500000,
+ 2200000,
+ 2500000,
+ 2790000,
+};
+
+static const unsigned int ldo_vaux3_ab8540_voltages[] = {
+ 1200000,
+ 1500000,
+ 1800000,
+ 2100000,
+ 2500000,
+ 2750000,
+ 2790000,
+ 2910000,
+ 3050000,
+};
+
+static const unsigned int ldo_vaux56_ab8540_voltages[] = {
+ 750000, 760000, 770000, 780000, 790000, 800000,
+ 810000, 820000, 830000, 840000, 850000, 860000,
+ 870000, 880000, 890000, 900000, 910000, 920000,
+ 930000, 940000, 950000, 960000, 970000, 980000,
+ 990000, 1000000, 1010000, 1020000, 1030000,
+ 1040000, 1050000, 1060000, 1070000, 1080000,
+ 1090000, 1100000, 1110000, 1120000, 1130000,
+ 1140000, 1150000, 1160000, 1170000, 1180000,
+ 1190000, 1200000, 1210000, 1220000, 1230000,
+ 1240000, 1250000, 1260000, 1270000, 1280000,
+ 1290000, 1300000, 1310000, 1320000, 1330000,
+ 1340000, 1350000, 1360000, 1800000, 2790000,
+};
+
static const unsigned int ldo_vintcore_voltages[] = {
1200000,
1225000,
@@ -96,6 +169,72 @@ static const unsigned int ldo_vintcore_voltages[] = {
1350000,
};
+static const unsigned int ldo_sdio_voltages[] = {
+ 1160000,
+ 1050000,
+ 1100000,
+ 1500000,
+ 1800000,
+ 2200000,
+ 2910000,
+ 3050000,
+};
+
+static const unsigned int fixed_1200000_voltage[] = {
+ 1200000,
+};
+
+static const unsigned int fixed_1800000_voltage[] = {
+ 1800000,
+};
+
+static const unsigned int fixed_2000000_voltage[] = {
+ 2000000,
+};
+
+static const unsigned int fixed_2050000_voltage[] = {
+ 2050000,
+};
+
+static const unsigned int fixed_3300000_voltage[] = {
+ 3300000,
+};
+
+static const unsigned int ldo_vana_voltages[] = {
+ 1050000,
+ 1075000,
+ 1100000,
+ 1125000,
+ 1150000,
+ 1175000,
+ 1200000,
+ 1225000,
+};
+
+static const unsigned int ldo_vaudio_voltages[] = {
+ 2000000,
+ 2100000,
+ 2200000,
+ 2300000,
+ 2400000,
+ 2500000,
+ 2600000,
+ 2600000, /* Duplicated in Vaudio and IsoUicc Control register. */
+};
+
+static const unsigned int ldo_vdmic_voltages[] = {
+ 1800000,
+ 1900000,
+ 2000000,
+ 2850000,
+};
+
+static DEFINE_MUTEX(shared_mode_mutex);
+static struct ab8500_shared_mode ldo_anamic1_shared;
+static struct ab8500_shared_mode ldo_anamic2_shared;
+static struct ab8500_shared_mode ab8540_ldo_anamic1_shared;
+static struct ab8500_shared_mode ab8540_ldo_anamic2_shared;
+
static int ab8500_regulator_enable(struct regulator_dev *rdev)
{
int ret;
@@ -108,15 +247,17 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev)
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
- if (ret < 0)
+ info->update_mask, info->update_val);
+ if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't set enable bits for regulator\n");
+ return ret;
+ }
dev_vdbg(rdev_get_dev(rdev),
"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->update_bank, info->update_reg,
- info->update_mask, info->update_val_enable);
+ info->update_mask, info->update_val);
return ret;
}
@@ -134,9 +275,11 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev)
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->update_bank, info->update_reg,
info->update_mask, 0x0);
- if (ret < 0)
+ if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"couldn't set disable bits for regulator\n");
+ return ret;
+ }
dev_vdbg(rdev_get_dev(rdev),
"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
@@ -172,14 +315,170 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
info->update_mask, regval);
if (regval & info->update_mask)
- return true;
+ return 1;
+ else
+ return 0;
+}
+
+static unsigned int ab8500_regulator_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV,
+ int output_uV, int load_uA)
+{
+ unsigned int mode;
+
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (load_uA <= info->load_lp_uA)
+ mode = REGULATOR_MODE_IDLE;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
+
+static int ab8500_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ int ret = 0;
+ u8 bank, reg, mask, val;
+ bool lp_mode_req = false;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (info->mode_mask) {
+ bank = info->mode_bank;
+ reg = info->mode_reg;
+ mask = info->mode_mask;
+ } else {
+ bank = info->update_bank;
+ reg = info->update_reg;
+ mask = info->update_mask;
+ }
+
+ if (info->shared_mode)
+ mutex_lock(&shared_mode_mutex);
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ if (info->shared_mode)
+ lp_mode_req = false;
+
+ if (info->mode_mask)
+ val = info->mode_val_normal;
+ else
+ val = info->update_val_normal;
+ break;
+ case REGULATOR_MODE_IDLE:
+ if (info->shared_mode) {
+ struct ab8500_regulator_info *shared_regulator;
+
+ shared_regulator = info->shared_mode->shared_regulator;
+ if (!shared_regulator->shared_mode->lp_mode_req) {
+ /* The other regulator prevents LP mode */
+ info->shared_mode->lp_mode_req = true;
+ goto out_unlock;
+ }
+
+ lp_mode_req = true;
+ }
+
+ if (info->mode_mask)
+ val = info->mode_val_idle;
+ else
+ val = info->update_val_idle;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (info->mode_mask || ab8500_regulator_is_enabled(rdev)) {
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ bank, reg, mask, val);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set regulator mode\n");
+ goto out_unlock;
+ }
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_mode (bank, reg, mask, value): "
+ "0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, bank, reg,
+ mask, val);
+ }
+
+ if (!info->mode_mask)
+ info->update_val = val;
+
+ if (info->shared_mode)
+ info->shared_mode->lp_mode_req = lp_mode_req;
+
+out_unlock:
+ if (info->shared_mode)
+ mutex_unlock(&shared_mode_mutex);
+
+ return ret;
+}
+
+static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+ u8 val;
+ u8 val_normal;
+ u8 val_idle;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ /* Need special handling for shared mode */
+ if (info->shared_mode) {
+ if (info->shared_mode->lp_mode_req)
+ return REGULATOR_MODE_IDLE;
+ else
+ return REGULATOR_MODE_NORMAL;
+ }
+
+ if (info->mode_mask) {
+ /* Dedicated register for handling mode */
+ ret = abx500_get_register_interruptible(info->dev,
+ info->mode_bank, info->mode_reg, &val);
+		if (ret < 0) {
+			dev_err(rdev_get_dev(rdev),
+				"couldn't read mode register for regulator\n");
+			return ret;
+		}
+
+		val = val & info->mode_mask;
+
+ val_normal = info->mode_val_normal;
+ val_idle = info->mode_val_idle;
+ } else {
+ /* Mode register same as enable register */
+ val = info->update_val;
+ val_normal = info->update_val_normal;
+ val_idle = info->update_val_idle;
+ }
+
+ if (val == val_normal)
+ ret = REGULATOR_MODE_NORMAL;
+ else if (val == val_idle)
+ ret = REGULATOR_MODE_IDLE;
else
- return false;
+ ret = -EINVAL;
+
+ return ret;
}
static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
- int ret, val;
+ int ret, voltage_shift;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
u8 regval;
@@ -188,6 +487,8 @@ static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
return -EINVAL;
}
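+	/* the selector field position follows from its mask (lowest set bit) */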
+ voltage_shift = ffs(info->voltage_mask) - 1;
+
ret = abx500_get_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg, &regval);
if (ret < 0) {
@@ -201,16 +502,62 @@ static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
"0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
info->desc.name, info->voltage_bank,
info->voltage_reg, info->voltage_mask,
- info->voltage_shift, regval);
+ voltage_shift, regval);
- val = regval & info->voltage_mask;
- return val >> info->voltage_shift;
+ return (regval & info->voltage_mask) >> voltage_shift;
+}
+
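+/*
+ * AB8540 Vaux3 carries one extra selector bit in a separate "expand"
+ * register: when that bit is set, the voltage table entry at index
+ * voltage_limit is selected and the regular selector field is ignored.
+ */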
+static int ab8540_aux3_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+ int ret, voltage_shift;
+ struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval, regval_expand;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->expand_register.voltage_bank,
+ info->expand_register.voltage_reg, &regval_expand);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read voltage expand reg for regulator\n");
+ return ret;
+ }
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-get_voltage expand (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->expand_register.voltage_bank,
+ info->expand_register.voltage_reg,
+ info->expand_register.voltage_mask, regval_expand);
+
+ if (regval_expand & info->expand_register.voltage_mask)
+ return info->expand_register.voltage_limit;
+
+ ret = abx500_get_register_interruptible(info->dev,
+ info->voltage_bank, info->voltage_reg, &regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't read voltage reg for regulator\n");
+ return ret;
+ }
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
+
+ voltage_shift = ffs(info->voltage_mask) - 1;
+
+ return (regval & info->voltage_mask) >> voltage_shift;
}
static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
- int ret;
+ int ret, voltage_shift;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
u8 regval;
@@ -219,8 +566,10 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
return -EINVAL;
}
+ voltage_shift = ffs(info->voltage_mask) - 1;
+
/* set the registers for the request */
- regval = (u8)selector << info->voltage_shift;
+ regval = (u8)selector << voltage_shift;
ret = abx500_mask_and_set_register_interruptible(info->dev,
info->voltage_bank, info->voltage_reg,
info->voltage_mask, regval);
@@ -237,32 +586,121 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
- unsigned int old_sel,
- unsigned int new_sel)
+static int ab8540_aux3_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
+ int ret;
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+ u8 regval, regval_expand;
+
+ if (info == NULL) {
+ dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
+ return -EINVAL;
+ }
+
+ if (selector < info->expand_register.voltage_limit) {
+ int voltage_shift = ffs(info->voltage_mask) - 1;
+
+ regval = (u8)selector << voltage_shift;
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set voltage reg for regulator\n");
+ return ret;
+ }
- return info->delay;
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->voltage_bank, info->voltage_reg,
+ info->voltage_mask, regval);
+
+ regval_expand = 0;
+ } else {
+ regval_expand = info->expand_register.voltage_mask;
+ }
+
+ ret = abx500_mask_and_set_register_interruptible(info->dev,
+ info->expand_register.voltage_bank,
+ info->expand_register.voltage_reg,
+ info->expand_register.voltage_mask,
+ regval_expand);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev),
+ "couldn't set expand voltage reg for regulator\n");
+ return ret;
+ }
+
+ dev_vdbg(rdev_get_dev(rdev),
+ "%s-set_voltage expand (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+ info->desc.name, info->expand_register.voltage_bank,
+ info->expand_register.voltage_reg,
+ info->expand_register.voltage_mask, regval_expand);
+
+ return 0;
}
-static struct regulator_ops ab8500_regulator_ops = {
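+/*
+ * Operations are grouped by capability: variable voltage and/or
+ * normal/idle mode support, plus the AB8540 Vaux3 variant with the
+ * expanded voltage selector.
+ */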
+static struct regulator_ops ab8500_regulator_volt_mode_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
+ .get_voltage_sel = ab8500_regulator_get_voltage_sel,
+ .set_voltage_sel = ab8500_regulator_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
+};
+
+static struct regulator_ops ab8540_aux3_regulator_volt_mode_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_voltage_sel = ab8540_aux3_regulator_get_voltage_sel,
+ .set_voltage_sel = ab8540_aux3_regulator_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
+};
+
+static struct regulator_ops ab8500_regulator_volt_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage_sel = ab8500_regulator_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
- .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
-static struct regulator_ops ab8500_regulator_fixed_ops = {
+static struct regulator_ops ab8500_regulator_mode_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .get_optimum_mode = ab8500_regulator_get_optimum_mode,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
+ .list_voltage = regulator_list_voltage_table,
+};
+
+static struct regulator_ops ab8500_regulator_ops = {
+ .enable = ab8500_regulator_enable,
+ .disable = ab8500_regulator_disable,
+ .is_enabled = ab8500_regulator_is_enabled,
+ .list_voltage = regulator_list_voltage_table,
+};
+
+static struct regulator_ops ab8500_regulator_anamic_mode_ops = {
.enable = ab8500_regulator_enable,
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
- .list_voltage = regulator_list_voltage_linear,
+ .set_mode = ab8500_regulator_set_mode,
+ .get_mode = ab8500_regulator_get_mode,
+ .list_voltage = regulator_list_voltage_table,
};
+/* AB8500 regulator information */
static struct ab8500_regulator_info
ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
/*
@@ -274,17 +712,21 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX1] = {
.desc = {
.name = "LDO-AUX1",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
+ .enable_time = 200,
},
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
@@ -292,17 +734,21 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX2] = {
.desc = {
.name = "LDO-AUX2",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
.volt_table = ldo_vauxn_voltages,
+ .enable_time = 200,
},
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
@@ -310,17 +756,21 @@ static struct ab8500_regulator_info
[AB8500_LDO_AUX3] = {
.desc = {
.name = "LDO-AUX3",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
.volt_table = ldo_vaux3_voltages,
+ .enable_time = 450,
},
+ .load_lp_uA = 5000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
@@ -328,21 +778,24 @@ static struct ab8500_regulator_info
[AB8500_LDO_INTCORE] = {
.desc = {
.name = "LDO-INTCORE",
- .ops = &ab8500_regulator_ops,
+ .ops = &ab8500_regulator_volt_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
.volt_table = ldo_vintcore_voltages,
+ .enable_time = 750,
},
+ .load_lp_uA = 5000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
- .update_val_enable = 0x04,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
- .voltage_shift = 3,
},
/*
@@ -353,112 +806,984 @@ static struct ab8500_regulator_info
[AB8500_LDO_TVOUT] = {
.desc = {
.name = "LDO-TVOUT",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 2000000,
+ .volt_table = fixed_2000000_voltage,
+ .enable_time = 500,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x82,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
+ },
+ [AB8500_LDO_AUDIO] = {
+ .desc = {
+ .name = "LDO-AUDIO",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .enable_time = 140,
+ .volt_table = fixed_2000000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x02,
+ .update_val = 0x02,
+ },
+ [AB8500_LDO_ANAMIC1] = {
+ .desc = {
+ .name = "LDO-ANAMIC1",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .enable_time = 500,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x08,
+ .update_val = 0x08,
+ },
+ [AB8500_LDO_ANAMIC2] = {
+ .desc = {
+ .name = "LDO-ANAMIC2",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANAMIC2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .enable_time = 500,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x10,
+ .update_val = 0x10,
+ },
+ [AB8500_LDO_DMIC] = {
+ .desc = {
+ .name = "LDO-DMIC",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_DMIC,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .enable_time = 420,
+ .volt_table = fixed_1800000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x04,
+ .update_val = 0x04,
+ },
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
+ [AB8500_LDO_ANA] = {
+ .desc = {
+ .name = "LDO-ANA",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8500_LDO_ANA,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .enable_time = 140,
+ .volt_table = fixed_1200000_voltage,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x04,
+ .update_reg = 0x06,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ },
+};
+
+/* AB8505 regulator information */
+static struct ab8500_regulator_info
+ ab8505_regulator_info[AB8505_NUM_REGULATORS] = {
+ /*
+ * Variable Voltage Regulators
+ * name, min mV, max mV,
+ * update bank, reg, mask, enable val
+ * volt bank, reg, mask
+ */
+ [AB8505_LDO_AUX1] = {
+ .desc = {
+ .name = "LDO-AUX1",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX1,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x1f,
+ .voltage_mask = 0x0f,
+ },
+ [AB8505_LDO_AUX2] = {
+ .desc = {
+ .name = "LDO-AUX2",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX2,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x20,
+ .voltage_mask = 0x0f,
+ },
+ [AB8505_LDO_AUX3] = {
+ .desc = {
+ .name = "LDO-AUX3",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX3,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ .volt_table = ldo_vaux3_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x0a,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x21,
+ .voltage_mask = 0x07,
+ },
+ [AB8505_LDO_AUX4] = {
+ .desc = {
+ .name = "LDO-AUX4",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX4,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ /* values for Vaux4Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x2e,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux4SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x2f,
+ .voltage_mask = 0x0f,
+ },
+ [AB8505_LDO_AUX5] = {
+ .desc = {
+ .name = "LDO-AUX5",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX5,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux56_voltages),
+ .volt_table = ldo_vaux56_voltages,
+ },
+ .load_lp_uA = 2000,
+ /* values for CtrlVaux5 register */
+ .update_bank = 0x01,
+ .update_reg = 0x55,
+ .update_mask = 0x18,
+ .update_val = 0x10,
+ .update_val_idle = 0x18,
+ .update_val_normal = 0x10,
+ .voltage_bank = 0x01,
+ .voltage_reg = 0x55,
+ .voltage_mask = 0x07,
+ },
+ [AB8505_LDO_AUX6] = {
+ .desc = {
+ .name = "LDO-AUX6",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX6,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux56_voltages),
+ .volt_table = ldo_vaux56_voltages,
+ },
+ .load_lp_uA = 2000,
+ /* values for CtrlVaux6 register */
+ .update_bank = 0x01,
+ .update_reg = 0x56,
+ .update_mask = 0x18,
+ .update_val = 0x10,
+ .update_val_idle = 0x18,
+ .update_val_normal = 0x10,
+ .voltage_bank = 0x01,
+ .voltage_reg = 0x56,
+ .voltage_mask = 0x07,
+ },
+ [AB8505_LDO_INTCORE] = {
+ .desc = {
+ .name = "LDO-INTCORE",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_INTCORE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ .volt_table = ldo_vintcore_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x44,
+ .update_val = 0x04,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x80,
+ .voltage_mask = 0x38,
+ },
+
+ /*
+ * Fixed Voltage Regulators
+ * name, fixed mV,
+ * update bank, reg, mask, enable val
+ */
+ [AB8505_LDO_ADC] = {
+ .desc = {
+ .name = "LDO-ADC",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_ADC,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2000000_voltage,
.enable_time = 10000,
},
- .delay = 10000,
+ .load_lp_uA = 1000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
},
- [AB8500_LDO_USB] = {
+ [AB8505_LDO_USB] = {
.desc = {
.name = "LDO-USB",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_USB,
+ .id = AB8505_LDO_USB,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 3300000,
+ .volt_table = fixed_3300000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
- .update_val_enable = 0x01,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
},
- [AB8500_LDO_AUDIO] = {
+ [AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_volt_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_AUDIO,
+ .id = AB8505_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaudio_voltages),
+ .volt_table = ldo_vaudio_voltages,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x02,
+ .update_val = 0x02,
+ .voltage_bank = 0x01,
+ .voltage_reg = 0x57,
+ .voltage_mask = 0x70,
+ },
+ [AB8505_LDO_ANAMIC1] = {
+ .desc = {
+ .name = "LDO-ANAMIC1",
+ .ops = &ab8500_regulator_anamic_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_ANAMIC1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .shared_mode = &ldo_anamic1_shared,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x08,
+ .update_val = 0x08,
+ .mode_bank = 0x01,
+ .mode_reg = 0x54,
+ .mode_mask = 0x04,
+ .mode_val_idle = 0x04,
+ .mode_val_normal = 0x00,
+ },
+ [AB8505_LDO_ANAMIC2] = {
+ .desc = {
+ .name = "LDO-ANAMIC2",
+ .ops = &ab8500_regulator_anamic_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 2000000,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .shared_mode = &ldo_anamic2_shared,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x10,
+ .update_val = 0x10,
+ .mode_bank = 0x01,
+ .mode_reg = 0x54,
+ .mode_mask = 0x04,
+ .mode_val_idle = 0x04,
+ .mode_val_normal = 0x00,
+ },
+ [AB8505_LDO_AUX8] = {
+ .desc = {
+ .name = "LDO-AUX8",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_AUX8,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_1800000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x04,
+ .update_val = 0x04,
+ },
+ /*
+	 * Regulators with variable voltage and normal/idle modes
+ */
+ [AB8505_LDO_ANA] = {
+ .desc = {
+ .name = "LDO-ANA",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8505_LDO_ANA,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vana_voltages),
+ .volt_table = ldo_vana_voltages,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x04,
+ .update_reg = 0x06,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x29,
+		.voltage_mask = 0x07,
+ },
+};
+
+/* AB9540 regulator information */
+static struct ab8500_regulator_info
+ ab9540_regulator_info[AB9540_NUM_REGULATORS] = {
+ /*
+ * Variable Voltage Regulators
+ * name, min mV, max mV,
+ * update bank, reg, mask, enable val
+ * volt bank, reg, mask
+ */
+ [AB9540_LDO_AUX1] = {
+ .desc = {
+ .name = "LDO-AUX1",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUX1,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x1f,
+ .voltage_mask = 0x0f,
+ },
+ [AB9540_LDO_AUX2] = {
+ .desc = {
+ .name = "LDO-AUX2",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUX2,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x20,
+ .voltage_mask = 0x0f,
+ },
+ [AB9540_LDO_AUX3] = {
+ .desc = {
+ .name = "LDO-AUX3",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUX3,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ .volt_table = ldo_vaux3_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x0a,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x21,
+ .voltage_mask = 0x07,
+ },
+ [AB9540_LDO_AUX4] = {
+ .desc = {
+ .name = "LDO-AUX4",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUX4,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ /* values for Vaux4Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x2e,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux4SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x2f,
+ .voltage_mask = 0x0f,
+ },
+ [AB9540_LDO_INTCORE] = {
+ .desc = {
+ .name = "LDO-INTCORE",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_INTCORE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ .volt_table = ldo_vintcore_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x44,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x80,
+ .voltage_mask = 0x38,
+ },
+
+ /*
+ * Fixed Voltage Regulators
+ * name, fixed mV,
+ * update bank, reg, mask, enable val
+ */
+ [AB9540_LDO_TVOUT] = {
+ .desc = {
+ .name = "LDO-TVOUT",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_TVOUT,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2000000_voltage,
+ .enable_time = 10000,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x82,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
+ },
+ [AB9540_LDO_USB] = {
+ .desc = {
+ .name = "LDO-USB",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_USB,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_3300000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x82,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ },
+ [AB9540_LDO_AUDIO] = {
+ .desc = {
+ .name = "LDO-AUDIO",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB9540_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2000000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
- .update_val_enable = 0x02,
+ .update_val = 0x02,
},
- [AB8500_LDO_ANAMIC1] = {
+ [AB9540_LDO_ANAMIC1] = {
.desc = {
.name = "LDO-ANAMIC1",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_ANAMIC1,
+ .id = AB9540_LDO_ANAMIC1,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 2050000,
+ .volt_table = fixed_2050000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
- .update_val_enable = 0x08,
+ .update_val = 0x08,
},
- [AB8500_LDO_ANAMIC2] = {
+ [AB9540_LDO_ANAMIC2] = {
.desc = {
.name = "LDO-ANAMIC2",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_ANAMIC2,
+ .id = AB9540_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 2050000,
+ .volt_table = fixed_2050000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
- .update_val_enable = 0x10,
+ .update_val = 0x10,
},
- [AB8500_LDO_DMIC] = {
+ [AB9540_LDO_DMIC] = {
.desc = {
.name = "LDO-DMIC",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_DMIC,
+ .id = AB9540_LDO_DMIC,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 1800000,
+ .volt_table = fixed_1800000_voltage,
},
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
- .update_val_enable = 0x04,
+ .update_val = 0x04,
},
- [AB8500_LDO_ANA] = {
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
+ [AB9540_LDO_ANA] = {
.desc = {
.name = "LDO-ANA",
- .ops = &ab8500_regulator_fixed_ops,
+ .ops = &ab8500_regulator_mode_ops,
.type = REGULATOR_VOLTAGE,
- .id = AB8500_LDO_ANA,
+ .id = AB9540_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
- .min_uV = 1200000,
+ .volt_table = fixed_1200000_voltage,
},
+ .load_lp_uA = 1000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
- .update_val_enable = 0x04,
+ .update_val = 0x08,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x08,
},
+};
+
+/* AB8540 regulator information */
+static struct ab8500_regulator_info
+ ab8540_regulator_info[AB8540_NUM_REGULATORS] = {
+ /*
+ * Variable Voltage Regulators
+ * name, min mV, max mV,
+ * update bank, reg, mask, enable val
+ * volt bank, reg, mask
+ */
+ [AB8540_LDO_AUX1] = {
+ .desc = {
+ .name = "LDO-AUX1",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX1,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x1f,
+ .voltage_mask = 0x0f,
+ },
+ [AB8540_LDO_AUX2] = {
+ .desc = {
+ .name = "LDO-AUX2",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX2,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x09,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x20,
+ .voltage_mask = 0x0f,
+ },
+ [AB8540_LDO_AUX3] = {
+ .desc = {
+ .name = "LDO-AUX3",
+ .ops = &ab8540_aux3_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX3,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux3_ab8540_voltages),
+ .volt_table = ldo_vaux3_ab8540_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x04,
+ .update_reg = 0x0a,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x21,
+ .voltage_mask = 0x07,
+ .expand_register = {
+ .voltage_limit = 8,
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x01,
+ .voltage_mask = 0x10,
+ }
+ },
+ [AB8540_LDO_AUX4] = {
+ .desc = {
+ .name = "LDO-AUX4",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX4,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
+ },
+ .load_lp_uA = 5000,
+ /* values for Vaux4Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x2e,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux4SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x2f,
+ .voltage_mask = 0x0f,
+ },
+ [AB8540_LDO_AUX5] = {
+ .desc = {
+ .name = "LDO-AUX5",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX5,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux56_ab8540_voltages),
+ .volt_table = ldo_vaux56_ab8540_voltages,
+ },
+ .load_lp_uA = 20000,
+ /* values for Vaux5Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x32,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux5SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x33,
+ .voltage_mask = 0x3f,
+ },
+ [AB8540_LDO_AUX6] = {
+ .desc = {
+ .name = "LDO-AUX6",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUX6,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vaux56_ab8540_voltages),
+ .volt_table = ldo_vaux56_ab8540_voltages,
+ },
+ .load_lp_uA = 20000,
+ /* values for Vaux6Regu register */
+ .update_bank = 0x04,
+ .update_reg = 0x35,
+ .update_mask = 0x03,
+ .update_val = 0x01,
+ .update_val_idle = 0x03,
+ .update_val_normal = 0x01,
+ /* values for Vaux6SEL register */
+ .voltage_bank = 0x04,
+ .voltage_reg = 0x36,
+ .voltage_mask = 0x3f,
+ },
+ [AB8540_LDO_INTCORE] = {
+ .desc = {
+ .name = "LDO-INTCORE",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_INTCORE,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ .volt_table = ldo_vintcore_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x44,
+ .update_val = 0x44,
+ .update_val_idle = 0x44,
+ .update_val_normal = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x80,
+ .voltage_mask = 0x38,
+ },
+ /*
+ * Fixed Voltage Regulators
+ * name, fixed mV,
+ * update bank, reg, mask, enable val
+ */
+ [AB8540_LDO_TVOUT] = {
+ .desc = {
+ .name = "LDO-TVOUT",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_TVOUT,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2000000_voltage,
+ .enable_time = 10000,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x03,
+ .update_reg = 0x80,
+ .update_mask = 0x82,
+ .update_val = 0x02,
+ .update_val_idle = 0x82,
+ .update_val_normal = 0x02,
+ },
+ [AB8540_LDO_AUDIO] = {
+ .desc = {
+ .name = "LDO-AUDIO",
+ .ops = &ab8500_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_AUDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2000000_voltage,
+ },
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x02,
+ .update_val = 0x02,
+ },
+ [AB8540_LDO_ANAMIC1] = {
+ .desc = {
+ .name = "LDO-ANAMIC1",
+ .ops = &ab8500_regulator_anamic_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_ANAMIC1,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .shared_mode = &ab8540_ldo_anamic1_shared,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x08,
+ .update_val = 0x08,
+ .mode_bank = 0x03,
+ .mode_reg = 0x83,
+ .mode_mask = 0x20,
+ .mode_val_idle = 0x20,
+ .mode_val_normal = 0x00,
+ },
+ [AB8540_LDO_ANAMIC2] = {
+ .desc = {
+ .name = "LDO-ANAMIC2",
+ .ops = &ab8500_regulator_anamic_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_ANAMIC2,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_2050000_voltage,
+ },
+ .shared_mode = &ab8540_ldo_anamic2_shared,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x10,
+ .update_val = 0x10,
+ .mode_bank = 0x03,
+ .mode_reg = 0x83,
+ .mode_mask = 0x20,
+ .mode_val_idle = 0x20,
+ .mode_val_normal = 0x00,
+ },
+ [AB8540_LDO_DMIC] = {
+ .desc = {
+ .name = "LDO-DMIC",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_DMIC,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_vdmic_voltages),
+ .volt_table = ldo_vdmic_voltages,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x03,
+ .update_reg = 0x83,
+ .update_mask = 0x04,
+ .update_val = 0x04,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x83,
+ .voltage_mask = 0xc0,
+ },
+
+ /*
+ * Regulators with fixed voltage and normal/idle modes
+ */
+ [AB8540_LDO_ANA] = {
+ .desc = {
+ .name = "LDO-ANA",
+ .ops = &ab8500_regulator_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_ANA,
+ .owner = THIS_MODULE,
+ .n_voltages = 1,
+ .volt_table = fixed_1200000_voltage,
+ },
+ .load_lp_uA = 1000,
+ .update_bank = 0x04,
+ .update_reg = 0x06,
+ .update_mask = 0x0c,
+ .update_val = 0x04,
+ .update_val_idle = 0x0c,
+ .update_val_normal = 0x04,
+ },
+ [AB8540_LDO_SDIO] = {
+ .desc = {
+ .name = "LDO-SDIO",
+ .ops = &ab8500_regulator_volt_mode_ops,
+ .type = REGULATOR_VOLTAGE,
+ .id = AB8540_LDO_SDIO,
+ .owner = THIS_MODULE,
+ .n_voltages = ARRAY_SIZE(ldo_sdio_voltages),
+ .volt_table = ldo_sdio_voltages,
+ },
+ .load_lp_uA = 5000,
+ .update_bank = 0x03,
+ .update_reg = 0x88,
+ .update_mask = 0x30,
+ .update_val = 0x10,
+ .update_val_idle = 0x30,
+ .update_val_normal = 0x10,
+ .voltage_bank = 0x03,
+ .voltage_reg = 0x88,
+ .voltage_mask = 0x07,
+ },
+};
+
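+/*
+ * Cross references between the regulators that share a low power mode
+ * setting: each entry points at the other regulator of the pair.
+ */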
+static struct ab8500_shared_mode ldo_anamic1_shared = {
+ .shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC2],
+};
+
+static struct ab8500_shared_mode ldo_anamic2_shared = {
+ .shared_regulator = &ab8505_regulator_info[AB8505_LDO_ANAMIC1],
+};
+
+static struct ab8500_shared_mode ab8540_ldo_anamic1_shared = {
+ .shared_regulator = &ab8540_regulator_info[AB8540_LDO_ANAMIC2],
+};
+
+static struct ab8500_shared_mode ab8540_ldo_anamic2_shared = {
+ .shared_regulator = &ab8540_regulator_info[AB8540_LDO_ANAMIC1],
};
struct ab8500_reg_init {
@@ -474,13 +1799,13 @@ struct ab8500_reg_init {
.mask = _mask, \
}
+/* AB8500 register init */
static struct ab8500_reg_init ab8500_reg_init[] = {
/*
* 0x30, VanaRequestCtrl
- * 0x0C, VpllRequestCtrl
* 0xc0, VextSupply1RequestCtrl
*/
- REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc),
+ REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xf0),
/*
* 0x03, VextSupply2RequestCtrl
* 0x0c, VextSupply3RequestCtrl
@@ -547,13 +1872,21 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
/*
* 0x02, SysClkReq2Valid1
- * ...
+ * 0x04, SysClkReq3Valid1
+ * 0x08, SysClkReq4Valid1
+ * 0x10, SysClkReq5Valid1
+ * 0x20, SysClkReq6Valid1
+ * 0x40, SysClkReq7Valid1
* 0x80, SysClkReq8Valid1
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
/*
* 0x02, SysClkReq2Valid2
- * ...
+ * 0x04, SysClkReq3Valid2
+ * 0x08, SysClkReq4Valid2
+ * 0x10, SysClkReq5Valid2
+ * 0x20, SysClkReq6Valid2
+ * 0x40, SysClkReq7Valid2
* 0x80, SysClkReq8Valid2
*/
REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
@@ -578,8 +1911,8 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
/*
+ * 0x03, VpllRegu (NOTE! PRCMU register bits)
* 0x0c, VanaRegu
- * 0x03, VpllRegu
*/
REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f),
/*
@@ -605,10 +1938,6 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
*/
REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03),
/*
- * 0x3f, Vsmps1Sel1
- */
- REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f),
- /*
* 0x0f, Vaux1Sel
*/
REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f),
@@ -641,52 +1970,1073 @@ static struct ab8500_reg_init ab8500_reg_init[] = {
REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
};
-static int
-ab8500_regulator_init_registers(struct platform_device *pdev, int id, int value)
+/* AB8505 register init */
+static struct ab8500_reg_init ab8505_reg_init[] = {
+ /*
+ * 0x03, VarmRequestCtrl
+ * 0x0c, VsmpsCRequestCtrl
+ * 0x30, VsmpsARequestCtrl
+ * 0xc0, VsmpsBRequestCtrl
+ */
+ REG_INIT(AB8505_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
+ /*
+ * 0x03, VsafeRequestCtrl
+ * 0x0c, VpllRequestCtrl
+ * 0x30, VanaRequestCtrl
+ */
+ REG_INIT(AB8505_REGUREQUESTCTRL2, 0x03, 0x04, 0x3f),
+ /*
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB8505_REGUREQUESTCTRL3, 0x03, 0x05, 0xf0),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB8505_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x01, VsmpsASysClkReq1HPValid
+ * 0x02, VsmpsBSysClkReq1HPValid
+ * 0x04, VsafeSysClkReq1HPValid
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x10, VpllSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB8505_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
+ /*
+ * 0x01, VsmpsCSysClkReq1HPValid
+ * 0x02, VarmSysClkReq1HPValid
+ * 0x04, VbbSysClkReq1HPValid
+ * 0x08, VsmpsMSysClkReq1HPValid
+ */
+ REG_INIT(AB8505_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x0f),
+ /*
+ * 0x01, VsmpsAHwHPReq1Valid
+ * 0x02, VsmpsBHwHPReq1Valid
+ * 0x04, VsafeHwHPReq1Valid
+ * 0x08, VanaHwHPReq1Valid
+ * 0x10, VpllHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB8505_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
+ /*
+ * 0x08, VsmpsMHwHPReq1Valid
+ */
+ REG_INIT(AB8505_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x08),
+ /*
+ * 0x01, VsmpsAHwHPReq2Valid
+ * 0x02, VsmpsBHwHPReq2Valid
+ * 0x04, VsafeHwHPReq2Valid
+ * 0x08, VanaHwHPReq2Valid
+ * 0x10, VpllHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB8505_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
+ /*
+ * 0x08, VsmpsMHwHPReq2Valid
+ */
+ REG_INIT(AB8505_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x08),
+ /*
+ * 0x01, VsmpsCSwHPReqValid
+ * 0x02, VarmSwHPReqValid
+ * 0x04, VsmpsASwHPReqValid
+ * 0x08, VsmpsBSwHPReqValid
+ * 0x10, VsafeSwHPReqValid
+ * 0x20, VanaSwHPReqValid
+ * 0x40, VpllSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB8505_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x20, VsmpsMSwHPReqValid
+ */
+ REG_INIT(AB8505_REGUSWHPREQVALID2, 0x03, 0x0e, 0x23),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * 0x04, SysClkReq3Valid1
+ * 0x08, SysClkReq4Valid1
+ */
+ REG_INIT(AB8505_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0x0e),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * 0x04, SysClkReq3Valid2
+ * 0x08, SysClkReq4Valid2
+ */
+ REG_INIT(AB8505_REGUSYSCLKREQVALID2, 0x03, 0x10, 0x0e),
+ /*
+ * 0x01, Vaux4SwHPReqValid
+ * 0x02, Vaux4HwHPReq2Valid
+ * 0x04, Vaux4HwHPReq1Valid
+ * 0x08, Vaux4SysClkReq1HPValid
+ */
+ REG_INIT(AB8505_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
+ /*
+ * 0x02, VadcEna
+ * 0x04, VintCore12Ena
+ * 0x38, VintCore12Sel
+ * 0x40, VintCore12LP
+ * 0x80, VadcLP
+ */
+ REG_INIT(AB8505_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ */
+ REG_INIT(AB8505_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB8505_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x03, VsmpsARegu
+ * 0x0c, VsmpsASelCtrl
+ * 0x10, VsmpsAAutoMode
+ * 0x20, VsmpsAPWMMode
+ */
+ REG_INIT(AB8505_VSMPSAREGU, 0x04, 0x03, 0x3f),
+ /*
+ * 0x03, VsmpsBRegu
+ * 0x0c, VsmpsBSelCtrl
+ * 0x10, VsmpsBAutoMode
+ * 0x20, VsmpsBPWMMode
+ */
+ REG_INIT(AB8505_VSMPSBREGU, 0x04, 0x04, 0x3f),
+ /*
+ * 0x03, VsafeRegu
+ * 0x0c, VsafeSelCtrl
+ * 0x10, VsafeAutoMode
+ * 0x20, VsafePWMMode
+ */
+ REG_INIT(AB8505_VSAFEREGU, 0x04, 0x05, 0x3f),
+ /*
+ * 0x03, VpllRegu (NOTE! PRCMU register bits)
+ * 0x0c, VanaRegu
+ */
+ REG_INIT(AB8505_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB8505_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB8505_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x0f, Vaux3Regu
+ */
+ REG_INIT(AB8505_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
+ /*
+ * 0x3f, VsmpsASel1
+ */
+ REG_INIT(AB8505_VSMPSASEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x3f, VsmpsASel2
+ */
+ REG_INIT(AB8505_VSMPSASEL2, 0x04, 0x14, 0x3f),
+ /*
+ * 0x3f, VsmpsASel3
+ */
+ REG_INIT(AB8505_VSMPSASEL3, 0x04, 0x15, 0x3f),
+ /*
+ * 0x3f, VsmpsBSel1
+ */
+ REG_INIT(AB8505_VSMPSBSEL1, 0x04, 0x17, 0x3f),
+ /*
+ * 0x3f, VsmpsBSel2
+ */
+ REG_INIT(AB8505_VSMPSBSEL2, 0x04, 0x18, 0x3f),
+ /*
+ * 0x3f, VsmpsBSel3
+ */
+ REG_INIT(AB8505_VSMPSBSEL3, 0x04, 0x19, 0x3f),
+ /*
+ * 0x7f, VsafeSel1
+ */
+ REG_INIT(AB8505_VSAFESEL1, 0x04, 0x1b, 0x7f),
+ /*
+ * 0x3f, VsafeSel2
+ */
+ REG_INIT(AB8505_VSAFESEL2, 0x04, 0x1c, 0x7f),
+ /*
+ * 0x3f, VsafeSel3
+ */
+ REG_INIT(AB8505_VSAFESEL3, 0x04, 0x1d, 0x7f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB8505_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB8505_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ * 0x30, VRF1Sel
+ */
+ REG_INIT(AB8505_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
+ /*
+ * 0x03, Vaux4RequestCtrl
+ */
+ REG_INIT(AB8505_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
+ /*
+ * 0x03, Vaux4Regu
+ */
+ REG_INIT(AB8505_VAUX4REGU, 0x04, 0x2e, 0x03),
+ /*
+ * 0x0f, Vaux4Sel
+ */
+ REG_INIT(AB8505_VAUX4SEL, 0x04, 0x2f, 0x0f),
+ /*
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB8505_REGUCTRLDISCH, 0x04, 0x43, 0xfc),
+ /*
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB8505_REGUCTRLDISCH2, 0x04, 0x44, 0x16),
+ /*
+ * 0x01, Vaux4Disch
+ */
+ REG_INIT(AB8505_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
+ /*
+ * 0x07, Vaux5Sel
+ * 0x08, Vaux5LP
+ * 0x10, Vaux5Ena
+ * 0x20, Vaux5Disch
+ * 0x40, Vaux5DisSfst
+ * 0x80, Vaux5DisPulld
+ */
+ REG_INIT(AB8505_CTRLVAUX5, 0x01, 0x55, 0xff),
+ /*
+ * 0x07, Vaux6Sel
+ * 0x08, Vaux6LP
+ * 0x10, Vaux6Ena
+ * 0x80, Vaux6DisPulld
+ */
+ REG_INIT(AB8505_CTRLVAUX6, 0x01, 0x56, 0x9f),
+};
+
+/* AB9540 register init */
+static struct ab8500_reg_init ab9540_reg_init[] = {
+ /*
+ * 0x03, VarmRequestCtrl
+ * 0x0c, VapeRequestCtrl
+ * 0x30, Vsmps1RequestCtrl
+ * 0xc0, Vsmps2RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
+ /*
+ * 0x03, Vsmps3RequestCtrl
+ * 0x0c, VpllRequestCtrl
+ * 0x30, VanaRequestCtrl
+ * 0xc0, VextSupply1RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
+ /*
+ * 0x03, VextSupply2RequestCtrl
+ * 0x0c, VextSupply3RequestCtrl
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB9540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x01, Vsmps1SysClkReq1HPValid
+ * 0x02, Vsmps2SysClkReq1HPValid
+ * 0x04, Vsmps3SysClkReq1HPValid
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x10, VpllSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
+ /*
+ * 0x01, VapeSysClkReq1HPValid
+ * 0x02, VarmSysClkReq1HPValid
+ * 0x04, VbbSysClkReq1HPValid
+ * 0x08, VmodSysClkReq1HPValid
+ * 0x10, VextSupply1SysClkReq1HPValid
+ * 0x20, VextSupply2SysClkReq1HPValid
+ * 0x40, VextSupply3SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x7f),
+ /*
+ * 0x01, Vsmps1HwHPReq1Valid
+ * 0x02, Vsmps2HwHPReq1Valid
+ * 0x04, Vsmps3HwHPReq1Valid
+ * 0x08, VanaHwHPReq1Valid
+ * 0x10, VpllHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq1Valid
+ * 0x02, VextSupply2HwHPReq1Valid
+ * 0x04, VextSupply3HwHPReq1Valid
+ * 0x08, VmodHwHPReq1Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x0f),
+ /*
+ * 0x01, Vsmps1HwHPReq2Valid
+ * 0x02, Vsmps2HwHPReq2Valid
+	 * 0x04, Vsmps3HwHPReq2Valid
+ * 0x08, VanaHwHPReq2Valid
+ * 0x10, VpllHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq2Valid
+ * 0x02, VextSupply2HwHPReq2Valid
+ * 0x04, VextSupply3HwHPReq2Valid
+ * 0x08, VmodHwHPReq2Valid
+ */
+ REG_INIT(AB9540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x0f),
+ /*
+ * 0x01, VapeSwHPReqValid
+ * 0x02, VarmSwHPReqValid
+ * 0x04, Vsmps1SwHPReqValid
+ * 0x08, Vsmps2SwHPReqValid
+ * 0x10, Vsmps3SwHPReqValid
+ * 0x20, VanaSwHPReqValid
+ * 0x40, VpllSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB9540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x04, VextSupply1SwHPReqValid
+ * 0x08, VextSupply2SwHPReqValid
+ * 0x10, VextSupply3SwHPReqValid
+ * 0x20, VmodSwHPReqValid
+ */
+ REG_INIT(AB9540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x3f),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * ...
+ * 0x80, SysClkReq8Valid1
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * ...
+ * 0x80, SysClkReq8Valid2
+ */
+ REG_INIT(AB9540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe),
+ /*
+ * 0x01, Vaux4SwHPReqValid
+ * 0x02, Vaux4HwHPReq2Valid
+ * 0x04, Vaux4HwHPReq1Valid
+ * 0x08, Vaux4SysClkReq1HPValid
+ */
+ REG_INIT(AB9540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
+ /*
+ * 0x02, VTVoutEna
+ * 0x04, Vintcore12Ena
+ * 0x38, Vintcore12Sel
+ * 0x40, Vintcore12LP
+ * 0x80, VTVoutLP
+ */
+ REG_INIT(AB9540_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ */
+ REG_INIT(AB9540_VAUDIOSUPPLY, 0x03, 0x83, 0x1e),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB9540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x03, Vsmps1Regu
+ * 0x0c, Vsmps1SelCtrl
+ * 0x10, Vsmps1AutoMode
+ * 0x20, Vsmps1PWMMode
+ */
+ REG_INIT(AB9540_VSMPS1REGU, 0x04, 0x03, 0x3f),
+ /*
+ * 0x03, Vsmps2Regu
+ * 0x0c, Vsmps2SelCtrl
+ * 0x10, Vsmps2AutoMode
+ * 0x20, Vsmps2PWMMode
+ */
+ REG_INIT(AB9540_VSMPS2REGU, 0x04, 0x04, 0x3f),
+ /*
+ * 0x03, Vsmps3Regu
+ * 0x0c, Vsmps3SelCtrl
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3REGU, 0x04, 0x05, 0x0f),
+ /*
+ * 0x03, VpllRegu
+ * 0x0c, VanaRegu
+ */
+ REG_INIT(AB9540_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB9540_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB9540_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x0c, Vrf1Regu
+ * 0x03, Vaux3Regu
+ */
+ REG_INIT(AB9540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
+ /*
+ * 0x3f, Vsmps1Sel1
+ */
+ REG_INIT(AB9540_VSMPS1SEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel2
+ */
+ REG_INIT(AB9540_VSMPS1SEL2, 0x04, 0x14, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel3
+ */
+ REG_INIT(AB9540_VSMPS1SEL3, 0x04, 0x15, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel1
+ */
+ REG_INIT(AB9540_VSMPS2SEL1, 0x04, 0x17, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel2
+ */
+ REG_INIT(AB9540_VSMPS2SEL2, 0x04, 0x18, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel3
+ */
+ REG_INIT(AB9540_VSMPS2SEL3, 0x04, 0x19, 0x3f),
+ /*
+ * 0x7f, Vsmps3Sel1
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
+ /*
+ * 0x7f, Vsmps3Sel2
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB9540_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB9540_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB9540_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ * 0x30, Vrf1Sel
+ */
+ REG_INIT(AB9540_VRF1VAUX3SEL, 0x04, 0x21, 0x37),
+ /*
+ * 0x01, VextSupply12LP
+ */
+ REG_INIT(AB9540_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
+ /*
+ * 0x03, Vaux4RequestCtrl
+ */
+ REG_INIT(AB9540_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
+ /*
+ * 0x03, Vaux4Regu
+ */
+ REG_INIT(AB9540_VAUX4REGU, 0x04, 0x2e, 0x03),
+ /*
+ * 0x08, Vaux4Sel
+	 * 0x0f, Vaux4Sel
+ REG_INIT(AB9540_VAUX4SEL, 0x04, 0x2f, 0x0f),
+ /*
+ * 0x01, VpllDisch
+ * 0x02, Vrf1Disch
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH, 0x04, 0x43, 0xff),
+ /*
+ * 0x01, VsimDisch
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x08, VpllPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH2, 0x04, 0x44, 0x1f),
+ /*
+ * 0x01, Vaux4Disch
+ */
+ REG_INIT(AB9540_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
+};
+
+/* AB8540 register init */
+static struct ab8500_reg_init ab8540_reg_init[] = {
+ /*
+	 * 0x01, VSimSysClkReq1Valid
+	 * 0x02, VSimSysClkReq2Valid
+	 * 0x04, VSimSysClkReq3Valid
+	 * 0x08, VSimSysClkReq4Valid
+	 * 0x10, VSimSysClkReq5Valid
+	 * 0x20, VSimSysClkReq6Valid
+	 * 0x40, VSimSysClkReq7Valid
+	 * 0x80, VSimSysClkReq8Valid
+ */
+ REG_INIT(AB8540_VSIMSYSCLKCTRL, 0x02, 0x33, 0xff),
+ /*
+ * 0x03, VarmRequestCtrl
+ * 0x0c, VapeRequestCtrl
+ * 0x30, Vsmps1RequestCtrl
+ * 0xc0, Vsmps2RequestCtrl
+ */
+ REG_INIT(AB8540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff),
+ /*
+ * 0x03, Vsmps3RequestCtrl
+ * 0x0c, VpllRequestCtrl
+ * 0x30, VanaRequestCtrl
+ * 0xc0, VextSupply1RequestCtrl
+ */
+ REG_INIT(AB8540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff),
+ /*
+ * 0x03, VextSupply2RequestCtrl
+ * 0x0c, VextSupply3RequestCtrl
+ * 0x30, Vaux1RequestCtrl
+ * 0xc0, Vaux2RequestCtrl
+ */
+ REG_INIT(AB8540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff),
+ /*
+ * 0x03, Vaux3RequestCtrl
+ * 0x04, SwHPReq
+ */
+ REG_INIT(AB8540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07),
+ /*
+ * 0x01, Vsmps1SysClkReq1HPValid
+ * 0x02, Vsmps2SysClkReq1HPValid
+ * 0x04, Vsmps3SysClkReq1HPValid
+ * 0x08, VanaSysClkReq1HPValid
+ * 0x10, VpllSysClkReq1HPValid
+ * 0x20, Vaux1SysClkReq1HPValid
+ * 0x40, Vaux2SysClkReq1HPValid
+ * 0x80, Vaux3SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff),
+ /*
+ * 0x01, VapeSysClkReq1HPValid
+ * 0x02, VarmSysClkReq1HPValid
+ * 0x04, VbbSysClkReq1HPValid
+ * 0x10, VextSupply1SysClkReq1HPValid
+ * 0x20, VextSupply2SysClkReq1HPValid
+ * 0x40, VextSupply3SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x77),
+ /*
+ * 0x01, Vsmps1HwHPReq1Valid
+ * 0x02, Vsmps2HwHPReq1Valid
+ * 0x04, Vsmps3HwHPReq1Valid
+ * 0x08, VanaHwHPReq1Valid
+ * 0x10, VpllHwHPReq1Valid
+ * 0x20, Vaux1HwHPReq1Valid
+ * 0x40, Vaux2HwHPReq1Valid
+ * 0x80, Vaux3HwHPReq1Valid
+ */
+ REG_INIT(AB8540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq1Valid
+ * 0x02, VextSupply2HwHPReq1Valid
+ * 0x04, VextSupply3HwHPReq1Valid
+ */
+ REG_INIT(AB8540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x07),
+ /*
+ * 0x01, Vsmps1HwHPReq2Valid
+ * 0x02, Vsmps2HwHPReq2Valid
+	 * 0x04, Vsmps3HwHPReq2Valid
+ * 0x08, VanaHwHPReq2Valid
+ * 0x10, VpllHwHPReq2Valid
+ * 0x20, Vaux1HwHPReq2Valid
+ * 0x40, Vaux2HwHPReq2Valid
+ * 0x80, Vaux3HwHPReq2Valid
+ */
+ REG_INIT(AB8540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff),
+ /*
+ * 0x01, VextSupply1HwHPReq2Valid
+ * 0x02, VextSupply2HwHPReq2Valid
+ * 0x04, VextSupply3HwHPReq2Valid
+ */
+ REG_INIT(AB8540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x07),
+ /*
+ * 0x01, VapeSwHPReqValid
+ * 0x02, VarmSwHPReqValid
+ * 0x04, Vsmps1SwHPReqValid
+ * 0x08, Vsmps2SwHPReqValid
+ * 0x10, Vsmps3SwHPReqValid
+ * 0x20, VanaSwHPReqValid
+ * 0x40, VpllSwHPReqValid
+ * 0x80, Vaux1SwHPReqValid
+ */
+ REG_INIT(AB8540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff),
+ /*
+ * 0x01, Vaux2SwHPReqValid
+ * 0x02, Vaux3SwHPReqValid
+ * 0x04, VextSupply1SwHPReqValid
+ * 0x08, VextSupply2SwHPReqValid
+ * 0x10, VextSupply3SwHPReqValid
+ */
+ REG_INIT(AB8540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f),
+ /*
+ * 0x02, SysClkReq2Valid1
+ * ...
+ * 0x80, SysClkReq8Valid1
+ */
+ REG_INIT(AB8540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xff),
+ /*
+ * 0x02, SysClkReq2Valid2
+ * ...
+ * 0x80, SysClkReq8Valid2
+ */
+ REG_INIT(AB8540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xff),
+ /*
+ * 0x01, Vaux4SwHPReqValid
+ * 0x02, Vaux4HwHPReq2Valid
+ * 0x04, Vaux4HwHPReq1Valid
+ * 0x08, Vaux4SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f),
+ /*
+ * 0x01, Vaux5SwHPReqValid
+ * 0x02, Vaux5HwHPReq2Valid
+ * 0x04, Vaux5HwHPReq1Valid
+ * 0x08, Vaux5SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUVAUX5REQVALID, 0x03, 0x12, 0x0f),
+ /*
+ * 0x01, Vaux6SwHPReqValid
+ * 0x02, Vaux6HwHPReq2Valid
+ * 0x04, Vaux6HwHPReq1Valid
+ * 0x08, Vaux6SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUVAUX6REQVALID, 0x03, 0x13, 0x0f),
+ /*
+ * 0x01, VclkbSwHPReqValid
+ * 0x02, VclkbHwHPReq2Valid
+ * 0x04, VclkbHwHPReq1Valid
+ * 0x08, VclkbSysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUVCLKBREQVALID, 0x03, 0x14, 0x0f),
+ /*
+ * 0x01, Vrf1SwHPReqValid
+ * 0x02, Vrf1HwHPReq2Valid
+ * 0x04, Vrf1HwHPReq1Valid
+ * 0x08, Vrf1SysClkReq1HPValid
+ */
+ REG_INIT(AB8540_REGUVRF1REQVALID, 0x03, 0x15, 0x0f),
+ /*
+ * 0x02, VTVoutEna
+ * 0x04, Vintcore12Ena
+ * 0x38, Vintcore12Sel
+ * 0x40, Vintcore12LP
+ * 0x80, VTVoutLP
+ */
+ REG_INIT(AB8540_REGUMISC1, 0x03, 0x80, 0xfe),
+ /*
+ * 0x02, VaudioEna
+ * 0x04, VdmicEna
+ * 0x08, Vamic1Ena
+ * 0x10, Vamic2Ena
+ * 0x20, Vamic12LP
+	 * 0xc0, VdmicSel
+ */
+ REG_INIT(AB8540_VAUDIOSUPPLY, 0x03, 0x83, 0xfe),
+ /*
+ * 0x01, Vamic1_dzout
+ * 0x02, Vamic2_dzout
+ */
+ REG_INIT(AB8540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03),
+ /*
+ * 0x07, VHSICSel
+ * 0x08, VHSICOffState
+	 * 0x10, VHSICEna
+ * 0x20, VHSICLP
+ */
+ REG_INIT(AB8540_VHSIC, 0x03, 0x87, 0x3f),
+ /*
+ * 0x07, VSDIOSel
+ * 0x08, VSDIOOffState
+ * 0x10, VSDIOEna
+ * 0x20, VSDIOLP
+ */
+ REG_INIT(AB8540_VSDIO, 0x03, 0x88, 0x3f),
+ /*
+ * 0x03, Vsmps1Regu
+ * 0x0c, Vsmps1SelCtrl
+ * 0x10, Vsmps1AutoMode
+ * 0x20, Vsmps1PWMMode
+ */
+ REG_INIT(AB8540_VSMPS1REGU, 0x04, 0x03, 0x3f),
+ /*
+ * 0x03, Vsmps2Regu
+ * 0x0c, Vsmps2SelCtrl
+ * 0x10, Vsmps2AutoMode
+ * 0x20, Vsmps2PWMMode
+ */
+ REG_INIT(AB8540_VSMPS2REGU, 0x04, 0x04, 0x3f),
+ /*
+ * 0x03, Vsmps3Regu
+ * 0x0c, Vsmps3SelCtrl
+ * 0x10, Vsmps3AutoMode
+ * 0x20, Vsmps3PWMMode
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8540_VSMPS3REGU, 0x04, 0x05, 0x0f),
+ /*
+ * 0x03, VpllRegu
+ * 0x0c, VanaRegu
+ */
+ REG_INIT(AB8540_VPLLVANAREGU, 0x04, 0x06, 0x0f),
+ /*
+ * 0x03, VextSupply1Regu
+ * 0x0c, VextSupply2Regu
+ * 0x30, VextSupply3Regu
+ * 0x40, ExtSupply2Bypass
+ * 0x80, ExtSupply3Bypass
+ */
+ REG_INIT(AB8540_EXTSUPPLYREGU, 0x04, 0x08, 0xff),
+ /*
+ * 0x03, Vaux1Regu
+ * 0x0c, Vaux2Regu
+ */
+ REG_INIT(AB8540_VAUX12REGU, 0x04, 0x09, 0x0f),
+ /*
+ * 0x0c, VRF1Regu
+ * 0x03, Vaux3Regu
+ */
+ REG_INIT(AB8540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f),
+ /*
+ * 0x3f, Vsmps1Sel1
+ */
+ REG_INIT(AB8540_VSMPS1SEL1, 0x04, 0x13, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel2
+ */
+ REG_INIT(AB8540_VSMPS1SEL2, 0x04, 0x14, 0x3f),
+ /*
+ * 0x3f, Vsmps1Sel3
+ */
+ REG_INIT(AB8540_VSMPS1SEL3, 0x04, 0x15, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel1
+ */
+ REG_INIT(AB8540_VSMPS2SEL1, 0x04, 0x17, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel2
+ */
+ REG_INIT(AB8540_VSMPS2SEL2, 0x04, 0x18, 0x3f),
+ /*
+ * 0x3f, Vsmps2Sel3
+ */
+ REG_INIT(AB8540_VSMPS2SEL3, 0x04, 0x19, 0x3f),
+ /*
+ * 0x7f, Vsmps3Sel1
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8540_VSMPS3SEL1, 0x04, 0x1b, 0x7f),
+ /*
+ * 0x7f, Vsmps3Sel2
+ * NOTE! PRCMU register
+ */
+ REG_INIT(AB8540_VSMPS3SEL2, 0x04, 0x1c, 0x7f),
+ /*
+ * 0x0f, Vaux1Sel
+ */
+ REG_INIT(AB8540_VAUX1SEL, 0x04, 0x1f, 0x0f),
+ /*
+ * 0x0f, Vaux2Sel
+ */
+ REG_INIT(AB8540_VAUX2SEL, 0x04, 0x20, 0x0f),
+ /*
+ * 0x07, Vaux3Sel
+ * 0x70, Vrf1Sel
+ */
+ REG_INIT(AB8540_VRF1VAUX3SEL, 0x04, 0x21, 0x77),
+ /*
+ * 0x01, VextSupply12LP
+ */
+ REG_INIT(AB8540_REGUCTRL2SPARE, 0x04, 0x22, 0x01),
+ /*
+ * 0x07, Vanasel
+ * 0x30, Vpllsel
+ */
+ REG_INIT(AB8540_VANAVPLLSEL, 0x04, 0x29, 0x37),
+ /*
+ * 0x03, Vaux4RequestCtrl
+ */
+ REG_INIT(AB8540_VAUX4REQCTRL, 0x04, 0x2d, 0x03),
+ /*
+ * 0x03, Vaux4Regu
+ */
+ REG_INIT(AB8540_VAUX4REGU, 0x04, 0x2e, 0x03),
+ /*
+ * 0x0f, Vaux4Sel
+ */
+ REG_INIT(AB8540_VAUX4SEL, 0x04, 0x2f, 0x0f),
+ /*
+ * 0x03, Vaux5RequestCtrl
+ */
+ REG_INIT(AB8540_VAUX5REQCTRL, 0x04, 0x31, 0x03),
+ /*
+ * 0x03, Vaux5Regu
+ */
+ REG_INIT(AB8540_VAUX5REGU, 0x04, 0x32, 0x03),
+ /*
+ * 0x3f, Vaux5Sel
+ */
+ REG_INIT(AB8540_VAUX5SEL, 0x04, 0x33, 0x3f),
+ /*
+ * 0x03, Vaux6RequestCtrl
+ */
+ REG_INIT(AB8540_VAUX6REQCTRL, 0x04, 0x34, 0x03),
+ /*
+ * 0x03, Vaux6Regu
+ */
+ REG_INIT(AB8540_VAUX6REGU, 0x04, 0x35, 0x03),
+ /*
+ * 0x3f, Vaux6Sel
+ */
+ REG_INIT(AB8540_VAUX6SEL, 0x04, 0x36, 0x3f),
+ /*
+ * 0x03, VCLKBRequestCtrl
+ */
+ REG_INIT(AB8540_VCLKBREQCTRL, 0x04, 0x37, 0x03),
+ /*
+ * 0x03, VCLKBRegu
+ */
+ REG_INIT(AB8540_VCLKBREGU, 0x04, 0x38, 0x03),
+ /*
+ * 0x07, VCLKBSel
+ */
+ REG_INIT(AB8540_VCLKBSEL, 0x04, 0x39, 0x07),
+ /*
+ * 0x03, Vrf1RequestCtrl
+ */
+ REG_INIT(AB8540_VRF1REQCTRL, 0x04, 0x3a, 0x03),
+ /*
+ * 0x01, VpllDisch
+ * 0x02, Vrf1Disch
+ * 0x04, Vaux1Disch
+ * 0x08, Vaux2Disch
+ * 0x10, Vaux3Disch
+ * 0x20, Vintcore12Disch
+ * 0x40, VTVoutDisch
+ * 0x80, VaudioDisch
+ */
+ REG_INIT(AB8540_REGUCTRLDISCH, 0x04, 0x43, 0xff),
+ /*
+ * 0x02, VanaDisch
+ * 0x04, VdmicPullDownEna
+ * 0x08, VpllPullDownEna
+ * 0x10, VdmicDisch
+ */
+ REG_INIT(AB8540_REGUCTRLDISCH2, 0x04, 0x44, 0x1e),
+ /*
+ * 0x01, Vaux4Disch
+ */
+ REG_INIT(AB8540_REGUCTRLDISCH3, 0x04, 0x48, 0x01),
+ /*
+ * 0x01, Vaux5Disch
+ * 0x02, Vaux6Disch
+ * 0x04, VCLKBDisch
+ */
+ REG_INIT(AB8540_REGUCTRLDISCH4, 0x04, 0x49, 0x07),
+};
+
+static struct of_regulator_match ab8500_regulator_match[] = {
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
+};
+
+static struct of_regulator_match ab8505_regulator_match[] = {
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8505_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8505_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8505_LDO_AUX3, },
+ { .name = "ab8500_ldo_aux4", .driver_data = (void *) AB8505_LDO_AUX4, },
+ { .name = "ab8500_ldo_aux5", .driver_data = (void *) AB8505_LDO_AUX5, },
+ { .name = "ab8500_ldo_aux6", .driver_data = (void *) AB8505_LDO_AUX6, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8505_LDO_INTCORE, },
+ { .name = "ab8500_ldo_adc", .driver_data = (void *) AB8505_LDO_ADC, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8505_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8505_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8505_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_aux8", .driver_data = (void *) AB8505_LDO_AUX8, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8505_LDO_ANA, },
+};
+
+static struct of_regulator_match ab8540_regulator_match[] = {
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8540_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8540_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8540_LDO_AUX3, },
+ { .name = "ab8500_ldo_aux4", .driver_data = (void *) AB8540_LDO_AUX4, },
+ { .name = "ab8500_ldo_aux5", .driver_data = (void *) AB8540_LDO_AUX5, },
+ { .name = "ab8500_ldo_aux6", .driver_data = (void *) AB8540_LDO_AUX6, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8540_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8540_LDO_TVOUT, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8540_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8540_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8540_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8540_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8540_LDO_ANA, },
+ { .name = "ab8500_ldo_sdio", .driver_data = (void *) AB8540_LDO_SDIO, },
+};
+
+static struct of_regulator_match ab9540_regulator_match[] = {
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB9540_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB9540_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB9540_LDO_AUX3, },
+ { .name = "ab8500_ldo_aux4", .driver_data = (void *) AB9540_LDO_AUX4, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB9540_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB9540_LDO_TVOUT, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB9540_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB9540_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB9540_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB9540_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB9540_LDO_ANA, },
+};
+
+static struct {
+ struct ab8500_regulator_info *info;
+ int info_size;
+ struct ab8500_reg_init *init;
+ int init_size;
+ struct of_regulator_match *match;
+ int match_size;
+} abx500_regulator;
+
+static void abx500_get_regulator_info(struct ab8500 *ab8500)
+{
+ if (is_ab9540(ab8500)) {
+ abx500_regulator.info = ab9540_regulator_info;
+ abx500_regulator.info_size = ARRAY_SIZE(ab9540_regulator_info);
+ abx500_regulator.init = ab9540_reg_init;
+ abx500_regulator.init_size = AB9540_NUM_REGULATOR_REGISTERS;
+ abx500_regulator.match = ab9540_regulator_match;
+ abx500_regulator.match_size = ARRAY_SIZE(ab9540_regulator_match);
+ } else if (is_ab8505(ab8500)) {
+ abx500_regulator.info = ab8505_regulator_info;
+ abx500_regulator.info_size = ARRAY_SIZE(ab8505_regulator_info);
+ abx500_regulator.init = ab8505_reg_init;
+ abx500_regulator.init_size = AB8505_NUM_REGULATOR_REGISTERS;
+ abx500_regulator.match = ab8505_regulator_match;
+ abx500_regulator.match_size = ARRAY_SIZE(ab8505_regulator_match);
+ } else if (is_ab8540(ab8500)) {
+ abx500_regulator.info = ab8540_regulator_info;
+ abx500_regulator.info_size = ARRAY_SIZE(ab8540_regulator_info);
+ abx500_regulator.init = ab8540_reg_init;
+ abx500_regulator.init_size = AB8540_NUM_REGULATOR_REGISTERS;
+ abx500_regulator.match = ab8540_regulator_match;
+ abx500_regulator.match_size = ARRAY_SIZE(ab8540_regulator_match);
+ } else {
+ abx500_regulator.info = ab8500_regulator_info;
+ abx500_regulator.info_size = ARRAY_SIZE(ab8500_regulator_info);
+ abx500_regulator.init = ab8500_reg_init;
+ abx500_regulator.init_size = AB8500_NUM_REGULATOR_REGISTERS;
+ abx500_regulator.match = ab8500_regulator_match;
+ abx500_regulator.match_size = ARRAY_SIZE(ab8500_regulator_match);
+ }
+}
+
+static int ab8500_regulator_init_registers(struct platform_device *pdev,
+ int id, int mask, int value)
{
+ struct ab8500_reg_init *reg_init = abx500_regulator.init;
int err;
- if (value & ~ab8500_reg_init[id].mask) {
- dev_err(&pdev->dev,
- "Configuration error: value outside mask.\n");
- return -EINVAL;
- }
+ BUG_ON(value & ~mask);
+ BUG_ON(mask & ~reg_init[id].mask);
+ /* initialize register */
err = abx500_mask_and_set_register_interruptible(
&pdev->dev,
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ reg_init[id].bank,
+ reg_init[id].addr,
+ mask, value);
if (err < 0) {
dev_err(&pdev->dev,
"Failed to initialize 0x%02x, 0x%02x.\n",
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr);
+ reg_init[id].bank,
+ reg_init[id].addr);
return err;
}
-
dev_vdbg(&pdev->dev,
- "init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
- ab8500_reg_init[id].bank,
- ab8500_reg_init[id].addr,
- ab8500_reg_init[id].mask,
- value);
+ " init: 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
+ reg_init[id].bank,
+ reg_init[id].addr,
+ mask, value);
return 0;
}
static int ab8500_regulator_register(struct platform_device *pdev,
- struct regulator_init_data *init_data,
- int id,
- struct device_node *np)
+ struct regulator_init_data *init_data,
+ int id, struct device_node *np)
{
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
struct ab8500_regulator_info *info = NULL;
struct regulator_config config = { };
int err;
/* assign per-regulator data */
- info = &ab8500_regulator_info[id];
+ info = &abx500_regulator.info[id];
info->dev = &pdev->dev;
config.dev = &pdev->dev;
@@ -695,7 +3045,7 @@ static int ab8500_regulator_register(struct platform_device *pdev,
config.of_node = np;
/* fix for hardware before ab8500v2.0 */
- if (abx500_get_chip_id(info->dev) < 0x20) {
+ if (is_ab8500_1p1_or_earlier(ab8500)) {
if (info->desc.id == AB8500_LDO_AUX3) {
info->desc.n_voltages =
ARRAY_SIZE(ldo_vauxn_voltages);
@@ -712,7 +3062,7 @@ static int ab8500_regulator_register(struct platform_device *pdev,
info->desc.name);
/* when we fail, un-register all earlier regulators */
while (--id >= 0) {
- info = &ab8500_regulator_info[id];
+ info = &abx500_regulator.info[id];
regulator_unregister(info->regulator);
}
return err;
@@ -721,29 +3071,16 @@ static int ab8500_regulator_register(struct platform_device *pdev,
return 0;
}
-static struct of_regulator_match ab8500_regulator_matches[] = {
- { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
- { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
- { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
- { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
- { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
- { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
- { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
- { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
- { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
- { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
- { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
-};
-
static int
-ab8500_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
+ab8500_regulator_of_probe(struct platform_device *pdev,
+ struct device_node *np)
{
+ struct of_regulator_match *match = abx500_regulator.match;
int err, i;
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ for (i = 0; i < abx500_regulator.info_size; i++) {
err = ab8500_regulator_register(
- pdev, ab8500_regulator_matches[i].init_data,
- i, ab8500_regulator_matches[i].of_node);
+ pdev, match[i].init_data, i, match[i].of_node);
if (err)
return err;
}
@@ -754,14 +3091,22 @@ ab8500_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
static int ab8500_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *pdata;
struct device_node *np = pdev->dev.of_node;
+ struct ab8500_platform_data *ppdata;
+ struct ab8500_regulator_platform_data *pdata;
int i, err;
+ if (!ab8500) {
+ dev_err(&pdev->dev, "null mfd parent\n");
+ return -EINVAL;
+ }
+
+ abx500_get_regulator_info(ab8500);
+
if (np) {
err = of_regulator_match(&pdev->dev, np,
- ab8500_regulator_matches,
- ARRAY_SIZE(ab8500_regulator_matches));
+ abx500_regulator.match,
+ abx500_regulator.match_size);
if (err < 0) {
dev_err(&pdev->dev,
"Error parsing regulator init data: %d\n", err);
@@ -772,46 +3117,61 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
return err;
}
- if (!ab8500) {
- dev_err(&pdev->dev, "null mfd parent\n");
+ ppdata = dev_get_platdata(ab8500->dev);
+ if (!ppdata) {
+ dev_err(&pdev->dev, "null parent pdata\n");
return -EINVAL;
}
- pdata = dev_get_platdata(ab8500->dev);
+
+ pdata = ppdata->regulator;
if (!pdata) {
dev_err(&pdev->dev, "null pdata\n");
return -EINVAL;
}
/* make sure the platform data has the correct size */
- if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+ if (pdata->num_regulator != abx500_regulator.info_size) {
dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
return -EINVAL;
}
+ /* initialize debug (initial state is recorded with this call) */
+ err = ab8500_regulator_debug_init(pdev);
+ if (err)
+ return err;
+
/* initialize registers */
- for (i = 0; i < pdata->num_regulator_reg_init; i++) {
- int id, value;
+ for (i = 0; i < pdata->num_reg_init; i++) {
+ int id, mask, value;
- id = pdata->regulator_reg_init[i].id;
- value = pdata->regulator_reg_init[i].value;
+ id = pdata->reg_init[i].id;
+ mask = pdata->reg_init[i].mask;
+ value = pdata->reg_init[i].value;
/* check for configuration errors */
- if (id >= AB8500_NUM_REGULATOR_REGISTERS) {
- dev_err(&pdev->dev,
- "Configuration error: id outside range.\n");
- return -EINVAL;
- }
+ BUG_ON(id >= abx500_regulator.init_size);
- err = ab8500_regulator_init_registers(pdev, id, value);
+ err = ab8500_regulator_init_registers(pdev, id, mask, value);
if (err < 0)
return err;
}
+ if (!is_ab8505(ab8500)) {
+ /* register external regulators (before Vaux1, 2 and 3) */
+ err = ab8500_ext_regulator_init(pdev);
+ if (err)
+ return err;
+ }
+
/* register all regulators */
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
- err = ab8500_regulator_register(pdev, &pdata->regulator[i], i, NULL);
- if (err < 0)
+ for (i = 0; i < abx500_regulator.info_size; i++) {
+ err = ab8500_regulator_register(pdev, &pdata->regulator[i],
+ i, NULL);
+ if (err < 0) {
+ if (!is_ab8505(ab8500))
+ ab8500_ext_regulator_exit(pdev);
return err;
+ }
}
return 0;
@@ -819,11 +3179,12 @@ static int ab8500_regulator_probe(struct platform_device *pdev)
static int ab8500_regulator_remove(struct platform_device *pdev)
{
- int i;
+ int i, err;
+ struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
+ for (i = 0; i < abx500_regulator.info_size; i++) {
struct ab8500_regulator_info *info = NULL;
- info = &ab8500_regulator_info[i];
+ info = &abx500_regulator.info[i];
dev_vdbg(rdev_get_dev(info->regulator),
"%s-remove\n", info->desc.name);
@@ -831,6 +3192,15 @@ static int ab8500_regulator_remove(struct platform_device *pdev)
regulator_unregister(info->regulator);
}
+ /* remove external regulators (after Vaux1, 2 and 3) */
+ if (!is_ab8505(ab8500))
+ ab8500_ext_regulator_exit(pdev);
+
+ /* remove regulator debug */
+ err = ab8500_regulator_debug_exit(pdev);
+ if (err)
+ return err;
+
return 0;
}
@@ -863,5 +3233,7 @@ module_exit(ab8500_regulator_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
+MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>");
+MODULE_AUTHOR("Daniel Willerud <daniel.willerud@stericsson.com>");
MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC");
MODULE_ALIAS("platform:ab8500-regulator");
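
Note on the reworked ab8500 probe above: register init entries now carry an
explicit mask as well as a value (pdata->reg_init[i].id/.mask/.value), and the
variant-specific tables are selected through abx500_get_regulator_info(). A
minimal board-file sketch under those assumptions follows; the exact field
layout of struct ab8500_regulator_reg_init and the board symbol names are
illustrative rather than taken from this patch.

        /* Hypothetical board data: force Vaux1 into HP mode at init.
         * Assumes .id/.mask/.value fields matching the probe loop above. */
        static struct ab8500_regulator_reg_init board_ab8500_reg_init[] = {
                {
                        .id    = AB8500_VAUX12REGU, /* bank 0x04, addr 0x09 */
                        .mask  = 0x03,              /* Vaux1Regu bits */
                        .value = 0x01,              /* HP mode */
                },
        };

        /* The board then points pdata->reg_init at this table and sets
         * pdata->num_reg_init = ARRAY_SIZE(board_ab8500_reg_init). */
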
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index ed7beec53af..81d8681c319 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -131,7 +131,7 @@ static const struct regulator_desc arizona_ldo1_hc = {
.min_uV = 900000,
.uV_step = 50000,
.n_voltages = 8,
- .enable_time = 500,
+ .enable_time = 1500,
.owner = THIS_MODULE,
};
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index f0ba8c4eefa..3da6bd6950c 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -13,9 +13,11 @@
#include <linux/init.h>
#include <linux/mfd/as3711.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
struct as3711_regulator_info {
@@ -276,6 +278,53 @@ static struct as3711_regulator_info as3711_reg_info[] = {
#define AS3711_REGULATOR_NUM ARRAY_SIZE(as3711_reg_info)
+static struct of_regulator_match
+as3711_regulator_matches[AS3711_REGULATOR_NUM] = {
+ [AS3711_REGULATOR_SD_1] = { .name = "sd1" },
+ [AS3711_REGULATOR_SD_2] = { .name = "sd2" },
+ [AS3711_REGULATOR_SD_3] = { .name = "sd3" },
+ [AS3711_REGULATOR_SD_4] = { .name = "sd4" },
+ [AS3711_REGULATOR_LDO_1] = { .name = "ldo1" },
+ [AS3711_REGULATOR_LDO_2] = { .name = "ldo2" },
+ [AS3711_REGULATOR_LDO_3] = { .name = "ldo3" },
+ [AS3711_REGULATOR_LDO_4] = { .name = "ldo4" },
+ [AS3711_REGULATOR_LDO_5] = { .name = "ldo5" },
+ [AS3711_REGULATOR_LDO_6] = { .name = "ldo6" },
+ [AS3711_REGULATOR_LDO_7] = { .name = "ldo7" },
+ [AS3711_REGULATOR_LDO_8] = { .name = "ldo8" },
+};
+
+static int as3711_regulator_parse_dt(struct device *dev,
+ struct device_node **of_node, const int count)
+{
+ struct as3711_regulator_pdata *pdata = dev_get_platdata(dev);
+ struct device_node *regulators =
+ of_find_node_by_name(dev->parent->of_node, "regulators");
+ struct of_regulator_match *match;
+ int ret, i;
+
+ if (!regulators) {
+ dev_err(dev, "regulator node not found\n");
+ return -ENODEV;
+ }
+
+ ret = of_regulator_match(dev->parent, regulators,
+ as3711_regulator_matches, count);
+ of_node_put(regulators);
+ if (ret < 0) {
+ dev_err(dev, "Error parsing regulator init data: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0, match = as3711_regulator_matches; i < count; i++, match++)
+ if (match->of_node) {
+ pdata->init_data[i] = match->init_data;
+ of_node[i] = match->of_node;
+ }
+
+ return 0;
+}
+
static int as3711_regulator_probe(struct platform_device *pdev)
{
struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
@@ -284,13 +333,24 @@ static int as3711_regulator_probe(struct platform_device *pdev)
struct regulator_config config = {.dev = &pdev->dev,};
struct as3711_regulator *reg = NULL;
struct as3711_regulator *regs;
+ struct device_node *of_node[AS3711_REGULATOR_NUM] = {};
struct regulator_dev *rdev;
struct as3711_regulator_info *ri;
int ret;
int id;
- if (!pdata)
- dev_dbg(&pdev->dev, "No platform data...\n");
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data...\n");
+ return -ENODEV;
+ }
+
+ if (pdev->dev.parent->of_node) {
+ ret = as3711_regulator_parse_dt(&pdev->dev, of_node, AS3711_REGULATOR_NUM);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "DT parsing failed: %d\n", ret);
+ return ret;
+ }
+ }
regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM *
sizeof(struct as3711_regulator), GFP_KERNEL);
@@ -300,7 +360,7 @@ static int as3711_regulator_probe(struct platform_device *pdev)
}
for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
- reg_data = pdata ? pdata->init_data[id] : NULL;
+ reg_data = pdata->init_data[id];
/* No need to register if there is no regulator data */
if (!reg_data)
@@ -312,6 +372,7 @@ static int as3711_regulator_probe(struct platform_device *pdev)
config.init_data = reg_data;
config.driver_data = reg;
config.regmap = as3711->regmap;
+ config.of_node = of_node[id];
rdev = regulator_register(&ri->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e3661c20cf3..6e501784158 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -51,6 +51,7 @@
static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_list);
static LIST_HEAD(regulator_map_list);
+static LIST_HEAD(regulator_ena_gpio_list);
static bool has_full_constraints;
static bool board_wants_dummy_regulator;
@@ -69,6 +70,19 @@ struct regulator_map {
};
/*
+ * struct regulator_enable_gpio
+ *
+ * Management for shared enable GPIO pin
+ */
+struct regulator_enable_gpio {
+ struct list_head list;
+ int gpio;
+ u32 enable_count; /* number of users that have this GPIO enabled */
+ u32 request_count; /* number of users that have requested this GPIO */
+ unsigned int ena_gpio_invert:1;
+};
+
+/*
* struct regulator
*
* One for each consumer device.
@@ -116,7 +130,7 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
* @supply: regulator supply name
*
* Extract the regulator device node corresponding to the supply name.
- * retruns the device node corresponding to the regulator if found, else
+ * returns the device node corresponding to the regulator if found, else
* returns NULL.
*/
static struct device_node *of_get_regulator(struct device *dev, const char *supply)
@@ -1229,7 +1243,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret;
+ int ret = 0;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1245,6 +1259,15 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (rdev)
goto found;
+ /*
+ * If regulator_dev_lookup() returned an error, a later lookup is not
+ * expected to succeed either, so quit with that error value.
+ */
+ if (ret) {
+ regulator = ERR_PTR(ret);
+ goto out;
+ }
+
if (board_wants_dummy_regulator) {
rdev = dummy_regulator_rdev;
goto found;
@@ -1456,6 +1479,101 @@ void devm_regulator_put(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
+/* Manage the list of enable GPIOs; the same GPIO pin can be shared among regulators */
+static int regulator_ena_gpio_request(struct regulator_dev *rdev,
+ const struct regulator_config *config)
+{
+ struct regulator_enable_gpio *pin;
+ int ret;
+
+ list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
+ if (pin->gpio == config->ena_gpio) {
+ rdev_dbg(rdev, "GPIO %d is already used\n",
+ config->ena_gpio);
+ goto update_ena_gpio_to_rdev;
+ }
+ }
+
+ ret = gpio_request_one(config->ena_gpio,
+ GPIOF_DIR_OUT | config->ena_gpio_flags,
+ rdev_get_name(rdev));
+ if (ret)
+ return ret;
+
+ pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
+ if (pin == NULL) {
+ gpio_free(config->ena_gpio);
+ return -ENOMEM;
+ }
+
+ pin->gpio = config->ena_gpio;
+ pin->ena_gpio_invert = config->ena_gpio_invert;
+ list_add(&pin->list, &regulator_ena_gpio_list);
+
+update_ena_gpio_to_rdev:
+ pin->request_count++;
+ rdev->ena_pin = pin;
+ return 0;
+}
+
+static void regulator_ena_gpio_free(struct regulator_dev *rdev)
+{
+ struct regulator_enable_gpio *pin, *n;
+
+ if (!rdev->ena_pin)
+ return;
+
+ /* Free the GPIO only once no regulator is using it any more */
+ list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
+ if (pin->gpio == rdev->ena_pin->gpio) {
+ if (pin->request_count <= 1) {
+ pin->request_count = 0;
+ gpio_free(pin->gpio);
+ list_del(&pin->list);
+ kfree(pin);
+ } else {
+ pin->request_count--;
+ }
+ }
+ }
+}
+
+/*
+ * Balance enable_count of each GPIO against the actual pin state:
+ * the GPIO is driven active on first use (enable_count == 0 on entry)
+ * and driven inactive again once no enabled user remains.
+ */
+static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
+{
+ struct regulator_enable_gpio *pin = rdev->ena_pin;
+
+ if (!pin)
+ return -EINVAL;
+
+ if (enable) {
+ /* Enable GPIO at initial use */
+ if (pin->enable_count == 0)
+ gpio_set_value_cansleep(pin->gpio,
+ !pin->ena_gpio_invert);
+
+ pin->enable_count++;
+ } else {
+ if (pin->enable_count > 1) {
+ pin->enable_count--;
+ return 0;
+ }
+
+ /* Disable GPIO if not used */
+ if (pin->enable_count <= 1) {
+ gpio_set_value_cansleep(pin->gpio,
+ pin->ena_gpio_invert);
+ pin->enable_count = 0;
+ }
+ }
+
+ return 0;
+}
+
static int _regulator_do_enable(struct regulator_dev *rdev)
{
int ret, delay;
@@ -1471,9 +1589,10 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
trace_regulator_enable(rdev_get_name(rdev));
- if (rdev->ena_gpio) {
- gpio_set_value_cansleep(rdev->ena_gpio,
- !rdev->ena_gpio_invert);
+ if (rdev->ena_pin) {
+ ret = regulator_ena_gpio_ctrl(rdev, true);
+ if (ret < 0)
+ return ret;
rdev->ena_gpio_state = 1;
} else if (rdev->desc->ops->enable) {
ret = rdev->desc->ops->enable(rdev);
@@ -1575,9 +1694,10 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
trace_regulator_disable(rdev_get_name(rdev));
- if (rdev->ena_gpio) {
- gpio_set_value_cansleep(rdev->ena_gpio,
- rdev->ena_gpio_invert);
+ if (rdev->ena_pin) {
+ ret = regulator_ena_gpio_ctrl(rdev, false);
+ if (ret < 0)
+ return ret;
rdev->ena_gpio_state = 0;
} else if (rdev->desc->ops->disable) {
@@ -1794,7 +1914,10 @@ int regulator_is_enabled_regmap(struct regulator_dev *rdev)
if (ret != 0)
return ret;
- return (val & rdev->desc->enable_mask) != 0;
+ if (rdev->desc->enable_is_inverted)
+ return (val & rdev->desc->enable_mask) == 0;
+ else
+ return (val & rdev->desc->enable_mask) != 0;
}
EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
@@ -1809,9 +1932,15 @@ EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
*/
int regulator_enable_regmap(struct regulator_dev *rdev)
{
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted)
+ val = 0;
+ else
+ val = rdev->desc->enable_mask;
+
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask,
- rdev->desc->enable_mask);
+ rdev->desc->enable_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_enable_regmap);
@@ -1826,15 +1955,22 @@ EXPORT_SYMBOL_GPL(regulator_enable_regmap);
*/
int regulator_disable_regmap(struct regulator_dev *rdev)
{
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted)
+ val = rdev->desc->enable_mask;
+ else
+ val = 0;
+
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, 0);
+ rdev->desc->enable_mask, val);
}
EXPORT_SYMBOL_GPL(regulator_disable_regmap);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
/* A GPIO control always takes precedence */
- if (rdev->ena_gpio)
+ if (rdev->ena_pin)
return rdev->ena_gpio_state;
/* If we don't know then assume that the regulator is always on */
@@ -2138,6 +2274,37 @@ int regulator_map_voltage_iterate(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
/**
+ * regulator_map_voltage_ascend - map_voltage() for ascending voltage list
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers whose voltage table is sorted in ascending order can use this
+ * as their map_voltage() operation.
+ */
+int regulator_map_voltage_ascend(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int i, ret;
+
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ ret = rdev->desc->ops->list_voltage(rdev, i);
+ if (ret < 0)
+ continue;
+
+ if (ret > max_uV)
+ break;
+
+ if (ret >= min_uV && ret <= max_uV)
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend);
+
+/**
* regulator_map_voltage_linear - map_voltage() for simple linear mappings
*
* @rdev: Regulator to operate on
@@ -3237,7 +3404,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
if (status < 0)
return status;
}
- if (rdev->ena_gpio || ops->is_enabled) {
+ if (rdev->ena_pin || ops->is_enabled) {
status = device_create_file(dev, &dev_attr_state);
if (status < 0)
return status;
@@ -3439,22 +3606,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
- ret = gpio_request_one(config->ena_gpio,
- GPIOF_DIR_OUT | config->ena_gpio_flags,
- rdev_get_name(rdev));
+ ret = regulator_ena_gpio_request(rdev, config);
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
goto wash;
}
- rdev->ena_gpio = config->ena_gpio;
- rdev->ena_gpio_invert = config->ena_gpio_invert;
-
if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
rdev->ena_gpio_state = 1;
- if (rdev->ena_gpio_invert)
+ if (config->ena_gpio_invert)
rdev->ena_gpio_state = !rdev->ena_gpio_state;
}
@@ -3481,7 +3643,14 @@ regulator_register(const struct regulator_desc *regulator_desc,
r = regulator_dev_lookup(dev, supply, &ret);
- if (!r) {
+ if (ret == -ENODEV) {
+ /*
+ * No supply was specified for this regulator and
+ * there will never be one.
+ */
+ ret = 0;
+ goto add_dev;
+ } else if (!r) {
dev_err(dev, "Failed to find supply %s\n", supply);
ret = -EPROBE_DEFER;
goto scrub;
@@ -3499,6 +3668,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
}
+add_dev:
/* add consumers devices */
if (init_data) {
for (i = 0; i < init_data->num_consumer_supplies; i++) {
@@ -3526,8 +3696,7 @@ unset_supplies:
scrub:
if (rdev->supply)
_regulator_put(rdev->supply);
- if (rdev->ena_gpio)
- gpio_free(rdev->ena_gpio);
+ regulator_ena_gpio_free(rdev);
kfree(rdev->constraints);
wash:
device_unregister(&rdev->dev);
@@ -3562,8 +3731,7 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
kfree(rdev->constraints);
- if (rdev->ena_gpio)
- gpio_free(rdev->ena_gpio);
+ regulator_ena_gpio_free(rdev);
device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
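
The regulator_map_voltage_ascend() helper added above lets drivers whose
voltage table is sorted in ascending order replace the generic iterating
lookup; several table-based drivers below (lp3971, lp3972, lp872x, lp8788-buck,
mc13892) switch to it. A minimal sketch of the pattern, with made-up names and
voltages:

        /* Sketch only: ascending table plus the new map_voltage helper. */
        static const unsigned int foo_ldo_voltages[] = {
                1200000, 1800000, 2500000, 2800000, 3300000,
        };

        static struct regulator_ops foo_ldo_ops = {
                .list_voltage    = regulator_list_voltage_table,
                .map_voltage     = regulator_map_voltage_ascend,
                .get_voltage_sel = regulator_get_voltage_sel_regmap,
                .set_voltage_sel = regulator_set_voltage_sel_regmap,
        };

        /* The matching regulator_desc would set .volt_table = foo_ldo_voltages
         * and .n_voltages = ARRAY_SIZE(foo_ldo_voltages). */
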
diff --git a/drivers/regulator/dbx500-prcmu.h b/drivers/regulator/dbx500-prcmu.h
index e763883a44f..c8e51ace9f0 100644
--- a/drivers/regulator/dbx500-prcmu.h
+++ b/drivers/regulator/dbx500-prcmu.h
@@ -21,7 +21,6 @@
* @is_enabled: status of the regulator
* @epod_id: id for EPOD (power domain)
* @is_ramret: RAM retention switch for EPOD (power domain)
- * @operating_point: operating point (only for vape, to be removed)
*
*/
struct dbx500_regulator_info {
@@ -32,7 +31,6 @@ struct dbx500_regulator_info {
u16 epod_id;
bool is_ramret;
bool exclude_from_power_state;
- unsigned int operating_point;
};
void power_state_active_enable(void);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 9165b0c40ed..f0e1ae52bb0 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -219,9 +219,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->owner = THIS_MODULE;
di->rdev = regulator_register(&di->desc, config);
- if (IS_ERR(di->rdev))
- return PTR_ERR(di->rdev);
- return 0;
+ return PTR_RET(di->rdev);
}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 9cb2c0f3451..d8af9e77331 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -163,6 +163,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
static struct regulator_ops lp3971_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_ldo_is_enabled,
.enable = lp3971_ldo_enable,
.disable = lp3971_ldo_disable,
@@ -236,6 +237,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
static struct regulator_ops lp3971_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3971_dcdc_is_enabled,
.enable = lp3971_dcdc_enable,
.disable = lp3971_dcdc_disable,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 0baabcfb578..61e4cf9edf6 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -309,6 +309,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
static struct regulator_ops lp3972_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_ldo_is_enabled,
.enable = lp3972_ldo_enable,
.disable = lp3972_ldo_disable,
@@ -389,6 +390,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
static struct regulator_ops lp3972_dcdc_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.is_enabled = lp3972_dcdc_is_enabled,
.enable = lp3972_dcdc_enable,
.disable = lp3972_dcdc_disable,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 8e3c7ae0047..f5fc4a142cd 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -478,6 +478,7 @@ static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
static struct regulator_ops lp872x_ldo_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
@@ -488,6 +489,7 @@ static struct regulator_ops lp872x_ldo_ops = {
static struct regulator_ops lp8720_buck_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
.get_voltage_sel = lp872x_buck_get_voltage_sel,
.enable = regulator_enable_regmap,
@@ -500,6 +502,7 @@ static struct regulator_ops lp8720_buck_ops = {
static struct regulator_ops lp8725_buck_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp872x_buck_set_voltage_sel,
.get_voltage_sel = lp872x_buck_get_voltage_sel,
.enable = regulator_enable_regmap,
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index 97891a7ea7b..eb1e1e88ae5 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -346,6 +346,7 @@ static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev)
static struct regulator_ops lp8788_buck12_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = lp8788_buck12_set_voltage_sel,
.get_voltage_sel = lp8788_buck12_get_voltage_sel,
.enable = regulator_enable_regmap,
@@ -358,6 +359,7 @@ static struct regulator_ops lp8788_buck12_ops = {
static struct regulator_ops lp8788_buck34_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index cd5a14ad926..0ce2c4c194b 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -156,68 +156,6 @@ static const int lp8788_aldo7_vtbl[] = {
1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
};
-static enum lp8788_ldo_id lp8788_dldo_id[] = {
- DLDO1,
- DLDO2,
- DLDO3,
- DLDO4,
- DLDO5,
- DLDO6,
- DLDO7,
- DLDO8,
- DLDO9,
- DLDO10,
- DLDO11,
- DLDO12,
-};
-
-static enum lp8788_ldo_id lp8788_aldo_id[] = {
- ALDO1,
- ALDO2,
- ALDO3,
- ALDO4,
- ALDO5,
- ALDO6,
- ALDO7,
- ALDO8,
- ALDO9,
- ALDO10,
-};
-
-static int lp8788_ldo_enable(struct regulator_dev *rdev)
-{
- struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-
- if (ldo->en_pin) {
- gpio_set_value(ldo->en_pin->gpio, ENABLE);
- return 0;
- } else {
- return regulator_enable_regmap(rdev);
- }
-}
-
-static int lp8788_ldo_disable(struct regulator_dev *rdev)
-{
- struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-
- if (ldo->en_pin) {
- gpio_set_value(ldo->en_pin->gpio, DISABLE);
- return 0;
- } else {
- return regulator_disable_regmap(rdev);
- }
-}
-
-static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
-{
- struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-
- if (ldo->en_pin)
- return gpio_get_value(ldo->en_pin->gpio) ? 1 : 0;
- else
- return regulator_is_enabled_regmap(rdev);
-}
-
static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
{
struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
@@ -232,38 +170,21 @@ static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
return ENABLE_TIME_USEC * val;
}
-static int lp8788_ldo_fixed_get_voltage(struct regulator_dev *rdev)
-{
- enum lp8788_ldo_id id = rdev_get_id(rdev);
-
- switch (id) {
- case ALDO2 ... ALDO5:
- return 2850000;
- case DLDO12:
- case ALDO8 ... ALDO9:
- return 2500000;
- case ALDO10:
- return 1100000;
- default:
- return -EINVAL;
- }
-}
-
static struct regulator_ops lp8788_ldo_voltage_table_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = lp8788_ldo_enable,
- .disable = lp8788_ldo_disable,
- .is_enabled = lp8788_ldo_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.enable_time = lp8788_ldo_enable_time,
};
static struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
- .get_voltage = lp8788_ldo_fixed_get_voltage,
- .enable = lp8788_ldo_enable,
- .disable = lp8788_ldo_disable,
- .is_enabled = lp8788_ldo_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.enable_time = lp8788_ldo_enable_time,
};
@@ -420,6 +341,7 @@ static struct regulator_desc lp8788_dldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_B,
.enable_mask = LP8788_EN_DLDO12_M,
+ .min_uV = 2500000,
},
};
@@ -446,6 +368,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_B,
.enable_mask = LP8788_EN_ALDO2_M,
+ .min_uV = 2850000,
},
{
.name = "aldo3",
@@ -456,6 +379,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_B,
.enable_mask = LP8788_EN_ALDO3_M,
+ .min_uV = 2850000,
},
{
.name = "aldo4",
@@ -466,6 +390,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_B,
.enable_mask = LP8788_EN_ALDO4_M,
+ .min_uV = 2850000,
},
{
.name = "aldo5",
@@ -476,6 +401,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_C,
.enable_mask = LP8788_EN_ALDO5_M,
+ .min_uV = 2850000,
},
{
.name = "aldo6",
@@ -512,6 +438,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_C,
.enable_mask = LP8788_EN_ALDO8_M,
+ .min_uV = 2500000,
},
{
.name = "aldo9",
@@ -522,6 +449,7 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_C,
.enable_mask = LP8788_EN_ALDO9_M,
+ .min_uV = 2500000,
},
{
.name = "aldo10",
@@ -532,46 +460,14 @@ static struct regulator_desc lp8788_aldo_desc[] = {
.owner = THIS_MODULE,
.enable_reg = LP8788_EN_LDO_C,
.enable_mask = LP8788_EN_ALDO10_M,
+ .min_uV = 1100000,
},
};
-static int lp8788_gpio_request_ldo_en(struct platform_device *pdev,
- struct lp8788_ldo *ldo,
- enum lp8788_ext_ldo_en_id id)
-{
- struct device *dev = &pdev->dev;
- struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
- int ret, gpio, pinstate;
- char *name[] = {
- [EN_ALDO1] = "LP8788_EN_ALDO1",
- [EN_ALDO234] = "LP8788_EN_ALDO234",
- [EN_ALDO5] = "LP8788_EN_ALDO5",
- [EN_ALDO7] = "LP8788_EN_ALDO7",
- [EN_DLDO7] = "LP8788_EN_DLDO7",
- [EN_DLDO911] = "LP8788_EN_DLDO911",
- };
-
- gpio = pin->gpio;
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio: %d\n", gpio);
- return -EINVAL;
- }
-
- pinstate = pin->init_state;
- ret = devm_gpio_request_one(dev, gpio, pinstate, name[id]);
- if (ret == -EBUSY) {
- dev_warn(dev, "gpio%d already used\n", gpio);
- return 0;
- }
-
- return ret;
-}
-
static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
struct lp8788_ldo *ldo,
enum lp8788_ldo_id id)
{
- int ret;
struct lp8788 *lp = ldo->lp;
struct lp8788_platform_data *pdata = lp->pdata;
enum lp8788_ext_ldo_en_id enable_id;
@@ -613,14 +509,7 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
goto set_default_ldo_enable_mode;
ldo->en_pin = pdata->ldo_pin[enable_id];
-
- ret = lp8788_gpio_request_ldo_en(pdev, ldo, enable_id);
- if (ret) {
- ldo->en_pin = NULL;
- goto set_default_ldo_enable_mode;
- }
-
- return ret;
+ return 0;
set_default_ldo_enable_mode:
return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id], 0);
@@ -640,10 +529,15 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
return -ENOMEM;
ldo->lp = lp;
- ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_dldo_id[id]);
+ ret = lp8788_config_ldo_enable_mode(pdev, ldo, id);
if (ret)
return ret;
+ if (ldo->en_pin) {
+ cfg.ena_gpio = ldo->en_pin->gpio;
+ cfg.ena_gpio_flags = ldo->en_pin->init_state;
+ }
+
cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
cfg.driver_data = ldo;
@@ -696,10 +590,15 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
return -ENOMEM;
ldo->lp = lp;
- ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_aldo_id[id]);
+ ret = lp8788_config_ldo_enable_mode(pdev, ldo, id + ALDO1);
if (ret)
return ret;
+ if (ldo->en_pin) {
+ cfg.ena_gpio = ldo->en_pin->gpio;
+ cfg.ena_gpio_flags = ldo->en_pin->init_state;
+ }
+
cfg.dev = pdev->dev.parent;
cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
cfg.driver_data = ldo;
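
With lp8788-ldo now handing its enable pin to the core through config.ena_gpio
instead of toggling it by hand, the shared enable-GPIO bookkeeping added to
core.c above takes care of request and enable refcounting. A rough sketch of
two regulators naming the same pin (the GPIO number and descriptors are
placeholders, not from this patch):

        /* Sketch: the core requests GPIO 42 once and only drives it inactive
         * when neither regulator is enabled any more. */
        struct regulator_config cfg_a = {
                .dev            = dev,
                .ena_gpio       = 42,                  /* placeholder */
                .ena_gpio_flags = GPIOF_OUT_INIT_LOW,
        };
        struct regulator_config cfg_b = cfg_a;         /* same pin, second user */

        rdev_a = regulator_register(&desc_a, &cfg_a);
        rdev_b = regulator_register(&desc_b, &cfg_b);
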
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8c5a54f541b..54af6101581 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -56,7 +56,7 @@ struct max1586_data {
* set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
* As regulator framework doesn't accept voltages to be 0V, we use 1uV.
*/
-static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
+static const unsigned int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
/*
* V3 voltage
@@ -232,8 +232,7 @@ static int max1586_pmic_remove(struct i2c_client *client)
int i;
for (i = 0; i <= MAX1586_V6; i++)
- if (max1586->rdev[i])
- regulator_unregister(max1586->rdev[i]);
+ regulator_unregister(max1586->rdev[i]);
return 0;
}
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index e4586ee8858..20935b1a6ed 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -75,17 +75,20 @@ static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val;
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ int ret, id = rdev_get_id(rdev);
if (id == MAX77686_BUCK1)
val = 0x1;
else
val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+ if (ret)
+ return ret;
+
max77686->opmode[id] = val;
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask,
- val);
+ return 0;
}
/* Some LDOs supports [LPM/Normal]ON mode during suspend state */
@@ -94,7 +97,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
{
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
unsigned int val;
- int id = rdev_get_id(rdev);
+ int ret, id = rdev_get_id(rdev);
/* BUCK[5-9] doesn't support this feature */
if (id >= MAX77686_BUCK5)
@@ -113,10 +116,13 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
return -EINVAL;
}
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+ if (ret)
+ return ret;
+
max77686->opmode[id] = val;
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask,
- val);
+ return 0;
}
/* Some LDOs supports LPM-ON/OFF/Normal-ON mode during suspend state */
@@ -125,6 +131,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
{
unsigned int val;
struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+ int ret;
switch (mode) {
case REGULATOR_MODE_STANDBY: /* switch off */
@@ -142,10 +149,13 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
return -EINVAL;
}
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+ if (ret)
+ return ret;
+
max77686->opmode[rdev_get_id(rdev)] = val;
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask,
- val);
+ return 0;
}
static int max77686_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 3ca14380f22..db6c9be10f3 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -60,36 +60,6 @@ struct max8649_regulator_info {
unsigned ramp_down:1;
};
-/* EN_PD means pulldown on EN input */
-static int max8649_enable(struct regulator_dev *rdev)
-{
- struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD, 0);
-}
-
-/*
- * Applied internal pulldown resistor on EN input pin.
- * If pulldown EN pin outside, it would be better.
- */
-static int max8649_disable(struct regulator_dev *rdev)
-{
- struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD,
- MAX8649_EN_PD);
-}
-
-static int max8649_is_enabled(struct regulator_dev *rdev)
-{
- struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- unsigned int val;
- int ret;
-
- ret = regmap_read(info->regmap, MAX8649_CONTROL, &val);
- if (ret != 0)
- return ret;
- return !((unsigned char)val & MAX8649_EN_PD);
-}
-
static int max8649_enable_time(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
@@ -151,9 +121,9 @@ static struct regulator_ops max8649_dcdc_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
- .enable = max8649_enable,
- .disable = max8649_disable,
- .is_enabled = max8649_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.enable_time = max8649_enable_time,
.set_mode = max8649_set_mode,
.get_mode = max8649_get_mode,
@@ -169,6 +139,9 @@ static struct regulator_desc dcdc_desc = {
.vsel_mask = MAX8649_VOL_MASK,
.min_uV = MAX8649_DCDC_VMIN,
.uV_step = MAX8649_DCDC_STEP,
+ .enable_reg = MAX8649_CONTROL,
+ .enable_mask = MAX8649_EN_PD,
+ .enable_is_inverted = true,
};
static struct regmap_config max8649_regmap_config = {
@@ -275,10 +248,8 @@ static int max8649_regulator_remove(struct i2c_client *client)
{
struct max8649_regulator_info *info = i2c_get_clientdata(client);
- if (info) {
- if (info->regulator)
- regulator_unregister(info->regulator);
- }
+ if (info)
+ regulator_unregister(info->regulator);
return 0;
}
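
max8649 is the first user of the enable_is_inverted flag handled by the regmap
helpers in core.c above: MAX8649_EN_PD is a pull-down that switches the output
off when set, so enable must clear the bit and is_enabled must treat a clear
bit as "on". Any driver with an active-low enable bit can follow the same
pattern; the names below are hypothetical:

        /* Sketch: with enable_is_inverted set, regulator_enable_regmap()
         * clears the mask, regulator_disable_regmap() sets it, and
         * regulator_is_enabled_regmap() reports enabled when the bit is 0. */
        static const struct regulator_desc foo_desc = {
                .name               = "foo",
                .ops                = &foo_ops,        /* regmap helpers */
                .enable_reg         = FOO_CTRL,        /* hypothetical */
                .enable_mask        = FOO_EN_PD,       /* active-low bit */
                .enable_is_inverted = true,
        };
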
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4d7c635c36c..d428ef9a626 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -426,8 +426,7 @@ static int max8660_remove(struct i2c_client *client)
int i;
for (i = 0; i < MAX8660_V_END; i++)
- if (max8660->rdev[i])
- regulator_unregister(max8660->rdev[i]);
+ regulator_unregister(max8660->rdev[i]);
return 0;
}
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 0d5f64a805a..3597da8f0dc 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -246,7 +246,6 @@ static struct max8925_regulator_info max8925_regulator_info[] = {
#ifdef CONFIG_OF
static int max8925_regulator_dt_init(struct platform_device *pdev,
- struct max8925_regulator_info *info,
struct regulator_config *config,
int ridx)
{
@@ -272,7 +271,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
return 0;
}
#else
-#define max8925_regulator_dt_init(w, x, y, z) (-1)
+#define max8925_regulator_dt_init(x, y, z) (-1)
#endif
static int max8925_regulator_probe(struct platform_device *pdev)
@@ -309,7 +308,7 @@ static int max8925_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.driver_data = ri;
- if (max8925_regulator_dt_init(pdev, ri, &config, regulator_idx))
+ if (max8925_regulator_dt_init(pdev, &config, regulator_idx))
if (pdata)
config.init_data = pdata;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index fc7935a19e3..5259c2fea90 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -28,6 +28,9 @@
#include <linux/regulator/max8952.h>
#include <linux/gpio.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
/* Registers */
@@ -126,6 +129,69 @@ static const struct regulator_desc regulator = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_OF
+static struct of_device_id max8952_dt_match[] = {
+ { .compatible = "maxim,max8952" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max8952_dt_match);
+
+static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
+{
+ struct max8952_platform_data *pd;
+ struct device_node *np = dev->of_node;
+ int ret;
+ int i;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "Failed to allocate platform data\n");
+ return NULL;
+ }
+
+ pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
+ pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
+ pd->gpio_en = of_get_named_gpio(np, "max8952,en-gpio", 0);
+
+ if (of_property_read_u32(np, "max8952,default-mode", &pd->default_mode))
+ dev_warn(dev, "Default mode not specified, assuming 0\n");
+
+ ret = of_property_read_u32_array(np, "max8952,dvs-mode-microvolt",
+ pd->dvs_mode, ARRAY_SIZE(pd->dvs_mode));
+ if (ret) {
+ dev_err(dev, "max8952,dvs-mode-microvolt property not specified");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pd->dvs_mode); ++i) {
+ if (pd->dvs_mode[i] < 770000 || pd->dvs_mode[i] > 1400000) {
+ dev_err(dev, "DVS voltage %d out of range\n", i);
+ return NULL;
+ }
+ pd->dvs_mode[i] = (pd->dvs_mode[i] - 770000) / 10000;
+ }
+
+ if (of_property_read_u32(np, "max8952,sync-freq", &pd->sync_freq))
+ dev_warn(dev, "max8952,sync-freq property not specified, defaulting to 26MHz\n");
+
+ if (of_property_read_u32(np, "max8952,ramp-speed", &pd->ramp_speed))
+ dev_warn(dev, "max8952,ramp-speed property not specified, defaulting to 32mV/us\n");
+
+ pd->reg_data = of_get_regulator_init_data(dev, np);
+ if (!pd->reg_data) {
+ dev_err(dev, "Failed to parse regulator init data\n");
+ return NULL;
+ }
+
+ return pd;
+}
+#else
+static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int max8952_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
@@ -136,6 +202,9 @@ static int max8952_pmic_probe(struct i2c_client *client,
int ret = 0, err = 0;
+ if (client->dev.of_node)
+ pdata = max8952_parse_dt(&client->dev);
+
if (!pdata) {
dev_err(&client->dev, "Require the platform data\n");
return -EINVAL;
@@ -154,11 +223,12 @@ static int max8952_pmic_probe(struct i2c_client *client,
max8952->pdata = pdata;
config.dev = max8952->dev;
- config.init_data = &pdata->reg_data;
+ config.init_data = pdata->reg_data;
config.driver_data = max8952;
+ config.of_node = client->dev.of_node;
config.ena_gpio = pdata->gpio_en;
- if (pdata->reg_data.constraints.boot_on)
+ if (pdata->reg_data->constraints.boot_on)
config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
max8952->rdev = regulator_register(&regulator, &config);
@@ -271,6 +341,7 @@ static struct i2c_driver max8952_pmic_driver = {
.remove = max8952_pmic_remove,
.driver = {
.name = "max8952",
+ .of_match_table = of_match_ptr(max8952_dt_match),
},
.id_table = max8952_ids,
};
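
The DT parser added above converts every max8952,dvs-mode-microvolt cell to a
VID selector as (uV - 770000) / 10000 and rejects values outside the
770000..1400000 range, so the property needs one cell per entry of
pd->dvs_mode. A worked example of that conversion (voltages illustrative only):

        /*   dvs-mode-microvolt cell:  1250000  1200000  1050000   950000
         *   (uV - 770000) / 10000  =       48       43       28       18
         */
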
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 9a8ea916300..adb1414e5e3 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -274,15 +274,15 @@ static int max8973_init_dcdc(struct max8973_chip *max,
if (pdata->reg_init_data &&
pdata->reg_init_data->constraints.ramp_delay) {
if (pdata->reg_init_data->constraints.ramp_delay < 25000)
- control1 = MAX8973_RAMP_12mV_PER_US;
+ control1 |= MAX8973_RAMP_12mV_PER_US;
else if (pdata->reg_init_data->constraints.ramp_delay < 50000)
- control1 = MAX8973_RAMP_25mV_PER_US;
+ control1 |= MAX8973_RAMP_25mV_PER_US;
else if (pdata->reg_init_data->constraints.ramp_delay < 200000)
- control1 = MAX8973_RAMP_50mV_PER_US;
+ control1 |= MAX8973_RAMP_50mV_PER_US;
else
- control1 = MAX8973_RAMP_200mV_PER_US;
+ control1 |= MAX8973_RAMP_200mV_PER_US;
} else {
- control1 = MAX8973_RAMP_12mV_PER_US;
+ control1 |= MAX8973_RAMP_12mV_PER_US;
max->desc.ramp_delay = 12500;
}
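
The switch to |= above preserves the other CONTROL1 bits while still mapping
the machine constraint to a slew-rate field. As a sketch, a board asking for a
30000 uV/us ramp falls into the "< 50000" branch and ends up with
MAX8973_RAMP_25mV_PER_US or'ed into control1 (the init data below is purely
illustrative):

        static struct regulator_init_data foo_max8973_init = {
                .constraints = {
                        .ramp_delay = 30000,    /* uV/us, illustrative */
                },
        };
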
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 0ac7a87519b..df20069f053 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1035,8 +1035,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
int i, ret, size, nr_dvs;
u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
- if (IS_ERR_OR_NULL(pdata)) {
- dev_err(pdev->dev.parent, "No platform init data supplied.\n");
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform init data supplied.\n");
return -ENODEV;
}
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index b588f07c7ca..a57a1b15cdb 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -665,14 +665,16 @@ static int max8998_pmic_probe(struct platform_device *pdev)
gpio_is_valid(pdata->buck1_set2)) {
/* Check if SET1 is not equal to 0 */
if (!pdata->buck1_set1) {
- printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
+ dev_err(&pdev->dev,
+ "MAX8998 SET1 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set1);
ret = -EIO;
goto err_out;
}
/* Check if SET2 is not equal to 0 */
if (!pdata->buck1_set2) {
- printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
+ dev_err(&pdev->dev,
+ "MAX8998 SET2 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck1_set2);
ret = -EIO;
goto err_out;
@@ -738,7 +740,8 @@ static int max8998_pmic_probe(struct platform_device *pdev)
if (gpio_is_valid(pdata->buck2_set3)) {
/* Check if SET3 is not equal to 0 */
if (!pdata->buck2_set3) {
- printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
+ dev_err(&pdev->dev,
+ "MAX8998 SET3 GPIO defined as 0 !\n");
WARN_ON(!pdata->buck2_set3);
ret = -EIO;
goto err_out;
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index c46c6705cd7..fdf7f0a0909 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -398,33 +398,51 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
- struct mc13xxx_regulator_init_data *init_data;
+ struct mc13xxx_regulator_init_data *mc13xxx_data;
struct regulator_config config = { };
- int i, ret;
+ int i, ret, num_regulators;
- dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
+ num_regulators = mc13xxx_get_num_regulators_dt(pdev);
- if (!pdata)
+ if (num_regulators <= 0 && pdata)
+ num_regulators = pdata->num_regulators;
+ if (num_regulators <= 0)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
- pdata->num_regulators * sizeof(priv->regulators[0]),
+ num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->num_regulators = num_regulators;
priv->mc13xxx_regulators = mc13783_regulators;
priv->mc13xxx = mc13783;
+ platform_set_drvdata(pdev, priv);
- for (i = 0; i < pdata->num_regulators; i++) {
- struct regulator_desc *desc;
+ mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13783_regulators,
+ ARRAY_SIZE(mc13783_regulators));
- init_data = &pdata->regulators[i];
- desc = &mc13783_regulators[init_data->id].desc;
+ for (i = 0; i < priv->num_regulators; i++) {
+ struct regulator_init_data *init_data;
+ struct regulator_desc *desc;
+ struct device_node *node = NULL;
+ int id;
+
+ if (mc13xxx_data) {
+ id = mc13xxx_data[i].id;
+ init_data = mc13xxx_data[i].init_data;
+ node = mc13xxx_data[i].node;
+ } else {
+ id = pdata->regulators[i].id;
+ init_data = pdata->regulators[i].init_data;
+ }
+ desc = &mc13783_regulators[id].desc;
config.dev = &pdev->dev;
- config.init_data = init_data->init_data;
+ config.init_data = init_data;
config.driver_data = priv;
+ config.of_node = node;
priv->regulators[i] = regulator_register(desc, &config);
if (IS_ERR(priv->regulators[i])) {
@@ -435,8 +453,6 @@ static int mc13783_regulator_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, priv);
-
return 0;
err:
while (--i >= 0)
@@ -448,13 +464,11 @@ err:
static int mc13783_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
int i;
platform_set_drvdata(pdev, NULL);
- for (i = 0; i < pdata->num_regulators; i++)
+ for (i = 0; i < priv->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
return 0;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 9891aec47b5..b716283a876 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -465,13 +465,13 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
*/
if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) {
+ mask |= MC13892_SWITCHERS0_SWxHI;
+
if (volt > 1375000) {
reg_value -= MC13892_SWxHI_SEL_OFFSET;
reg_value |= MC13892_SWITCHERS0_SWxHI;
- mask |= MC13892_SWITCHERS0_SWxHI;
- } else if (volt < 1100000) {
+ } else {
reg_value &= ~MC13892_SWITCHERS0_SWxHI;
- mask |= MC13892_SWITCHERS0_SWxHI;
}
}
@@ -485,6 +485,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
static struct regulator_ops mc13892_sw_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
.get_voltage_sel = mc13892_sw_regulator_get_voltage_sel,
};
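
The .map_voltage additions throughout this series point at regulator_map_voltage_ascend(), which is only valid when list_voltage() increases monotonically with the selector, as the mc13892 tables do. A minimal sketch of that helper's behaviour (not the core's exact code):

#include <linux/regulator/driver.h>

/* Return the lowest selector whose voltage lies in [min_uV, max_uV],
 * assuming list_voltage() ascends with the selector so the scan can
 * stop as soon as a voltage exceeds max_uV.
 */
static int map_voltage_ascend_sketch(struct regulator_dev *rdev,
                                     int min_uV, int max_uV)
{
        int i, uV;

        for (i = 0; i < rdev->desc->n_voltages; i++) {
                uV = rdev->desc->ops->list_voltage(rdev, i);
                if (uV < 0)
                        continue;       /* gap in the table */
                if (uV > max_uV)
                        break;          /* ascending: nothing later can fit */
                if (uV >= min_uV)
                        return i;
        }

        return -EINVAL;
}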
@@ -535,7 +536,7 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
struct mc13xxx_regulator_init_data *mc13xxx_data;
struct regulator_config config = { };
int i, ret;
- int num_regulators = 0, num_parsed;
+ int num_regulators = 0;
u32 val;
num_regulators = mc13xxx_get_num_regulators_dt(pdev);
@@ -545,8 +546,6 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
if (num_regulators <= 0)
return -EINVAL;
- num_parsed = num_regulators;
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
@@ -589,40 +588,9 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
= mc13892_vcam_get_mode;
mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
- ARRAY_SIZE(mc13892_regulators),
- &num_parsed);
-
- /*
- * Perform a little sanity check on the regulator tree - if we found
- * a number of regulators from mc13xxx_get_num_regulators_dt and
- * then parsed a smaller number in mc13xxx_parse_regulators_dt then
- * there is a regulator defined in the regulators node which has
- * not matched any usable regulator in the driver. In this case,
- * there is one missing and what will happen is the first regulator
- * will get registered again.
- *
- * Fix this by basically making our number of registerable regulators
- * equal to the number of regulators we parsed. We are allocating
- * too much memory for priv, but this is unavoidable at this point.
- *
- * As an example of how this can happen, try making a typo in your
- * regulators node (vviohi {} instead of viohi {}) so that the name
- * does not match..
- *
- * The check will basically pass for platform data (non-DT) because
- * mc13xxx_parse_regulators_dt for !CONFIG_OF will not touch num_parsed.
- *
- */
- if (num_parsed != num_regulators) {
- dev_warn(&pdev->dev,
- "parsed %d != regulators %d - check your device tree!\n",
- num_parsed, num_regulators);
-
- num_regulators = num_parsed;
- priv->num_regulators = num_regulators;
- }
+ ARRAY_SIZE(mc13892_regulators));
- for (i = 0; i < num_regulators; i++) {
+ for (i = 0; i < priv->num_regulators; i++) {
struct regulator_init_data *init_data;
struct regulator_desc *desc;
struct device_node *node = NULL;
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 23cf9f9c383..da485928230 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -180,15 +180,13 @@ EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators, int *num_parsed)
+ int num_regulators)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
struct mc13xxx_regulator_init_data *data, *p;
struct device_node *parent, *child;
int i, parsed = 0;
- *num_parsed = 0;
-
of_node_get(pdev->dev.parent->of_node);
parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
if (!parent)
@@ -204,10 +202,13 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
p = data;
for_each_child_of_node(parent, child) {
+ int found = 0;
+
for (i = 0; i < num_regulators; i++) {
+ if (!regulators[i].desc.name)
+ continue;
if (!of_node_cmp(child->name,
regulators[i].desc.name)) {
-
p->id = i;
p->init_data = of_get_regulator_init_data(
&pdev->dev, child);
@@ -215,13 +216,19 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
p++;
parsed++;
+ found = 1;
break;
}
}
+
+ if (!found)
+ dev_warn(&pdev->dev,
+ "Unknown regulator: %s\n", child->name);
}
of_node_put(parent);
- *num_parsed = parsed;
+ priv->num_regulators = parsed;
+
return data;
}
EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
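
The new warning makes mismatched child nodes visible instead of silently corrupting registration. A worked example, reusing the node names from the comment removed in mc13892-regulator.c above:

/* regulators {
 *         vviohi { ... };    <- typo for "viohi", matches no desc.name
 * };
 *
 * Previously the unmatched entry left a hole that could make the first
 * regulator register twice; with this change the node is skipped,
 * "Unknown regulator: vviohi" is logged, and priv->num_regulators ends
 * up equal to the number of child nodes that actually matched.
 */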
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 007f83387fd..06c8903f182 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -39,7 +39,7 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators, int *num_parsed);
+ int num_regulators);
#else
static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
@@ -48,7 +48,7 @@ static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
struct platform_device *pdev, struct mc13xxx_regulator *regulators,
- int num_regulators, int *num_parsed)
+ int num_regulators)
{
return NULL;
}
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 39cf1460678..92ceed0fc65 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -1,7 +1,7 @@
/*
* Driver for Regulator part of Palmas PMIC Chips
*
- * Copyright 2011-2012 Texas Instruments Inc.
+ * Copyright 2011-2013 Texas Instruments Inc.
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
* Author: Ian Lartey <ian@slimlogic.co.uk>
@@ -29,6 +29,7 @@
struct regs_info {
char *name;
+ char *sname;
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
@@ -37,115 +38,159 @@ struct regs_info {
static const struct regs_info palmas_regs_info[] = {
{
.name = "SMPS12",
+ .sname = "smps1-in",
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
},
{
.name = "SMPS123",
+ .sname = "smps1-in",
.vsel_addr = PALMAS_SMPS12_VOLTAGE,
.ctrl_addr = PALMAS_SMPS12_CTRL,
.tstep_addr = PALMAS_SMPS12_TSTEP,
},
{
.name = "SMPS3",
+ .sname = "smps3-in",
.vsel_addr = PALMAS_SMPS3_VOLTAGE,
.ctrl_addr = PALMAS_SMPS3_CTRL,
},
{
.name = "SMPS45",
+ .sname = "smps4-in",
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
},
{
.name = "SMPS457",
+ .sname = "smps4-in",
.vsel_addr = PALMAS_SMPS45_VOLTAGE,
.ctrl_addr = PALMAS_SMPS45_CTRL,
.tstep_addr = PALMAS_SMPS45_TSTEP,
},
{
.name = "SMPS6",
+ .sname = "smps6-in",
.vsel_addr = PALMAS_SMPS6_VOLTAGE,
.ctrl_addr = PALMAS_SMPS6_CTRL,
.tstep_addr = PALMAS_SMPS6_TSTEP,
},
{
.name = "SMPS7",
+ .sname = "smps7-in",
.vsel_addr = PALMAS_SMPS7_VOLTAGE,
.ctrl_addr = PALMAS_SMPS7_CTRL,
},
{
.name = "SMPS8",
+ .sname = "smps8-in",
.vsel_addr = PALMAS_SMPS8_VOLTAGE,
.ctrl_addr = PALMAS_SMPS8_CTRL,
.tstep_addr = PALMAS_SMPS8_TSTEP,
},
{
.name = "SMPS9",
+ .sname = "smps9-in",
.vsel_addr = PALMAS_SMPS9_VOLTAGE,
.ctrl_addr = PALMAS_SMPS9_CTRL,
},
{
.name = "SMPS10",
+ .sname = "smps10-in",
+ .ctrl_addr = PALMAS_SMPS10_CTRL,
},
{
.name = "LDO1",
+ .sname = "ldo1-in",
.vsel_addr = PALMAS_LDO1_VOLTAGE,
.ctrl_addr = PALMAS_LDO1_CTRL,
},
{
.name = "LDO2",
+ .sname = "ldo2-in",
.vsel_addr = PALMAS_LDO2_VOLTAGE,
.ctrl_addr = PALMAS_LDO2_CTRL,
},
{
.name = "LDO3",
+ .sname = "ldo3-in",
.vsel_addr = PALMAS_LDO3_VOLTAGE,
.ctrl_addr = PALMAS_LDO3_CTRL,
},
{
.name = "LDO4",
+ .sname = "ldo4-in",
.vsel_addr = PALMAS_LDO4_VOLTAGE,
.ctrl_addr = PALMAS_LDO4_CTRL,
},
{
.name = "LDO5",
+ .sname = "ldo5-in",
.vsel_addr = PALMAS_LDO5_VOLTAGE,
.ctrl_addr = PALMAS_LDO5_CTRL,
},
{
.name = "LDO6",
+ .sname = "ldo6-in",
.vsel_addr = PALMAS_LDO6_VOLTAGE,
.ctrl_addr = PALMAS_LDO6_CTRL,
},
{
.name = "LDO7",
+ .sname = "ldo7-in",
.vsel_addr = PALMAS_LDO7_VOLTAGE,
.ctrl_addr = PALMAS_LDO7_CTRL,
},
{
.name = "LDO8",
+ .sname = "ldo8-in",
.vsel_addr = PALMAS_LDO8_VOLTAGE,
.ctrl_addr = PALMAS_LDO8_CTRL,
},
{
.name = "LDO9",
+ .sname = "ldo9-in",
.vsel_addr = PALMAS_LDO9_VOLTAGE,
.ctrl_addr = PALMAS_LDO9_CTRL,
},
{
.name = "LDOLN",
+ .sname = "ldoln-in",
.vsel_addr = PALMAS_LDOLN_VOLTAGE,
.ctrl_addr = PALMAS_LDOLN_CTRL,
},
{
.name = "LDOUSB",
+ .sname = "ldousb-in",
.vsel_addr = PALMAS_LDOUSB_VOLTAGE,
.ctrl_addr = PALMAS_LDOUSB_CTRL,
},
+ {
+ .name = "REGEN1",
+ .ctrl_addr = PALMAS_REGEN1_CTRL,
+ },
+ {
+ .name = "REGEN2",
+ .ctrl_addr = PALMAS_REGEN2_CTRL,
+ },
+ {
+ .name = "REGEN3",
+ .ctrl_addr = PALMAS_REGEN3_CTRL,
+ },
+ {
+ .name = "SYSEN1",
+ .ctrl_addr = PALMAS_SYSEN1_CTRL,
+ },
+ {
+ .name = "SYSEN2",
+ .ctrl_addr = PALMAS_SYSEN2_CTRL,
+ },
};
+static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500};
+
#define SMPS_CTRL_MODE_OFF 0x00
#define SMPS_CTRL_MODE_ON 0x01
#define SMPS_CTRL_MODE_ECO 0x02
@@ -231,7 +276,10 @@ static int palmas_enable_smps(struct regulator_dev *dev)
palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
- reg |= SMPS_CTRL_MODE_ON;
+ if (pmic->current_reg_mode[id])
+ reg |= pmic->current_reg_mode[id];
+ else
+ reg |= SMPS_CTRL_MODE_ON;
palmas_smps_write(pmic->palmas, palmas_regs_info[id].ctrl_addr, reg);
@@ -253,16 +301,19 @@ static int palmas_disable_smps(struct regulator_dev *dev)
return 0;
}
-
static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
{
struct palmas_pmic *pmic = rdev_get_drvdata(dev);
int id = rdev_get_id(dev);
unsigned int reg;
+ bool rail_enable = true;
palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+ if (reg == SMPS_CTRL_MODE_OFF)
+ rail_enable = false;
+
switch (mode) {
case REGULATOR_MODE_NORMAL:
reg |= SMPS_CTRL_MODE_ON;
@@ -276,8 +327,11 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
default:
return -EINVAL;
}
- palmas_smps_write(pmic->palmas, palmas_regs_info[id].ctrl_addr, reg);
+ pmic->current_reg_mode[id] = reg & PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+ if (rail_enable)
+ palmas_smps_write(pmic->palmas,
+ palmas_regs_info[id].ctrl_addr, reg);
return 0;
}
@@ -287,9 +341,7 @@ static unsigned int palmas_get_mode_smps(struct regulator_dev *dev)
int id = rdev_get_id(dev);
unsigned int reg;
- palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
- reg &= PALMAS_SMPS12_CTRL_STATUS_MASK;
- reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+ reg = pmic->current_reg_mode[id] & PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
switch (reg) {
case SMPS_CTRL_MODE_ON:
@@ -356,6 +408,63 @@ static int palmas_map_voltage_smps(struct regulator_dev *rdev,
return ret;
}
+static int palma_smps_set_voltage_smps_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector, unsigned int new_selector)
+{
+ struct palmas_pmic *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int old_uv, new_uv;
+ unsigned int ramp_delay = pmic->ramp_delay[id];
+
+ if (!ramp_delay)
+ return 0;
+
+ old_uv = palmas_list_voltage_smps(rdev, old_selector);
+ if (old_uv < 0)
+ return old_uv;
+
+ new_uv = palmas_list_voltage_smps(rdev, new_selector);
+ if (new_uv < 0)
+ return new_uv;
+
+ return DIV_ROUND_UP(abs(old_uv - new_uv), ramp_delay);
+}
+
+static int palmas_smps_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ struct palmas_pmic *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ unsigned int reg = 0;
+ unsigned int addr = palmas_regs_info[id].tstep_addr;
+ int ret;
+
+ /* SMPS3 and SMPS7 do not have tstep_addr setting */
+ switch (id) {
+ case PALMAS_REG_SMPS3:
+ case PALMAS_REG_SMPS7:
+ return 0;
+ }
+
+ if (ramp_delay <= 0)
+ reg = 0;
+ else if (ramp_delay <= 2500)
+ reg = 3;
+ else if (ramp_delay <= 5000)
+ reg = 2;
+ else
+ reg = 1;
+
+ ret = palmas_smps_write(pmic->palmas, addr, reg);
+ if (ret < 0) {
+ dev_err(pmic->palmas->dev, "TSTEP write failed: %d\n", ret);
+ return ret;
+ }
+
+ pmic->ramp_delay[id] = palmas_smps_ramp_delay[reg];
+ return ret;
+}
+
static struct regulator_ops palmas_ops_smps = {
.is_enabled = palmas_is_enabled_smps,
.enable = palmas_enable_smps,
@@ -366,6 +475,8 @@ static struct regulator_ops palmas_ops_smps = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = palmas_list_voltage_smps,
.map_voltage = palmas_map_voltage_smps,
+ .set_voltage_time_sel = palma_smps_set_voltage_smps_time_sel,
+ .set_ramp_delay = palmas_smps_set_ramp_delay,
};
static struct regulator_ops palmas_ops_smps10 = {
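
The two helpers added earlier in this file are wired into palmas_ops_smps above as .set_voltage_time_sel and .set_ramp_delay. How the numbers combine, as a worked example against the palmas_smps_ramp_delay table (values in uV/us):

/* Requested ramp_delay  ->  TSTEP value  ->  effective ramp (uV/us)
 *        <= 0                    0             0  (stepping disabled)
 *        <= 2500                 3             2500
 *        <= 5000                 2             5000
 *        anything larger         1             10000
 *
 * With a 10000 uV/us ramp, moving an SMPS from 1.10 V to 1.21 V is a
 * 110000 uV change, so palma_smps_set_voltage_smps_time_sel() returns
 * DIV_ROUND_UP(110000, 10000) = 11 us of settling time.
 */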
@@ -401,6 +512,12 @@ static struct regulator_ops palmas_ops_ldo = {
.map_voltage = regulator_map_voltage_linear,
};
+static struct regulator_ops palmas_ops_extreg = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
/*
* setup the hardware based sleep configuration of the SMPS/LDO regulators
* from the platform data. This is different to the software based control
@@ -422,40 +539,32 @@ static int palmas_smps_init(struct palmas *palmas, int id,
switch (id) {
case PALMAS_REG_SMPS10:
- if (reg_init->mode_sleep) {
- reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
+ reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
+ if (reg_init->mode_sleep)
reg |= reg_init->mode_sleep <<
PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
- }
break;
default:
if (reg_init->warm_reset)
reg |= PALMAS_SMPS12_CTRL_WR_S;
+ else
+ reg &= ~PALMAS_SMPS12_CTRL_WR_S;
if (reg_init->roof_floor)
reg |= PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN;
+ else
+ reg &= ~PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN;
- if (reg_init->mode_sleep) {
- reg &= ~PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK;
+ reg &= ~PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK;
+ if (reg_init->mode_sleep)
reg |= reg_init->mode_sleep <<
PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT;
- }
}
ret = palmas_smps_write(palmas, addr, reg);
if (ret)
return ret;
- if (palmas_regs_info[id].tstep_addr && reg_init->tstep) {
- addr = palmas_regs_info[id].tstep_addr;
-
- reg = reg_init->tstep & PALMAS_SMPS12_TSTEP_TSTEP_MASK;
-
- ret = palmas_smps_write(palmas, addr, reg);
- if (ret)
- return ret;
- }
-
if (palmas_regs_info[id].vsel_addr && reg_init->vsel) {
addr = palmas_regs_info[id].vsel_addr;
@@ -485,9 +594,13 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
if (reg_init->warm_reset)
reg |= PALMAS_LDO1_CTRL_WR_S;
+ else
+ reg &= ~PALMAS_LDO1_CTRL_WR_S;
if (reg_init->mode_sleep)
reg |= PALMAS_LDO1_CTRL_MODE_SLEEP;
+ else
+ reg &= ~PALMAS_LDO1_CTRL_MODE_SLEEP;
ret = palmas_ldo_write(palmas, addr, reg);
if (ret)
@@ -496,6 +609,68 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
return 0;
}
+static int palmas_extreg_init(struct palmas *palmas, int id,
+ struct palmas_reg_init *reg_init)
+{
+ unsigned int addr;
+ int ret;
+ unsigned int val = 0;
+
+ addr = palmas_regs_info[id].ctrl_addr;
+
+ if (reg_init->mode_sleep)
+ val = PALMAS_REGEN1_CTRL_MODE_SLEEP;
+
+ ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
+ addr, PALMAS_REGEN1_CTRL_MODE_SLEEP, val);
+ if (ret < 0) {
+ dev_err(palmas->dev, "Resource reg 0x%02x update failed %d\n",
+ addr, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void palmas_enable_ldo8_track(struct palmas *palmas)
+{
+ unsigned int reg;
+ unsigned int addr;
+ int ret;
+
+ addr = palmas_regs_info[PALMAS_REG_LDO8].ctrl_addr;
+
+ ret = palmas_ldo_read(palmas, addr, &reg);
+ if (ret) {
+ dev_err(palmas->dev, "Error in reading ldo8 control reg\n");
+ return;
+ }
+
+ reg |= PALMAS_LDO8_CTRL_LDO_TRACKING_EN;
+ ret = palmas_ldo_write(palmas, addr, reg);
+ if (ret < 0) {
+ dev_err(palmas->dev, "Error in enabling tracking mode\n");
+ return;
+ }
+ /*
+ * When SMPS45 is set to off and LDO8 tracking is enabled, the LDO8
+ * output is defined by the LDO8_VOLTAGE.VSEL register divided by two,
+ * and can be set from 0.45 to 1.65 V.
+ */
+ addr = palmas_regs_info[PALMAS_REG_LDO8].vsel_addr;
+ ret = palmas_ldo_read(palmas, addr, &reg);
+ if (ret) {
+ dev_err(palmas->dev, "Error in reading ldo8 voltage reg\n");
+ return;
+ }
+
+ reg = (reg << 1) & PALMAS_LDO8_VOLTAGE_VSEL_MASK;
+ ret = palmas_ldo_write(palmas, addr, reg);
+ if (ret < 0)
+ dev_err(palmas->dev, "Error in setting ldo8 voltage reg\n");
+
+ return;
+}
+
static struct of_regulator_match palmas_matches[] = {
{ .name = "smps12", },
{ .name = "smps123", },
@@ -518,6 +693,11 @@ static struct of_regulator_match palmas_matches[] = {
{ .name = "ldo9", },
{ .name = "ldoln", },
{ .name = "ldousb", },
+ { .name = "regen1", },
+ { .name = "regen2", },
+ { .name = "regen3", },
+ { .name = "sysen1", },
+ { .name = "sysen2", },
};
static void palmas_dt_to_pdata(struct device *dev,
@@ -553,39 +733,36 @@ static void palmas_dt_to_pdata(struct device *dev,
pdata->reg_init[idx] = devm_kzalloc(dev,
sizeof(struct palmas_reg_init), GFP_KERNEL);
- ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,warm_reset", &prop);
- if (!ret)
- pdata->reg_init[idx]->warm_reset = prop;
+ pdata->reg_init[idx]->warm_reset =
+ of_property_read_bool(palmas_matches[idx].of_node,
+ "ti,warm-reset");
- ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,roof_floor", &prop);
- if (!ret)
- pdata->reg_init[idx]->roof_floor = prop;
+ pdata->reg_init[idx]->roof_floor =
+ of_property_read_bool(palmas_matches[idx].of_node,
+ "ti,roof-floor");
ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,mode_sleep", &prop);
+ "ti,mode-sleep", &prop);
if (!ret)
pdata->reg_init[idx]->mode_sleep = prop;
- ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,tstep", &prop);
- if (!ret)
- pdata->reg_init[idx]->tstep = prop;
+ ret = of_property_read_bool(palmas_matches[idx].of_node,
+ "ti,smps-range");
+ if (ret)
+ pdata->reg_init[idx]->vsel =
+ PALMAS_SMPS12_VOLTAGE_RANGE;
- ret = of_property_read_u32(palmas_matches[idx].of_node,
- "ti,vsel", &prop);
- if (!ret)
- pdata->reg_init[idx]->vsel = prop;
+ if (idx == PALMAS_REG_LDO8)
+ pdata->enable_ldo8_tracking = of_property_read_bool(
+ palmas_matches[idx].of_node,
+ "ti,enable-ldo8-tracking");
}
- ret = of_property_read_u32(node, "ti,ldo6_vibrator", &prop);
- if (!ret)
- pdata->ldo6_vibrator = prop;
+ pdata->ldo6_vibrator = of_property_read_bool(node, "ti,ldo6-vibrator");
}
-static int palmas_probe(struct platform_device *pdev)
+static int palmas_regulators_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct palmas_pmic_platform_data *pdata = pdev->dev.platform_data;
@@ -630,6 +807,7 @@ static int palmas_probe(struct platform_device *pdev)
config.driver_data = pmic;
for (id = 0; id < PALMAS_REG_LDO1; id++) {
+ bool ramp_delay_support = false;
/*
* Miss out regulators which are not available due
@@ -640,19 +818,42 @@ static int palmas_probe(struct platform_device *pdev)
case PALMAS_REG_SMPS3:
if (pmic->smps123)
continue;
+ if (id == PALMAS_REG_SMPS12)
+ ramp_delay_support = true;
break;
case PALMAS_REG_SMPS123:
if (!pmic->smps123)
continue;
+ ramp_delay_support = true;
break;
case PALMAS_REG_SMPS45:
case PALMAS_REG_SMPS7:
if (pmic->smps457)
continue;
+ if (id == PALMAS_REG_SMPS45)
+ ramp_delay_support = true;
break;
case PALMAS_REG_SMPS457:
if (!pmic->smps457)
continue;
+ ramp_delay_support = true;
+ break;
+ }
+
+ if ((id == PALMAS_REG_SMPS6) || (id == PALMAS_REG_SMPS8))
+ ramp_delay_support = true;
+
+ if (ramp_delay_support) {
+ addr = palmas_regs_info[id].tstep_addr;
+ ret = palmas_smps_read(pmic->palmas, addr, &reg);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "reading TSTEP reg failed: %d\n", ret);
+ goto err_unregister_regulator;
+ }
+ pmic->desc[id].ramp_delay =
+ palmas_smps_ramp_delay[reg & 0x3];
+ pmic->ramp_delay[id] = pmic->desc[id].ramp_delay;
}
/* Initialise sleep/init values from platform data */
@@ -686,7 +887,8 @@ static int palmas_probe(struct platform_device *pdev)
/*
* Read and store the RANGE bit for later use
* This must be done before regulator is probed,
- * otherwise we error in probe with unsupportable ranges.
+ * otherwise we error in probe with unsupportable
+ * ranges. Read the current smps mode for later use.
*/
addr = palmas_regs_info[id].vsel_addr;
@@ -703,6 +905,14 @@ static int palmas_probe(struct platform_device *pdev)
palmas_regs_info[id].vsel_addr);
pmic->desc[id].vsel_mask =
PALMAS_SMPS12_VOLTAGE_VSEL_MASK;
+
+ /* Read the smps mode for later use. */
+ addr = palmas_regs_info[id].ctrl_addr;
+ ret = palmas_smps_read(pmic->palmas, addr, &reg);
+ if (ret)
+ goto err_unregister_regulator;
+ pmic->current_reg_mode[id] = reg &
+ PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
}
pmic->desc[id].type = REGULATOR_VOLTAGE;
@@ -713,6 +923,7 @@ static int palmas_probe(struct platform_device *pdev)
else
config.init_data = NULL;
+ pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
rdev = regulator_register(&pmic->desc[id], &config);
@@ -738,27 +949,49 @@ static int palmas_probe(struct platform_device *pdev)
/* Register the regulators */
pmic->desc[id].name = palmas_regs_info[id].name;
pmic->desc[id].id = id;
- pmic->desc[id].n_voltages = PALMAS_LDO_NUM_VOLTAGES;
-
- pmic->desc[id].ops = &palmas_ops_ldo;
-
pmic->desc[id].type = REGULATOR_VOLTAGE;
pmic->desc[id].owner = THIS_MODULE;
- pmic->desc[id].min_uV = 900000;
- pmic->desc[id].uV_step = 50000;
- pmic->desc[id].linear_min_sel = 1;
- pmic->desc[id].vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+
+ if (id < PALMAS_REG_REGEN1) {
+ pmic->desc[id].n_voltages = PALMAS_LDO_NUM_VOLTAGES;
+ pmic->desc[id].ops = &palmas_ops_ldo;
+ pmic->desc[id].min_uV = 900000;
+ pmic->desc[id].uV_step = 50000;
+ pmic->desc[id].linear_min_sel = 1;
+ pmic->desc[id].vsel_reg =
+ PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
palmas_regs_info[id].vsel_addr);
- pmic->desc[id].vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK;
- pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+ pmic->desc[id].vsel_mask =
+ PALMAS_LDO1_VOLTAGE_VSEL_MASK;
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+ palmas_regs_info[id].ctrl_addr);
+ pmic->desc[id].enable_mask =
+ PALMAS_LDO1_CTRL_MODE_ACTIVE;
+
+ /* Check if LDO8 is in tracking mode or not */
+ if (pdata && (id == PALMAS_REG_LDO8) &&
+ pdata->enable_ldo8_tracking) {
+ palmas_enable_ldo8_track(palmas);
+ pmic->desc[id].min_uV = 450000;
+ pmic->desc[id].uV_step = 25000;
+ }
+ } else {
+ pmic->desc[id].n_voltages = 1;
+ pmic->desc[id].ops = &palmas_ops_extreg;
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_RESOURCE_BASE,
palmas_regs_info[id].ctrl_addr);
- pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
+ pmic->desc[id].enable_mask =
+ PALMAS_REGEN1_CTRL_MODE_ACTIVE;
+ }
if (pdata)
config.init_data = pdata->reg_data[id];
else
config.init_data = NULL;
+ pmic->desc[id].supply_name = palmas_regs_info[id].sname;
config.of_node = palmas_matches[id].of_node;
rdev = regulator_register(&pmic->desc[id], &config);
@@ -777,7 +1010,12 @@ static int palmas_probe(struct platform_device *pdev)
if (pdata) {
reg_init = pdata->reg_init[id];
if (reg_init) {
- ret = palmas_ldo_init(palmas, id, reg_init);
+ if (id < PALMAS_REG_REGEN1)
+ ret = palmas_ldo_init(palmas,
+ id, reg_init);
+ else
+ ret = palmas_extreg_init(palmas,
+ id, reg_init);
if (ret) {
regulator_unregister(pmic->rdev[id]);
goto err_unregister_regulator;
@@ -786,6 +1024,7 @@ static int palmas_probe(struct platform_device *pdev)
}
}
+
return 0;
err_unregister_regulator:
@@ -794,7 +1033,7 @@ err_unregister_regulator:
return ret;
}
-static int palmas_remove(struct platform_device *pdev)
+static int palmas_regulators_remove(struct platform_device *pdev)
{
struct palmas_pmic *pmic = platform_get_drvdata(pdev);
int id;
@@ -806,6 +1045,12 @@ static int palmas_remove(struct platform_device *pdev)
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-pmic", },
+ { .compatible = "ti,twl6035-pmic", },
+ { .compatible = "ti,twl6036-pmic", },
+ { .compatible = "ti,twl6037-pmic", },
+ { .compatible = "ti,tps65913-pmic", },
+ { .compatible = "ti,tps65914-pmic", },
+ { .compatible = "ti,tps80036-pmic", },
{ /* end */ }
};
@@ -815,8 +1060,8 @@ static struct platform_driver palmas_driver = {
.of_match_table = of_palmas_match_tbl,
.owner = THIS_MODULE,
},
- .probe = palmas_probe,
- .remove = palmas_remove,
+ .probe = palmas_regulators_probe,
+ .remove = palmas_regulators_remove,
};
static int __init palmas_init(void)
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 9e6f78694bf..5885b450459 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -49,10 +49,6 @@ struct rc5t583_regulator_info {
struct rc5t583_regulator {
struct rc5t583_regulator_info *reg_info;
-
- /* Devices */
- struct device *dev;
- struct rc5t583 *mfd;
struct regulator_dev *rdev;
};
@@ -155,8 +151,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
reg = &regs[id];
ri = &rc5t583_reg_info[id];
reg->reg_info = ri;
- reg->mfd = rc5t583;
- reg->dev = &pdev->dev;
if (ri->deepsleep_id == RC5T583_DS_NONE)
goto skip_ext_pwr_config;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 8a831947c35..c24448bc43c 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -549,7 +549,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
pdata->num_regulators, GFP_KERNEL);
- if (!rdata) {
+ if (!rmode) {
dev_err(iodev->dev,
"could not allocate memory for regulator mode\n");
return -ENOMEM;
@@ -923,8 +923,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
return 0;
err:
for (i = 0; i < s5m8767->num_regulators; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
+ regulator_unregister(rdev[i]);
return ret;
}
@@ -936,8 +935,7 @@ static int s5m8767_pmic_remove(struct platform_device *pdev)
int i;
for (i = 0; i < s5m8767->num_regulators; i++)
- if (rdev[i])
- regulator_unregister(rdev[i]);
+ regulator_unregister(rdev[i]);
return 0;
}
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index acbd63fde41..612919c3081 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -278,7 +278,7 @@ static int tps62360_init_dcdc(struct tps62360_chip *tps,
__func__, REG_RAMPCTRL, ret);
return ret;
}
- ramp_ctrl = (ramp_ctrl >> 4) & 0x7;
+ ramp_ctrl = (ramp_ctrl >> 5) & 0x7;
/* ramp mV/us = 32/(2^ramp_ctrl) */
tps->desc.ramp_delay = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
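
With the field now taken from bits [7:5] of REG_RAMPCTRL, the formula in the comment works out as follows (worked example):

/* ramp mV/us = 32 / 2^ramp_ctrl, stored as uV/us:
 *
 *   ramp_ctrl = 0  ->  32 mV/us   ->  DIV_ROUND_UP(32000, 1)   = 32000 uV/us
 *   ramp_ctrl = 3  ->   4 mV/us   ->  DIV_ROUND_UP(32000, 8)   =  4000 uV/us
 *   ramp_ctrl = 7  ->  0.25 mV/us ->  DIV_ROUND_UP(32000, 128) =   250 uV/us
 */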
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 9b9af6d889c..9d053e23e9e 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -107,12 +107,7 @@ static const unsigned int DCDC_FIXED_1800000_VSEL_table[] = {
};
/* Supported voltage values for LDO regulators for tps65020 */
-static const unsigned int TPS65020_LDO1_VSEL_table[] = {
- 1000000, 1050000, 1100000, 1300000,
- 1800000, 2500000, 3000000, 3300000,
-};
-
-static const unsigned int TPS65020_LDO2_VSEL_table[] = {
+static const unsigned int TPS65020_LDO_VSEL_table[] = {
1000000, 1050000, 1100000, 1300000,
1800000, 2500000, 3000000, 3300000,
};
@@ -154,20 +149,15 @@ struct tps_driver_data {
static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
- int ret;
- int data, dcdc = rdev_get_id(dev);
+ int dcdc = rdev_get_id(dev);
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
- if (dcdc == tps->core_regulator) {
- ret = regmap_read(tps->regmap, TPS65023_REG_DEF_CORE, &data);
- if (ret != 0)
- return ret;
- data &= (tps->info[dcdc]->table_len - 1);
- return data;
- } else
+ if (dcdc != tps->core_regulator)
return 0;
+
+ return regulator_get_voltage_sel_regmap(dev);
}
static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -175,23 +165,11 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int dcdc = rdev_get_id(dev);
- int ret;
if (dcdc != tps->core_regulator)
return -EINVAL;
- ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, selector);
- if (ret)
- goto out;
-
- /* Tell the chip that we have changed the value in DEFCORE
- * and its time to update the core voltage
- */
- ret = regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
-
-out:
- return ret;
+ return regulator_set_voltage_sel_regmap(dev, selector);
}
/* Operations permitted on VDCDCx */
@@ -202,6 +180,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
.get_voltage_sel = tps65023_dcdc_get_voltage_sel,
.set_voltage_sel = tps65023_dcdc_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
};
/* Operations permitted on LDOx */
@@ -212,6 +191,7 @@ static struct regulator_ops tps65023_ldo_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
};
static struct regmap_config tps65023_regmap_config = {
@@ -285,6 +265,10 @@ static int tps_65023_probe(struct i2c_client *client,
default: /* DCDCx */
tps->desc[i].enable_mask =
1 << (TPS65023_NUM_REGULATOR - i);
+ tps->desc[i].vsel_reg = TPS65023_REG_DEF_CORE;
+ tps->desc[i].vsel_mask = info->table_len - 1;
+ tps->desc[i].apply_reg = TPS65023_REG_CON_CTRL2;
+ tps->desc[i].apply_bit = TPS65023_REG_CTRL2_GO;
}
config.dev = &client->dev;
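
Dropping the open-coded DEF_CORE write plus CON_CTRL2 GO pulse works because the generic regmap helper performs the same two steps once vsel_reg/vsel_mask and apply_reg/apply_bit are populated as above. A sketch of that behaviour (not the core's exact code; error handling trimmed):

#include <linux/regmap.h>
#include <linux/regulator/driver.h>

static int set_voltage_sel_regmap_sketch(struct regulator_dev *rdev,
                                         unsigned int sel)
{
        int ret;

        /* write the selector into vsel_reg (here TPS65023_REG_DEF_CORE) */
        ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
                                 rdev->desc->vsel_mask, sel);
        if (ret)
                return ret;

        /* then pulse apply_bit in apply_reg (here CTRL2_GO in CON_CTRL2)
         * to tell the chip to latch the new core voltage
         */
        if (rdev->desc->apply_bit)
                ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
                                         rdev->desc->apply_bit,
                                         rdev->desc->apply_bit);

        return ret;
}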
@@ -347,13 +331,13 @@ static const struct tps_info tps65020_regs[] = {
},
{
.name = "LDO1",
- .table_len = ARRAY_SIZE(TPS65020_LDO1_VSEL_table),
- .table = TPS65020_LDO1_VSEL_table,
+ .table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
+ .table = TPS65020_LDO_VSEL_table,
},
{
.name = "LDO2",
- .table_len = ARRAY_SIZE(TPS65020_LDO2_VSEL_table),
- .table = TPS65020_LDO2_VSEL_table,
+ .table_len = ARRAY_SIZE(TPS65020_LDO_VSEL_table),
+ .table = TPS65020_LDO_VSEL_table,
},
};
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 54aa2da7283..4117ff52dba 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -356,6 +356,7 @@ static struct regulator_ops tps6507x_pmic_ops = {
.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
};
#ifdef CONFIG_OF
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 843ee0a9bb9..1094393155e 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -572,6 +572,7 @@ static struct regulator_ops regulator_ops = {
.get_voltage_sel = get_voltage_sel,
.set_voltage_sel = set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_current_limit = set_current_limit,
.get_current_limit = get_current_limit,
};
@@ -584,8 +585,7 @@ static int pmic_remove(struct spi_device *spi)
if (!hw)
return 0;
for (i = 0; i < N_REGULATORS; i++) {
- if (hw->rdev[i])
- regulator_unregister(hw->rdev[i]);
+ regulator_unregister(hw->rdev[i]);
hw->rdev[i] = NULL;
}
spi_set_drvdata(spi, NULL);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index e68382d0e1e..d8fa37d5c73 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -70,6 +70,7 @@ static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
static struct regulator_ops tps6586x_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -245,7 +246,7 @@ static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev,
reg = TPS6586X_SM1SL;
break;
default:
- dev_warn(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
+ dev_err(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
return -EINVAL;
}
@@ -304,14 +305,12 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
}
err = of_regulator_match(&pdev->dev, regs, tps6586x_matches, num);
+ of_node_put(regs);
if (err < 0) {
dev_err(&pdev->dev, "Regulator match failed, e %d\n", err);
- of_node_put(regs);
return NULL;
}
- of_node_put(regs);
-
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "Memory alloction failed\n");
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 6ba6931ac85..45c16447744 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -748,6 +748,7 @@ static struct regulator_ops tps65910_ops_dcdc = {
.set_voltage_sel = tps65910_set_voltage_dcdc_sel,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
.list_voltage = tps65910_list_voltage_dcdc,
+ .map_voltage = regulator_map_voltage_ascend,
};
static struct regulator_ops tps65910_ops_vdd3 = {
@@ -758,6 +759,7 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.get_mode = tps65910_get_mode,
.get_voltage = tps65910_get_voltage_vdd3,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
};
static struct regulator_ops tps65910_ops = {
@@ -769,6 +771,7 @@ static struct regulator_ops tps65910_ops = {
.get_voltage_sel = tps65910_get_voltage_sel,
.set_voltage_sel = tps65910_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
};
static struct regulator_ops tps65911_ops = {
@@ -780,6 +783,7 @@ static struct regulator_ops tps65911_ops = {
.get_voltage_sel = tps65911_get_voltage_sel,
.set_voltage_sel = tps65911_set_voltage_sel,
.list_voltage = tps65911_list_voltage,
+ .map_voltage = regulator_map_voltage_ascend,
};
static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
index 9019d0e7ecb..6511d0bfd89 100644
--- a/drivers/regulator/tps80031-regulator.c
+++ b/drivers/regulator/tps80031-regulator.c
@@ -238,12 +238,11 @@ static int tps80031_dcdc_get_voltage_sel(struct regulator_dev *rdev)
return vsel & SMPS_VSEL_MASK;
}
-static int tps80031_ldo_set_voltage_sel(struct regulator_dev *rdev,
- unsigned sel)
+static int tps80031_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned int sel)
{
struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps80031_dev(rdev);
- int ret;
/* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
if ((ri->rinfo->desc.id == TPS80031_REGULATOR_LDO2) &&
@@ -260,28 +259,27 @@ static int tps80031_ldo_set_voltage_sel(struct regulator_dev *rdev,
}
}
- ret = tps80031_write(parent, ri->rinfo->volt_id,
- ri->rinfo->volt_reg, sel);
- if (ret < 0)
- dev_err(ri->dev, "Error in writing reg 0x%02x, e = %d\n",
- ri->rinfo->volt_reg, ret);
- return ret;
+ return regulator_list_voltage_linear(rdev, sel);
}
-static int tps80031_ldo_get_voltage_sel(struct regulator_dev *rdev)
+static int tps80031_ldo_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps80031_dev(rdev);
- uint8_t vsel;
- int ret;
- ret = tps80031_read(parent, ri->rinfo->volt_id,
- ri->rinfo->volt_reg, &vsel);
- if (ret < 0) {
- dev_err(ri->dev, "Error in writing the Voltage register\n");
- return ret;
+ /* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
+ if ((ri->rinfo->desc.id == TPS80031_REGULATOR_LDO2) &&
+ (ri->device_flags & TRACK_MODE_ENABLE)) {
+ if (((tps80031_get_chip_info(parent) == TPS80031) ||
+ ((tps80031_get_chip_info(parent) == TPS80032) &&
+ (tps80031_get_pmu_version(parent) == 0x0)))) {
+ return regulator_map_voltage_iterate(rdev, min_uV,
+ max_uV);
+ }
}
- return vsel & rdev->desc->vsel_mask;
+
+ return regulator_map_voltage_linear(rdev, min_uV, max_uV);
}
static int tps80031_vbus_is_enabled(struct regulator_dev *rdev)
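
For the non-tracking path the mapping is the standard linear inversion; only the LDO2 tracking-mode case falls back to an iterating search, presumably because some selectors are rejected there and the table is no longer strictly linear. The linear form, for reference (a sketch of what regulator_map_voltage_linear() computes):

/* Invert  V(sel) = min_uV + (sel - linear_min_sel) * uV_step :
 *
 *   sel = DIV_ROUND_UP(requested_min_uV - desc->min_uV, desc->uV_step)
 *         + desc->linear_min_sel;
 *
 * followed by a list_voltage(sel) check that the result still lies
 * inside the requested [min_uV, max_uV] window.
 */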
@@ -390,9 +388,10 @@ static struct regulator_ops tps80031_dcdc_ops = {
};
static struct regulator_ops tps80031_ldo_ops = {
- .list_voltage = regulator_list_voltage_linear,
- .set_voltage_sel = tps80031_ldo_set_voltage_sel,
- .get_voltage_sel = tps80031_ldo_get_voltage_sel,
+ .list_voltage = tps80031_ldo_list_voltage,
+ .map_voltage = tps80031_ldo_map_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = tps80031_reg_enable,
.disable = tps80031_reg_disable,
.is_enabled = tps80031_reg_is_enabled,
@@ -459,6 +458,7 @@ static struct regulator_ops tps80031_ext_reg_ops = {
.uV_step = 100000, \
.linear_min_sel = 1, \
.n_voltages = 25, \
+ .vsel_reg = TPS80031_##_id##_CFG_VOLTAGE, \
.vsel_mask = LDO_VSEL_MASK, \
.enable_time = 500, \
}, \
@@ -680,6 +680,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
struct tps80031_regulator *pmic;
struct regulator_dev *rdev;
struct regulator_config config = { };
+ struct tps80031 *tps80031_mfd = dev_get_drvdata(pdev->dev.parent);
int ret;
int num;
@@ -707,6 +708,8 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.init_data = NULL;
config.driver_data = ri;
+ config.regmap = tps80031_mfd->regmap[ri->rinfo->volt_id];
+
if (tps_pdata) {
config.init_data = tps_pdata->reg_init_data;
ri->config_flags = tps_pdata->config_flags;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index f705d25b437..fb6e67d74ff 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -441,12 +441,6 @@ static const u16 VSIM_VSEL_table[] = {
static const u16 VDAC_VSEL_table[] = {
1200, 1300, 1800, 1800,
};
-static const u16 VDD1_VSEL_table[] = {
- 800, 1450,
-};
-static const u16 VDD2_VSEL_table[] = {
- 800, 1450, 1500,
-};
static const u16 VIO_VSEL_table[] = {
1800, 1850,
};
@@ -615,18 +609,8 @@ static struct regulator_ops twl6030ldo_ops = {
/*----------------------------------------------------------------------*/
-/*
- * Fixed voltage LDOs don't have a VSEL field to update.
- */
-static int twlfixed_list_voltage(struct regulator_dev *rdev, unsigned index)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return info->min_mV * 1000;
-}
-
static struct regulator_ops twl4030fixed_ops = {
- .list_voltage = twlfixed_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
@@ -638,7 +622,7 @@ static struct regulator_ops twl4030fixed_ops = {
};
static struct regulator_ops twl6030fixed_ops = {
- .list_voltage = twlfixed_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
@@ -944,19 +928,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
.ops = &operations, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
- .enable_time = turnon_delay, \
- }, \
- }
-
-#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) \
-static struct twlreg_info TWLRES_INFO_##label = { \
- .base = offset, \
- .desc = { \
- .name = #label, \
- .id = TWL6030_REG_##label, \
- .ops = &twl6030_fixed_resource, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
+ .min_uV = mVolts * 1000, \
.enable_time = turnon_delay, \
}, \
}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 6ff87234264..a612c356a69 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#include <linux/gpio.h>
#include <linux/slab.h>
@@ -28,6 +29,8 @@
struct wm8994_ldo {
struct regulator_dev *regulator;
struct wm8994 *wm8994;
+ struct regulator_consumer_supply supply;
+ struct regulator_init_data init_data;
};
#define WM8994_LDO1_MAX_SELECTOR 0x7
@@ -99,6 +102,26 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
},
};
+static const struct regulator_consumer_supply wm8994_ldo_consumer[] = {
+ { .supply = "AVDD1" },
+ { .supply = "DCVDD" },
+};
+
+static const struct regulator_init_data wm8994_ldo_default[] = {
+ {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ },
+ {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ },
+};
+
static int wm8994_ldo_probe(struct platform_device *pdev)
{
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
@@ -117,13 +140,29 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
}
ldo->wm8994 = wm8994;
+ ldo->supply = wm8994_ldo_consumer[id];
+ ldo->supply.dev_name = dev_name(wm8994->dev);
config.dev = wm8994->dev;
config.driver_data = ldo;
config.regmap = wm8994->regmap;
- if (pdata) {
- config.init_data = pdata->ldo[id].init_data;
+ config.init_data = &ldo->init_data;
+ if (pdata)
config.ena_gpio = pdata->ldo[id].enable;
+ else if (wm8994->dev->of_node)
+ config.ena_gpio = wm8994->pdata.ldo[id].enable;
+
+ /* Use default constraints if none set up */
+ if (!pdata || !pdata->ldo[id].init_data || wm8994->dev->of_node) {
+ dev_dbg(wm8994->dev, "Using default init data, supply %s %s\n",
+ ldo->supply.dev_name, ldo->supply.supply);
+
+ ldo->init_data = wm8994_ldo_default[id];
+ ldo->init_data.consumer_supplies = &ldo->supply;
+ if (!config.ena_gpio)
+ ldo->init_data.constraints.valid_ops_mask = 0;
+ } else {
+ ldo->init_data = *pdata->ldo[id].init_data;
}
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
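
With no platform init_data (or when probing from DT), the driver now synthesises its own: constraints that only allow status changes, a single consumer supply pointed at the parent CODEC device, and valid_ops_mask cleared again if there is no enable GPIO, leaving the LDO effectively always-on from the framework's point of view. A sketch of the effective init_data for LDO1 (illustration only; it is assembled at runtime in probe, not declared statically):

#include <linux/regulator/machine.h>

static struct regulator_consumer_supply ldo1_supply_example = {
        .supply   = "AVDD1",
        /* .dev_name is filled in with dev_name(wm8994->dev) at probe time */
};

static struct regulator_init_data ldo1_default_example = {
        .constraints = {
                /* cleared again when no enable GPIO is available */
                .valid_ops_mask = REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies = 1,
        .consumer_supplies     = &ldo1_supply_example,
};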
@@ -162,23 +201,7 @@ static struct platform_driver wm8994_ldo_driver = {
},
};
-static int __init wm8994_ldo_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&wm8994_ldo_driver);
- if (ret != 0)
- pr_err("Failed to register Wm8994 GP LDO driver: %d\n", ret);
-
- return ret;
-}
-subsys_initcall(wm8994_ldo_init);
-
-static void __exit wm8994_ldo_exit(void)
-{
- platform_driver_unregister(&wm8994_ldo_driver);
-}
-module_exit(wm8994_ldo_exit);
+module_platform_driver(wm8994_ldo_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2be0de920d6..141d8c10b76 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,6 +75,17 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT32 (AVR32) and AT91 (ARM) chips.
+config SPI_BCM2835
+ tristate "BCM2835 SPI controller"
+ depends on ARCH_BCM2835
+ help
+ This selects a driver for the Broadcom BCM2835 SPI master.
+
+ The BCM2835 contains two types of SPI master controller: the
+ "universal SPI master" and the regular SPI controller. This driver
+ is for the regular SPI controller. Slave mode operation is not
+ supported.
+
config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
depends on BLACKFIN
@@ -219,16 +230,23 @@ config SPI_MPC512x_PSC
config SPI_FSL_LIB
tristate
+ depends on OF
+
+config SPI_FSL_CPM
+ tristate
depends on FSL_SOC
config SPI_FSL_SPI
- bool "Freescale SPI controller"
- depends on FSL_SOC
+ bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+ depends on OF
select SPI_FSL_LIB
+ select SPI_FSL_CPM if FSL_SOC
help
This enables using the Freescale SPI controllers in master mode.
MPC83xx platform uses the controller in cpu mode or CPM/QE mode.
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
+ This also enables using the Aeroflex Gaisler GRLIB SPI controller in
+ master mode.
config SPI_FSL_ESPI
bool "Freescale eSPI controller"
@@ -398,6 +416,14 @@ config SPI_MXS
help
SPI driver for Freescale MXS devices.
+config SPI_TEGRA114
+ tristate "NVIDIA Tegra114 SPI Controller"
+ depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ help
+ SPI driver for the NVIDIA Tegra114 SPI controller interface. This
+ controller is different from the SPI controller in older Tegra SoCs,
+ and its register interface has also changed.
+
config SPI_TEGRA20_SFLASH
tristate "Nvidia Tegra20 Serial flash Controller"
depends on ARCH_TEGRA
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e53c3094134..33f9c09561e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
@@ -63,6 +65,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 656d137db25..787bd2c22bc 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -15,16 +15,17 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/platform_data/atmel.h>
+#include <linux/platform_data/dma-atmel.h>
#include <linux/of.h>
-#include <asm/io.h>
-#include <asm/gpio.h>
-#include <mach/cpu.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -39,6 +40,7 @@
#define SPI_CSR1 0x0034
#define SPI_CSR2 0x0038
#define SPI_CSR3 0x003c
+#define SPI_VERSION 0x00fc
#define SPI_RPR 0x0100
#define SPI_RCR 0x0104
#define SPI_TPR 0x0108
@@ -71,6 +73,8 @@
#define SPI_FDIV_SIZE 1
#define SPI_MODFDIS_OFFSET 4
#define SPI_MODFDIS_SIZE 1
+#define SPI_WDRBT_OFFSET 5
+#define SPI_WDRBT_SIZE 1
#define SPI_LLB_OFFSET 7
#define SPI_LLB_SIZE 1
#define SPI_PCS_OFFSET 16
@@ -180,6 +184,27 @@
#define spi_writel(port,reg,value) \
__raw_writel((value), (port)->regs + SPI_##reg)
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 16
+
+struct atmel_spi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sgrx;
+ struct scatterlist sgtx;
+ struct dma_async_tx_descriptor *data_desc_rx;
+ struct dma_async_tx_descriptor *data_desc_tx;
+
+ struct at_dma_slave dma_slave;
+};
+
+struct atmel_spi_caps {
+ bool is_spi2;
+ bool has_wdrbt;
+ bool has_dma_support;
+};
/*
* The core SPI transfer engine just talks to a register bank to set up
@@ -188,7 +213,9 @@
*/
struct atmel_spi {
spinlock_t lock;
+ unsigned long flags;
+ phys_addr_t phybase;
void __iomem *regs;
int irq;
struct clk *clk;
@@ -197,13 +224,23 @@ struct atmel_spi {
u8 stopping;
struct list_head queue;
+ struct tasklet_struct tasklet;
struct spi_transfer *current_transfer;
unsigned long current_remaining_bytes;
struct spi_transfer *next_transfer;
unsigned long next_remaining_bytes;
+ int done_status;
+ /* scratch buffer */
void *buffer;
dma_addr_t buffer_dma;
+
+ struct atmel_spi_caps caps;
+
+ bool use_dma;
+ bool use_pdc;
+ /* dmaengine data */
+ struct atmel_spi_dma dma;
};
/* Controller-specific per-slave state */
@@ -222,14 +259,10 @@ struct atmel_spi_device {
* - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
* - SPI_CSRx.CSAAT
* - SPI_CSRx.SBCR allows faster clocking
- *
- * We can determine the controller version by reading the VERSION
- * register, but I haven't checked that it exists on all chips, and
- * this is cheaper anyway.
*/
-static bool atmel_spi_is_v2(void)
+static bool atmel_spi_is_v2(struct atmel_spi *as)
{
- return !cpu_is_at91rm9200();
+ return as->caps.is_spi2;
}
/*
@@ -250,11 +283,6 @@ static bool atmel_spi_is_v2(void)
* Master on Chip Select 0.") No workaround exists for that ... so for
* nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
* and (c) will trigger that first erratum in some cases.
- *
- * TODO: Test if the atmel_spi_is_v2() branch below works on
- * AT91RM9200 if we use some other register than CSR0. However, don't
- * do this unconditionally since AP7000 has an errata where the BITS
- * field in CSR0 overrides all other CSRs.
*/
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
@@ -263,15 +291,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
- if (atmel_spi_is_v2()) {
- /*
- * Always use CSR0. This ensures that the clock
- * switches to the correct idle polarity before we
- * toggle the CS.
+ if (atmel_spi_is_v2(as)) {
+ spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
+ /* For the lower SPI version, there is an issue where PDC transfers
+ * on CS1,2,3 need SPI_CSR0.BITS configured the same as SPI_CSR1,2,3.BITS
*/
spi_writel(as, CSR0, asd->csr);
- spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS)
- | SPI_BIT(MSTR));
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(WDRBT)
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ }
+
mr = spi_readl(as, MR);
gpio_set_value(asd->npcs_pin, active);
} else {
@@ -318,10 +356,26 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
asd->npcs_pin, active ? " (low)" : "",
mr);
- if (atmel_spi_is_v2() || spi->chip_select != 0)
+ if (atmel_spi_is_v2(as) || spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, !active);
}
+static void atmel_spi_lock(struct atmel_spi *as)
+{
+ spin_lock_irqsave(&as->lock, as->flags);
+}
+
+static void atmel_spi_unlock(struct atmel_spi *as)
+{
+ spin_unlock_irqrestore(&as->lock, as->flags);
+}
+
+static inline bool atmel_spi_use_dma(struct atmel_spi *as,
+ struct spi_transfer *xfer)
+{
+ return as->use_dma && xfer->len >= DMA_MIN_BYTES;
+}
+
static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
struct spi_transfer *xfer)
{
@@ -333,6 +387,265 @@ static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
return xfer->delay_usecs == 0 && !xfer->cs_change;
}
+static int atmel_spi_dma_slave_config(struct atmel_spi *as,
+ struct dma_slave_config *slave_config,
+ u8 bits_per_word)
+{
+ int err = 0;
+
+ if (bits_per_word > 8) {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
+ slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
+ slave_config->src_maxburst = 1;
+ slave_config->dst_maxburst = 1;
+ slave_config->device_fc = false;
+
+ slave_config->direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ }
+
+ slave_config->direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ struct at_dma_slave *sl = slave;
+
+ if (sl->dma_dev == chan->device->dev) {
+ chan->private = sl;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static int atmel_spi_configure_dma(struct atmel_spi *as)
+{
+ struct at_dma_slave *sdata = &as->dma.dma_slave;
+ struct dma_slave_config slave_config;
+ int err;
+
+ if (sdata && sdata->dma_dev) {
+ dma_cap_mask_t mask;
+
+ /* Try to grab two DMA channels */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ as->dma.chan_tx = dma_request_channel(mask, filter, sdata);
+ if (as->dma.chan_tx)
+ as->dma.chan_rx =
+ dma_request_channel(mask, filter, sdata);
+ }
+ if (!as->dma.chan_rx || !as->dma.chan_tx) {
+ dev_err(&as->pdev->dev,
+ "DMA channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto error;
+ }
+
+ err = atmel_spi_dma_slave_config(as, &slave_config, 8);
+ if (err)
+ goto error;
+
+ dev_info(&as->pdev->dev,
+ "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(as->dma.chan_tx),
+ dma_chan_name(as->dma.chan_rx));
+ return 0;
+error:
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dma_release_channel(as->dma.chan_tx);
+ return err;
+}
+
+static void atmel_spi_stop_dma(struct atmel_spi *as)
+{
+ if (as->dma.chan_rx)
+ as->dma.chan_rx->device->device_control(as->dma.chan_rx,
+ DMA_TERMINATE_ALL, 0);
+ if (as->dma.chan_tx)
+ as->dma.chan_tx->device->device_control(as->dma.chan_tx,
+ DMA_TERMINATE_ALL, 0);
+}
+
+static void atmel_spi_release_dma(struct atmel_spi *as)
+{
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dma_release_channel(as->dma.chan_tx);
+}
+
+/* This function is called by the DMA driver from tasklet context */
+static void dma_callback(void *data)
+{
+ struct spi_master *master = data;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ /* trigger SPI tasklet */
+ tasklet_schedule(&as->tasklet);
+}
+
+/*
+ * Next transfer using PIO.
+ * lock is held, spi tasklet is blocked
+ */
+static void atmel_spi_next_xfer_pio(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+
+ as->current_remaining_bytes = xfer->len;
+
+ /* Make sure data is not remaining in RDR */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ if (xfer->tx_buf)
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
+ else
+ spi_writel(as, TDR, 0);
+
+ dev_dbg(master->dev.parent,
+ " start pio xfer %p: len %u tx %p rx %p\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
+}
+
+/*
+ * Submit next transfer for DMA.
+ * lock is held, spi tasklet is blocked
+ */
+static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+ struct spi_transfer *xfer,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct dma_chan *rxchan = as->dma.chan_rx;
+ struct dma_chan *txchan = as->dma.chan_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ struct dma_slave_config slave_config;
+ dma_cookie_t cookie;
+ u32 len = *plen;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /* release lock for DMA operations */
+ atmel_spi_unlock(as);
+
+ /* prepare the RX dma transfer */
+ sg_init_table(&as->dma.sgrx, 1);
+ if (xfer->rx_buf) {
+ as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgrx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+
+ /* prepare the TX dma transfer */
+ sg_init_table(&as->dma.sgtx, 1);
+ if (xfer->tx_buf) {
+ as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgtx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ }
+
+ sg_dma_len(&as->dma.sgtx) = len;
+ sg_dma_len(&as->dma.sgrx) = len;
+
+ *plen = len;
+
+ if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+ goto err_exit;
+
+ /* Send both scatterlists */
+ rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+ &as->dma.sgrx,
+ 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
+ NULL);
+ if (!rxdesc)
+ goto err_dma;
+
+ txdesc = txchan->device->device_prep_slave_sg(txchan,
+ &as->dma.sgtx,
+ 1,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
+ NULL);
+ if (!txdesc)
+ goto err_dma;
+
+ dev_dbg(master->dev.parent,
+ " start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(OVRES));
+
+ /* Put the callback on the RX transfer only; it should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = master;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ /* take back lock */
+ atmel_spi_lock(as);
+ return 0;
+
+err_dma:
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ atmel_spi_stop_dma(as);
+err_exit:
+ atmel_spi_lock(as);
+ return -ENOMEM;
+}
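The submission sequence above follows the standard dmaengine slave flow: configure the channel, prepare a descriptor for each scatterlist, attach a completion callback, submit, then issue the pending descriptors. Below is a condensed sketch of that flow written with the generic dmaengine wrappers; the function name and arguments are illustrative placeholders, not part of this patch.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Sketch of the generic dmaengine slave submission sequence used above.
 * All names and parameters here are illustrative, not from this driver.
 */
static int example_submit_rx(struct dma_chan *chan, struct scatterlist *sg,
			     struct dma_slave_config *cfg,
			     dma_async_tx_callback done, void *done_arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* 1. Configure the channel (device address, bus width, burst size) */
	if (dmaengine_slave_config(chan, cfg))
		return -EINVAL;

	/* 2. Prepare a descriptor for one scatterlist entry, device-to-memory */
	desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	/* 3. The DMA driver invokes this callback from tasklet context */
	desc->callback = done;
	desc->callback_param = done_arg;

	/* 4. Queue the descriptor on the channel */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* 5. Kick the engine so queued descriptors start running */
	dma_async_issue_pending(chan);
	return 0;
}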
+
static void atmel_spi_next_xfer_data(struct spi_master *master,
struct spi_transfer *xfer,
dma_addr_t *tx_dma,
@@ -350,6 +663,7 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
if (len > BUFFER_SIZE)
len = BUFFER_SIZE;
}
+
if (xfer->tx_buf)
*tx_dma = xfer->tx_dma + xfer->len - *plen;
else {
@@ -365,10 +679,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
}
/*
- * Submit next transfer for DMA.
+ * Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
-static void atmel_spi_next_xfer(struct spi_master *master,
+static void atmel_spi_pdc_next_xfer(struct spi_master *master,
struct spi_message *msg)
{
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -465,6 +779,48 @@ static void atmel_spi_next_xfer(struct spi_master *master,
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
+/*
+ * Choose way to submit next transfer and start it.
+ * lock is held, spi tasklet is blocked
+ */
+static void atmel_spi_dma_next_xfer(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ u32 remaining, len;
+
+ remaining = as->current_remaining_bytes;
+ if (remaining) {
+ xfer = as->current_transfer;
+ len = remaining;
+ } else {
+ if (!as->current_transfer)
+ xfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ else
+ xfer = list_entry(
+ as->current_transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+
+ as->current_transfer = xfer;
+ len = xfer->len;
+ }
+
+ if (atmel_spi_use_dma(as, xfer)) {
+ u32 total = len;
+ if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
+ as->current_remaining_bytes = total - len;
+ return;
+ } else {
+ dev_err(&msg->spi->dev, "unable to use DMA, falling back to PIO\n");
+ }
+ }
+
+ /* fall back to PIO if an error happened while using DMA */
+ atmel_spi_next_xfer_pio(master, xfer);
+}
+
static void atmel_spi_next_message(struct spi_master *master)
{
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -489,7 +845,10 @@ static void atmel_spi_next_message(struct spi_master *master)
} else
cs_activate(as, spi);
- atmel_spi_next_xfer(master, msg);
+ if (as->use_pdc)
+ atmel_spi_pdc_next_xfer(master, msg);
+ else
+ atmel_spi_dma_next_xfer(master, msg);
}
/*
@@ -542,38 +901,213 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
xfer->len, DMA_FROM_DEVICE);
}
+static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
+{
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+}
+
static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
- struct spi_message *msg, int status, int stay)
+ struct spi_message *msg, int stay)
{
- if (!stay || status < 0)
+ if (!stay || as->done_status < 0)
cs_deactivate(as, msg->spi);
else
as->stay = msg->spi;
list_del(&msg->queue);
- msg->status = status;
+ msg->status = as->done_status;
dev_dbg(master->dev.parent,
"xfer complete: %u bytes transferred\n",
msg->actual_length);
- spin_unlock(&as->lock);
+ atmel_spi_unlock(as);
msg->complete(msg->context);
- spin_lock(&as->lock);
+ atmel_spi_lock(as);
as->current_transfer = NULL;
as->next_transfer = NULL;
+ as->done_status = 0;
/* continue if needed */
- if (list_empty(&as->queue) || as->stopping)
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
- else
+ if (list_empty(&as->queue) || as->stopping) {
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+ } else {
atmel_spi_next_message(master);
+ }
+}
+
+/* Called from IRQ
+ * lock is held
+ *
+ * Must update "current_remaining_bytes" to keep track of data
+ * to transfer.
+ */
+static void
+atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u8 *txp;
+ u8 *rxp;
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ if (xfer->rx_buf) {
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
+ } else {
+ spi_readl(as, RDR);
+ }
+
+ as->current_remaining_bytes--;
+
+ if (as->current_remaining_bytes) {
+ if (xfer->tx_buf) {
+ txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
+ spi_writel(as, TDR, *txp);
+ } else {
+ spi_writel(as, TDR, 0);
+ }
+ }
+}
+
+/* Tasklet
+ * Called from the DMA callback, and from the PIO-transfer and overrun IRQ paths.
+ */
+static void atmel_spi_tasklet_func(unsigned long data)
+{
+ struct spi_master *master = (struct spi_master *)data;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_message *msg;
+ struct spi_transfer *xfer;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");
+
+ atmel_spi_lock(as);
+
+ xfer = as->current_transfer;
+
+ if (xfer == NULL)
+ /* already been there */
+ goto tasklet_out;
+
+ msg = list_entry(as->queue.next, struct spi_message, queue);
+
+ if (as->current_remaining_bytes == 0) {
+ if (as->done_status < 0) {
+ /* error happened (overrun) */
+ if (atmel_spi_use_dma(as, xfer))
+ atmel_spi_stop_dma(as);
+ } else {
+ /* only update length if no error */
+ msg->actual_length += xfer->len;
+ }
+
+ if (atmel_spi_use_dma(as, xfer))
+ if (!msg->is_dma_mapped)
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
+ /* report completed (or erroneous) message */
+ atmel_spi_msg_done(master, as, msg, xfer->cs_change);
+ } else {
+ if (xfer->cs_change) {
+ cs_deactivate(as, msg->spi);
+ udelay(1);
+ cs_activate(as, msg->spi);
+ }
+
+ /*
+ * Not done yet. Submit the next transfer.
+ *
+ * FIXME handle protocol options for xfer
+ */
+ atmel_spi_dma_next_xfer(master, msg);
+ }
+ } else {
+ /*
+ * Keep going, we still have data to send in
+ * the current transfer.
+ */
+ atmel_spi_dma_next_xfer(master, msg);
+ }
+
+tasklet_out:
+ atmel_spi_unlock(as);
}
+/* Interrupt
+ *
+ * No need for locking in this interrupt handler: done_status is the
+ * only information modified. All we need is for this field to be
+ * updated before the tasklet runs; the memory barrier ensures that.
+ */
static irqreturn_t
-atmel_spi_interrupt(int irq, void *dev_id)
+atmel_spi_pio_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ struct spi_transfer *xfer;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ dev_warn(master->dev.parent, "overrun\n");
+
+ /*
+ * When we get an overrun, we disregard the current
+ * transfer. Data will not be copied back from any
+ * bounce buffer and msg->actual_len will not be
+ * updated with the last xfer.
+ *
+ * We will also not process any remaining transfers in
+ * the message.
+ *
+ * All actions are done in tasklet with done_status indication
+ */
+ as->done_status = -EIO;
+ smp_wmb();
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ tasklet_schedule(&as->tasklet);
+
+ } else if (pending & SPI_BIT(RDRF)) {
+ atmel_spi_lock(as);
+
+ if (as->current_remaining_bytes) {
+ ret = IRQ_HANDLED;
+ xfer = as->current_transfer;
+ atmel_spi_pump_pio_data(as, xfer);
+ if (!as->current_remaining_bytes) {
+ /* no more data to xfer, kick tasklet */
+ spi_writel(as, IDR, pending);
+ tasklet_schedule(&as->tasklet);
+ }
+ }
+
+ atmel_spi_unlock(as);
+ } else {
+ WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, pending);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct atmel_spi *as = spi_master_get_devdata(master);
@@ -582,7 +1116,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
u32 status, pending, imr;
int ret = IRQ_NONE;
- spin_lock(&as->lock);
+ atmel_spi_lock(as);
xfer = as->current_transfer;
msg = list_entry(as->queue.next, struct spi_message, queue);
@@ -641,7 +1175,8 @@ atmel_spi_interrupt(int irq, void *dev_id)
/* Clear any overrun happening while cleaning up */
spi_readl(as, SR);
- atmel_spi_msg_done(master, as, msg, -EIO, 0);
+ as->done_status = -EIO;
+ atmel_spi_msg_done(master, as, msg, 0);
} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
ret = IRQ_HANDLED;
@@ -659,7 +1194,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
if (atmel_spi_xfer_is_last(msg, xfer)) {
/* report completed message */
- atmel_spi_msg_done(master, as, msg, 0,
+ atmel_spi_msg_done(master, as, msg,
xfer->cs_change);
} else {
if (xfer->cs_change) {
@@ -673,18 +1208,18 @@ atmel_spi_interrupt(int irq, void *dev_id)
*
* FIXME handle protocol options for xfer
*/
- atmel_spi_next_xfer(master, msg);
+ atmel_spi_pdc_next_xfer(master, msg);
}
} else {
/*
* Keep going, we still have data to send in
* the current transfer.
*/
- atmel_spi_next_xfer(master, msg);
+ atmel_spi_pdc_next_xfer(master, msg);
}
}
- spin_unlock(&as->lock);
+ atmel_spi_unlock(as);
return ret;
}
@@ -719,7 +1254,7 @@ static int atmel_spi_setup(struct spi_device *spi)
}
/* see notes above re chipselect */
- if (!atmel_spi_is_v2()
+ if (!atmel_spi_is_v2(as)
&& spi->chip_select == 0
&& (spi->mode & SPI_CS_HIGH)) {
dev_dbg(&spi->dev, "setup: can't be active-high\n");
@@ -728,7 +1263,7 @@ static int atmel_spi_setup(struct spi_device *spi)
/* v1 chips start out at half the peripheral bus speed. */
bus_hz = clk_get_rate(as->clk);
- if (!atmel_spi_is_v2())
+ if (!atmel_spi_is_v2(as))
bus_hz /= 2;
if (spi->max_speed_hz) {
@@ -789,13 +1324,11 @@ static int atmel_spi_setup(struct spi_device *spi)
spi->controller_state = asd;
gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
} else {
- unsigned long flags;
-
- spin_lock_irqsave(&as->lock, flags);
+ atmel_spi_lock(as);
if (as->stay == spi)
as->stay = NULL;
cs_deactivate(as, spi);
- spin_unlock_irqrestore(&as->lock, flags);
+ atmel_spi_unlock(as);
}
asd->csr = csr;
@@ -804,7 +1337,7 @@ static int atmel_spi_setup(struct spi_device *spi)
"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
- if (!atmel_spi_is_v2())
+ if (!atmel_spi_is_v2(as))
spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
return 0;
@@ -814,7 +1347,6 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
struct atmel_spi *as;
struct spi_transfer *xfer;
- unsigned long flags;
struct device *controller = spi->master->dev.parent;
u8 bits;
struct atmel_spi_device *asd;
@@ -854,13 +1386,10 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
/*
* DMA map early, for performance (empties dcache ASAP) and
- * better fault reporting. This is a DMA-only driver.
- *
- * NOTE that if dma_unmap_single() ever starts to do work on
- * platforms supported by this driver, we would need to clean
- * up mappings for previously-mapped transfers.
+ * better fault reporting.
*/
- if (!msg->is_dma_mapped) {
+ if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
+ || as->use_pdc)) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
@@ -879,11 +1408,11 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
msg->status = -EINPROGRESS;
msg->actual_length = 0;
- spin_lock_irqsave(&as->lock, flags);
+ atmel_spi_lock(as);
list_add_tail(&msg->queue, &as->queue);
if (!as->current_transfer)
atmel_spi_next_message(spi->master);
- spin_unlock_irqrestore(&as->lock, flags);
+ atmel_spi_unlock(as);
return 0;
}
@@ -893,23 +1422,39 @@ static void atmel_spi_cleanup(struct spi_device *spi)
struct atmel_spi *as = spi_master_get_devdata(spi->master);
struct atmel_spi_device *asd = spi->controller_state;
unsigned gpio = (unsigned) spi->controller_data;
- unsigned long flags;
if (!asd)
return;
- spin_lock_irqsave(&as->lock, flags);
+ atmel_spi_lock(as);
if (as->stay == spi) {
as->stay = NULL;
cs_deactivate(as, spi);
}
- spin_unlock_irqrestore(&as->lock, flags);
+ atmel_spi_unlock(as);
spi->controller_state = NULL;
gpio_free(gpio);
kfree(asd);
}
+static inline unsigned int atmel_get_version(struct atmel_spi *as)
+{
+ return spi_readl(as, VERSION) & 0x00000fff;
+}
+
+static void atmel_get_caps(struct atmel_spi *as)
+{
+ unsigned int version;
+
+ version = atmel_get_version(as);
+ dev_info(&as->pdev->dev, "version: 0x%x\n", version);
+
+ as->caps.is_spi2 = version > 0x121;
+ as->caps.has_wdrbt = version >= 0x210;
+ as->caps.has_dma_support = version >= 0x212;
+}
+
/*-------------------------------------------------------------------------*/
static int atmel_spi_probe(struct platform_device *pdev)
@@ -963,15 +1508,39 @@ static int atmel_spi_probe(struct platform_device *pdev)
spin_lock_init(&as->lock);
INIT_LIST_HEAD(&as->queue);
+
as->pdev = pdev;
as->regs = ioremap(regs->start, resource_size(regs));
if (!as->regs)
goto out_free_buffer;
+ as->phybase = regs->start;
as->irq = irq;
as->clk = clk;
- ret = request_irq(irq, atmel_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ atmel_get_caps(as);
+
+ as->use_dma = false;
+ as->use_pdc = false;
+ if (as->caps.has_dma_support) {
+ if (atmel_spi_configure_dma(as) == 0)
+ as->use_dma = true;
+ } else {
+ as->use_pdc = true;
+ }
+
+ if (as->caps.has_dma_support && !as->use_dma)
+ dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
+
+ if (as->use_pdc) {
+ ret = request_irq(irq, atmel_spi_pdc_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ } else {
+ tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
+ (unsigned long)master);
+
+ ret = request_irq(irq, atmel_spi_pio_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ }
if (ret)
goto out_unmap_regs;
@@ -979,8 +1548,15 @@ static int atmel_spi_probe(struct platform_device *pdev)
clk_enable(clk);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
- spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
- spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
+ }
+
+ if (as->use_pdc)
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
spi_writel(as, CR, SPI_BIT(SPIEN));
/* go! */
@@ -989,11 +1565,14 @@ static int atmel_spi_probe(struct platform_device *pdev)
ret = spi_register_master(master);
if (ret)
- goto out_reset_hw;
+ goto out_free_dma;
return 0;
-out_reset_hw:
+out_free_dma:
+ if (as->use_dma)
+ atmel_spi_release_dma(as);
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable(clk);
@@ -1001,6 +1580,8 @@ out_reset_hw:
out_unmap_regs:
iounmap(as->regs);
out_free_buffer:
+ if (!as->use_pdc)
+ tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
out_free:
@@ -1014,10 +1595,16 @@ static int atmel_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
struct spi_message *msg;
+ struct spi_transfer *xfer;
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
as->stopping = 1;
+ if (as->use_dma) {
+ atmel_spi_stop_dma(as);
+ atmel_spi_release_dma(as);
+ }
+
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
@@ -1025,13 +1612,18 @@ static int atmel_spi_remove(struct platform_device *pdev)
/* Terminate remaining queued transfers */
list_for_each_entry(msg, &as->queue, queue) {
- /* REVISIT unmapping the dma is a NOP on ARM and AVR32
- * but we shouldn't depend on that...
- */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer)
+ || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+ }
msg->status = -ESHUTDOWN;
msg->complete(msg->context);
}
+ if (!as->use_pdc)
+ tasklet_kill(&as->tasklet);
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
new file mode 100644
index 00000000000..89c0b503311
--- /dev/null
+++ b/drivers/spi/spi-bcm2835.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for Broadcom BCM2835 SPI Controllers
+ *
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ *
+ * This driver is inspired by:
+ * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG 0x02000000
+#define BCM2835_SPI_CS_DMA_LEN 0x01000000
+#define BCM2835_SPI_CS_CSPOL2 0x00800000
+#define BCM2835_SPI_CS_CSPOL1 0x00400000
+#define BCM2835_SPI_CS_CSPOL0 0x00200000
+#define BCM2835_SPI_CS_RXF 0x00100000
+#define BCM2835_SPI_CS_RXR 0x00080000
+#define BCM2835_SPI_CS_TXD 0x00040000
+#define BCM2835_SPI_CS_RXD 0x00020000
+#define BCM2835_SPI_CS_DONE 0x00010000
+#define BCM2835_SPI_CS_LEN 0x00002000
+#define BCM2835_SPI_CS_REN 0x00001000
+#define BCM2835_SPI_CS_ADCS 0x00000800
+#define BCM2835_SPI_CS_INTR 0x00000400
+#define BCM2835_SPI_CS_INTD 0x00000200
+#define BCM2835_SPI_CS_DMAEN 0x00000100
+#define BCM2835_SPI_CS_TA 0x00000080
+#define BCM2835_SPI_CS_CSPOL 0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
+#define BCM2835_SPI_CS_CPOL 0x00000008
+#define BCM2835_SPI_CS_CPHA 0x00000004
+#define BCM2835_SPI_CS_CS_10 0x00000002
+#define BCM2835_SPI_CS_CS_01 0x00000001
+
+#define BCM2835_SPI_TIMEOUT_MS 30000
+#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS)
+
+#define DRV_NAME "spi-bcm2835"
+
+struct bcm2835_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ struct completion done;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len)
+{
+ u8 byte;
+
+ while (len--) {
+ byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = byte;
+ }
+}
+
+static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len)
+{
+ u8 byte;
+
+ if (len > bs->len)
+ len = bs->len;
+
+ while (len--) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
+ bs->len--;
+ }
+}
+
+static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /*
+ * RXR - RX needs Reading. This means 12 (or more) bytes have been
+ * transmitted and hence 12 (or more) bytes have been received.
+ *
+ * The FIFO is 16-bytes deep. We check for this interrupt to keep the
+ * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check
+ * this before DONE (TX empty) just in case we delayed processing this
+ * interrupt for some reason.
+ *
+ * We only check for this case if we have more bytes to TX; at the end
+ * of the transfer, we ignore this pipelining optimization, and let
+ * bcm2835_spi_finish_transfer() drain the RX FIFO.
+ */
+ if (bs->len && (cs & BCM2835_SPI_CS_RXR)) {
+ /* Read 12 bytes of data */
+ bcm2835_rd_fifo(bs, 12);
+
+ /* Write up to 12 bytes */
+ bcm2835_wr_fifo(bs, 12);
+
+ /*
+ * We must have written something to the TX FIFO due to the
+ * bs->len check above, so we cannot be DONE. Hence, return
+ * early. Note that DONE could also be set if we serviced an
+ * RXR interrupt really late.
+ */
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * DONE - TX empty. This occurs when we first enable the transfer
+ * since we do not pre-fill the TX FIFO. At any other time, given that
+ * we refill the TX FIFO above based on RXR, and hence ignore DONE if
+ * RXR is set, DONE really does mean end-of-transfer.
+ */
+ if (cs & BCM2835_SPI_CS_DONE) {
+ if (bs->len) { /* First interrupt in a transfer */
+ bcm2835_wr_fifo(bs, 16);
+ } else { /* Transfer complete */
+ /* Disable SPI interrupts */
+ cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ /*
+ * Wake up bcm2835_spi_transfer_one(), which will call
+ * bcm2835_spi_finish_transfer(), to drain the RX FIFO.
+ */
+ complete(&bs->done);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int bcm2835_spi_start_transfer(struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
+ unsigned long spi_hz, clk_hz, cdiv;
+ u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ cdiv = 2; /* clk_hz/2 is the fastest we can go */
+ } else if (spi_hz) {
+ /* CDIV must be a power of two */
+ cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz));
+
+ if (cdiv >= 65536)
+ cdiv = 0; /* 0 is the slowest we can go */
+ } else
+ cdiv = 0; /* 0 is the slowest we can go */
+
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (spi->mode & SPI_CS_HIGH) {
+ cs |= BCM2835_SPI_CS_CSPOL;
+ cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
+ }
+
+ cs |= spi->chip_select;
+ }
+
+ INIT_COMPLETION(bs->done);
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->len = tfr->len;
+
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+ /*
+ * Enable the HW block. This will immediately trigger a DONE (TX
+ * empty) interrupt, upon which we will fill the TX FIFO with the
+ * first TX bytes. Pre-filling the TX FIFO here to avoid the
+ * interrupt doesn't work:-(
+ */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ return 0;
+}
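The divider logic above always rounds the ideal divisor up to a power of two, so the actual SCK rate is never faster than the requested speed. A minimal user-space sketch of the same calculation, assuming an illustrative 250 MHz core clock (the real rate comes from clk_get_rate()), looks like this:

#include <stdio.h>

/* Round n up to the next power of two (n > 0). */
static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned long clk_hz = 250000000UL;	/* assumed core clock */
	unsigned long spi_hz = 10000000UL;	/* requested transfer speed */
	unsigned long cdiv;

	if (spi_hz >= clk_hz / 2) {
		cdiv = 2;			/* clk_hz/2 is the fastest rate */
	} else if (spi_hz) {
		cdiv = roundup_pow_of_two_ul((clk_hz + spi_hz - 1) / spi_hz);
		if (cdiv >= 65536)
			cdiv = 0;		/* 0 selects the slowest rate */
	} else {
		cdiv = 0;
	}

	/* 10 MHz requested -> cdiv = 32 -> 250 MHz / 32 = 7.8125 MHz actual */
	printf("cdiv = %lu, actual = %lu Hz\n",
	       cdiv, clk_hz / (cdiv ? cdiv : 65536));
	return 0;
}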
+
+static int bcm2835_spi_finish_transfer(struct spi_device *spi,
+ struct spi_transfer *tfr, bool cs_change)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(spi->master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Drain RX FIFO */
+ while (cs & BCM2835_SPI_CS_RXD) {
+ bcm2835_rd_fifo(bs, 1);
+ cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ }
+
+ if (tfr->delay_usecs)
+ udelay(tfr->delay_usecs);
+
+ if (cs_change)
+ /* Clear TA flag */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA);
+
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_master *master,
+ struct spi_message *mesg)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *tfr;
+ struct spi_device *spi = mesg->spi;
+ int err = 0;
+ unsigned int timeout;
+ bool cs_change;
+
+ list_for_each_entry(tfr, &mesg->transfers, transfer_list) {
+ err = bcm2835_spi_start_transfer(spi, tfr);
+ if (err)
+ goto out;
+
+ timeout = wait_for_completion_timeout(&bs->done,
+ msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS));
+ if (!timeout) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ cs_change = tfr->cs_change ||
+ list_is_last(&tfr->transfer_list, &mesg->transfers);
+
+ err = bcm2835_spi_finish_transfer(spi, tfr, cs_change);
+ if (err)
+ goto out;
+
+ mesg->actual_length += (tfr->len - bs->len);
+ }
+
+out:
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+ mesg->status = err;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835_spi *bs;
+ struct resource *res;
+ int err;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ master->mode_bits = BCM2835_SPI_MODE_BITS;
+ master->bits_per_word_mask = BIT(8 - 1);
+ master->bus_num = -1;
+ master->num_chipselect = 3;
+ master->transfer_one_message = bcm2835_spi_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_master_get_devdata(master);
+
+ init_completion(&bs->done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "could not get memory resource\n");
+ err = -ENODEV;
+ goto out_master_put;
+ }
+
+ bs->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bs->regs) {
+ dev_err(&pdev->dev, "could not request/map memory region\n");
+ err = -ENODEV;
+ goto out_master_put;
+ }
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk)) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ goto out_master_put;
+ }
+
+ bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (bs->irq <= 0) {
+ dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
+ err = bs->irq ? bs->irq : -ENODEV;
+ goto out_master_put;
+ }
+
+ clk_prepare_enable(bs->clk);
+
+ err = request_irq(bs->irq, bcm2835_spi_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ /* initialise the hardware */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ err = spi_register_master(master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_free_irq;
+ }
+
+ return 0;
+
+out_free_irq:
+ free_irq(bs->irq, master);
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+out_master_put:
+ spi_master_put(master);
+ return err;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ free_irq(bs->irq, master);
+ spi_unregister_master(master);
+
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ clk_disable_unprepare(bs->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+ { .compatible = "brcm,bcm2835-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcm2835_spi_match,
+ },
+ .probe = bcm2835_spi_probe,
+ .remove = bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
+MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index d7df435d962..a4ec5f4ec81 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -46,7 +46,6 @@ struct bcm63xx_spi {
int irq;
/* Platform data */
- u32 speed_hz;
unsigned fifo_size;
unsigned int msg_type_shift;
unsigned int msg_ctl_width;
@@ -93,40 +92,16 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
{ 391000, SPI_CLK_0_391MHZ }
};
-static int bcm63xx_spi_check_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- u8 bits_per_word;
-
- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != 8) {
- dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
- __func__, bits_per_word);
- return -EINVAL;
- }
-
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_err(&spi->dev, "%s, unsupported slave %d\n",
- __func__, spi->chip_select);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
- u32 hz;
u8 clk_cfg, reg;
int i;
- hz = (t) ? t->speed_hz : spi->max_speed_hz;
-
/* Find the closest clock configuration */
for (i = 0; i < SPI_CLK_MASK; i++) {
- if (hz >= bcm63xx_spi_freq_table[i][0]) {
+ if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
clk_cfg = bcm63xx_spi_freq_table[i][1];
break;
}
@@ -143,7 +118,7 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
- clk_cfg, hz);
+ clk_cfg, t->speed_hz);
}
/* the spi->mode bits understood by this driver: */
@@ -151,22 +126,12 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
static int bcm63xx_spi_setup(struct spi_device *spi)
{
- struct bcm63xx_spi *bs;
-
- bs = spi_master_get_devdata(spi->master);
-
- if (!spi->bits_per_word)
- spi->bits_per_word = 8;
-
- if (spi->mode & ~MODEBITS) {
- dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
- __func__, spi->mode & ~MODEBITS);
+ if (spi->bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __func__, spi->bits_per_word);
return -EINVAL;
}
- dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
- __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
-
return 0;
}
@@ -312,9 +277,12 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
* full-duplex transfers.
*/
list_for_each_entry(t, &m->transfers, transfer_list) {
- status = bcm63xx_spi_check_transfer(spi, t);
- if (status < 0)
+ if (t->bits_per_word != 8) {
+ dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
+ __func__, t->bits_per_word);
+ status = -EINVAL;
goto exit;
+ }
if (!first)
first = t;
@@ -443,18 +411,9 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), PFX)) {
- dev_err(dev, "iomem request failed\n");
- ret = -ENXIO;
- goto out_err;
- }
-
- bs->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!bs->regs) {
- dev_err(dev, "unable to ioremap regs\n");
- ret = -ENOMEM;
+ bs->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(bs->regs)) {
+ ret = PTR_ERR(bs->regs);
goto out_err;
}
@@ -476,7 +435,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
- bs->speed_hz = pdata->speed_hz;
bs->msg_type_shift = pdata->msg_type_shift;
bs->msg_ctl_width = pdata->msg_ctl_width;
bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
@@ -493,7 +451,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
/* Initialize hardware */
- clk_enable(bs->clk);
+ clk_prepare_enable(bs->clk);
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
/* register and we are done */
@@ -509,7 +467,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
return 0;
out_clk_disable:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
out_err:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -530,7 +488,7 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
/* HW shutdown */
- clk_disable(bs->clk);
+ clk_disable_unprepare(bs->clk);
clk_put(bs->clk);
platform_set_drvdata(pdev, 0);
@@ -549,7 +507,7 @@ static int bcm63xx_spi_suspend(struct device *dev)
spi_master_suspend(master);
- clk_disable(bs->clk);
+ clk_disable_unprepare(bs->clk);
return 0;
}
@@ -560,7 +518,7 @@ static int bcm63xx_spi_resume(struct device *dev)
platform_get_drvdata(to_platform_device(dev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- clk_enable(bs->clk);
+ clk_prepare_enable(bs->clk);
spi_master_resume(master);
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
new file mode 100644
index 00000000000..07971e3fe58
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -0,0 +1,387 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <linux/dma-mapping.h>
+#include <asm/cpm.h>
+#include <asm/qe.h>
+
+#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
+
+/* CPM1 and CPM2 are mutually exclusive. */
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
+#else
+#include <asm/cpm2.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
+#endif
+
+#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
+#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+
+/* SPCOM register values */
+#define SPCOM_STR (1 << 23) /* Start transmit */
+
+#define SPI_PRAM_SIZE 0x100
+#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+
+static void *fsl_dummy_rx;
+static DEFINE_MUTEX(fsl_dummy_rx_lock);
+static int fsl_dummy_rx_refcnt;
+
+void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
+{
+ if (mspi->flags & SPI_QE) {
+ qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
+ if (mspi->flags & SPI_CPM1) {
+ out_be16(&mspi->pram->rbptr,
+ in_be16(&mspi->pram->rbase));
+ out_be16(&mspi->pram->tbptr,
+ in_be16(&mspi->pram->tbase));
+ }
+ }
+}
+
+static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
+{
+ struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
+ unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
+ unsigned int xfer_ofs;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
+
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ out_be16(&rx_bd->cbd_datlen, 0);
+ out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ out_be16(&tx_bd->cbd_datlen, xfer_len);
+ out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
+ BD_SC_LAST);
+
+ /* start transfer */
+ mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
+}
+
+int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped)
+{
+ struct device *dev = mspi->dev;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ if (is_dma_mapped) {
+ mspi->map_tx_dma = 0;
+ mspi->map_rx_dma = 0;
+ } else {
+ mspi->map_tx_dma = 1;
+ mspi->map_rx_dma = 1;
+ }
+
+ if (!t->tx_buf) {
+ mspi->tx_dma = mspi->dma_dummy_tx;
+ mspi->map_tx_dma = 0;
+ }
+
+ if (!t->rx_buf) {
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
+
+ if (mspi->map_tx_dma) {
+ void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+
+ mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->tx_dma)) {
+ dev_err(dev, "unable to map tx dma\n");
+ return -ENOMEM;
+ }
+ } else if (t->tx_buf) {
+ mspi->tx_dma = t->tx_dma;
+ }
+
+ if (mspi->map_rx_dma) {
+ mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->rx_dma)) {
+ dev_err(dev, "unable to map rx dma\n");
+ goto err_rx_dma;
+ }
+ } else if (t->rx_buf) {
+ mspi->rx_dma = t->rx_dma;
+ }
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
+
+ mspi->xfer_in_progress = t;
+ mspi->count = t->len;
+
+ /* start CPM transfers */
+ fsl_spi_cpm_bufs_start(mspi);
+
+ return 0;
+
+err_rx_dma:
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct spi_transfer *t = mspi->xfer_in_progress;
+
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
+}
+
+void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ u16 len;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
+ in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+
+ len = in_be16(&mspi->rx_bd->cbd_datlen);
+ if (len > mspi->count) {
+ WARN_ON(1);
+ len = mspi->count;
+ }
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= len;
+ if (mspi->count)
+ fsl_spi_cpm_bufs_start(mspi);
+ else
+ complete(&mspi->done);
+}
+
+static void *fsl_spi_alloc_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ if (!fsl_dummy_rx)
+ fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
+ if (fsl_dummy_rx)
+ fsl_dummy_rx_refcnt++;
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+
+ return fsl_dummy_rx;
+}
+
+static void fsl_spi_free_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ switch (fsl_dummy_rx_refcnt) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1:
+ kfree(fsl_dummy_rx);
+ fsl_dummy_rx = NULL;
+ /* fall through */
+ default:
+ fsl_dummy_rx_refcnt--;
+ break;
+ }
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+}
+
+static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ void __iomem *spi_base;
+ unsigned long pram_ofs = -ENOMEM;
+
+ /* Can't use of_address_to_resource(), QE muram isn't at 0. */
+ iprop = of_get_property(np, "reg", &size);
+
+ /* QE with a fixed pram location? */
+ if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
+ return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
+
+ /* QE but with a dynamic pram location? */
+ if (mspi->flags & SPI_QE) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
+ return pram_ofs;
+ }
+
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
+
+ if (mspi->flags & SPI_CPM2) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ } else {
+ struct spi_pram __iomem *pram = spi_base;
+ u16 rpbase = in_be16(&pram->rpbase);
+
+ /* Microcode relocation patch applied? */
+ if (rpbase) {
+ pram_ofs = rpbase;
+ } else {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
+ }
+
+ iounmap(spi_base);
+ return pram_ofs;
+}
+
+int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ unsigned long pram_ofs;
+ unsigned long bds_ofs;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return 0;
+
+ if (!fsl_spi_alloc_dummy_rx())
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_QE) {
+ iprop = of_get_property(np, "cell-index", &size);
+ if (iprop && size == sizeof(*iprop))
+ mspi->subblock = *iprop;
+
+ switch (mspi->subblock) {
+ default:
+ dev_warn(dev, "cell-index unspecified, assuming SPI1");
+ /* fall through */
+ case 0:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+ break;
+ case 1:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI2;
+ break;
+ }
+ }
+
+ pram_ofs = fsl_spi_cpm_get_pram(mspi);
+ if (IS_ERR_VALUE(pram_ofs)) {
+ dev_err(dev, "can't allocate spi parameter ram\n");
+ goto err_pram;
+ }
+
+ bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
+ sizeof(*mspi->rx_bd), 8);
+ if (IS_ERR_VALUE(bds_ofs)) {
+ dev_err(dev, "can't allocate bds\n");
+ goto err_bds;
+ }
+
+ mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
+ dev_err(dev, "unable to map dummy tx buffer\n");
+ goto err_dummy_tx;
+ }
+
+ mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
+ dev_err(dev, "unable to map dummy rx buffer\n");
+ goto err_dummy_rx;
+ }
+
+ mspi->pram = cpm_muram_addr(pram_ofs);
+
+ mspi->tx_bd = cpm_muram_addr(bds_ofs);
+ mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
+
+ /* Initialize parameter ram. */
+ out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
+ out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
+ out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_be16(&mspi->pram->mrblr, SPI_MRBLR);
+ out_be32(&mspi->pram->rstate, 0);
+ out_be32(&mspi->pram->rdp, 0);
+ out_be16(&mspi->pram->rbptr, 0);
+ out_be16(&mspi->pram->rbc, 0);
+ out_be32(&mspi->pram->rxtmp, 0);
+ out_be32(&mspi->pram->tstate, 0);
+ out_be32(&mspi->pram->tdp, 0);
+ out_be16(&mspi->pram->tbptr, 0);
+ out_be16(&mspi->pram->tbc, 0);
+ out_be32(&mspi->pram->txtmp, 0);
+
+ return 0;
+
+err_dummy_rx:
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+err_dummy_tx:
+ cpm_muram_free(bds_ofs);
+err_bds:
+ cpm_muram_free(pram_ofs);
+err_pram:
+ fsl_spi_free_dummy_rx();
+ return -ENOMEM;
+}
+
+void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
+ dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+ cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+ fsl_spi_free_dummy_rx();
+}
diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h
new file mode 100644
index 00000000000..c7111580548
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.h
@@ -0,0 +1,43 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_CPM_H__
+#define __SPI_FSL_CPM_H__
+
+#include "spi-fsl-lib.h"
+
+#ifdef CONFIG_FSL_SOC
+extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi);
+extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped);
+extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events);
+extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi);
+#else
+static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { }
+static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t,
+ bool is_dma_mapped) { return 0; }
+static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { }
+static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { }
+static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; }
+static inline void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { }
+#endif
+
+#endif /* __SPI_FSL_CPM_H__ */
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 8ade675a04f..a91db0e57b2 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -23,7 +23,9 @@
#include <linux/mm.h>
#include <linux/of_platform.h>
#include <linux/spi/spi.h>
+#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
+#endif
#include "spi-fsl-lib.h"
@@ -208,6 +210,7 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
/* Allocate bus num dynamically. */
pdata->bus_num = -1;
+#ifdef CONFIG_FSL_SOC
/* SPI controller is either clocked from QE or SoC clock. */
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
@@ -217,6 +220,11 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
goto err;
}
}
+#else
+ ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
+ if (ret)
+ goto err;
+#endif
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index cbe881b9ea7..52db6936778 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -34,8 +34,10 @@ struct mpc8xxx_spi {
int subblock;
struct spi_pram __iomem *pram;
+#ifdef CONFIG_FSL_SOC
struct cpm_buf_desc __iomem *tx_bd;
struct cpm_buf_desc __iomem *rx_bd;
+#endif
struct spi_transfer *xfer_in_progress;
@@ -67,6 +69,15 @@ struct mpc8xxx_spi {
unsigned int flags;
+#ifdef CONFIG_SPI_FSL_SPI
+ int type;
+ int native_chipselects;
+ u8 max_bits_per_word;
+
+ void (*set_shifts)(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first);
+#endif
+
struct workqueue_struct *workqueue;
struct work_struct work;
@@ -87,12 +98,12 @@ struct spi_mpc8xxx_cs {
static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
{
- out_be32(reg, val);
+ iowrite32be(val, reg);
}
static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
{
- return in_be32(reg);
+ return ioread32be(reg);
}
struct mpc8xxx_spi_probe_info {
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 086a9eef2e0..14e202ee703 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -10,6 +10,10 @@
* Copyright (c) 2009 MontaVista Software, Inc.
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -30,75 +34,54 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <sysdev/fsl_soc.h>
-#include <asm/cpm.h>
-#include <asm/qe.h>
-
#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
-/* CPM1 and CPM2 are mutually exclusive. */
-#ifdef CONFIG_CPM1
-#include <asm/cpm1.h>
-#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
-#else
-#include <asm/cpm2.h>
-#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
-#endif
-
-/* SPI Controller registers */
-struct fsl_spi_reg {
- u8 res1[0x20];
- __be32 mode;
- __be32 event;
- __be32 mask;
- __be32 command;
- __be32 transmit;
- __be32 receive;
-};
-
-/* SPI Controller mode register definitions */
-#define SPMODE_LOOP (1 << 30)
-#define SPMODE_CI_INACTIVEHIGH (1 << 29)
-#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
-#define SPMODE_DIV16 (1 << 27)
-#define SPMODE_REV (1 << 26)
-#define SPMODE_MS (1 << 25)
-#define SPMODE_ENABLE (1 << 24)
-#define SPMODE_LEN(x) ((x) << 20)
-#define SPMODE_PM(x) ((x) << 16)
-#define SPMODE_OP (1 << 14)
-#define SPMODE_CG(x) ((x) << 7)
+#define TYPE_FSL 0
+#define TYPE_GRLIB 1
-/*
- * Default for SPI Mode:
- * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk
- */
-#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
- SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
-
-/* SPIE register values */
-#define SPIE_NE 0x00000200 /* Not empty */
-#define SPIE_NF 0x00000100 /* Not full */
+struct fsl_spi_match_data {
+ int type;
+};
-/* SPIM register values */
-#define SPIM_NE 0x00000200 /* Not empty */
-#define SPIM_NF 0x00000100 /* Not full */
+static struct fsl_spi_match_data of_fsl_spi_fsl_config = {
+ .type = TYPE_FSL,
+};
-#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
-#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+static struct fsl_spi_match_data of_fsl_spi_grlib_config = {
+ .type = TYPE_GRLIB,
+};
-/* SPCOM register values */
-#define SPCOM_STR (1 << 23) /* Start transmit */
+static struct of_device_id of_fsl_spi_match[] = {
+ {
+ .compatible = "fsl,spi",
+ .data = &of_fsl_spi_fsl_config,
+ },
+ {
+ .compatible = "aeroflexgaisler,spictrl",
+ .data = &of_fsl_spi_grlib_config,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
-#define SPI_PRAM_SIZE 0x100
-#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+static int fsl_spi_get_type(struct device *dev)
+{
+ const struct of_device_id *match;
-static void *fsl_dummy_rx;
-static DEFINE_MUTEX(fsl_dummy_rx_lock);
-static int fsl_dummy_rx_refcnt;
+ if (dev->of_node) {
+ match = of_match_node(of_fsl_spi_match, dev->of_node);
+ if (match && match->data)
+ return ((struct fsl_spi_match_data *)match->data)->type;
+ }
+ return TYPE_FSL;
+}
static void fsl_spi_change_mode(struct spi_device *spi)
{
@@ -119,18 +102,7 @@ static void fsl_spi_change_mode(struct spi_device *spi)
/* When in CPM mode, we need to reinit tx and rx. */
if (mspi->flags & SPI_CPM_MODE) {
- if (mspi->flags & SPI_QE) {
- qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, 0);
- } else {
- cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
- if (mspi->flags & SPI_CPM1) {
- out_be16(&mspi->pram->rbptr,
- in_be16(&mspi->pram->rbase));
- out_be16(&mspi->pram->tbptr,
- in_be16(&mspi->pram->tbase));
- }
- }
+ fsl_spi_cpm_reinit_txrx(mspi);
}
mpc8xxx_spi_write_reg(mode, cs->hw_mode);
local_irq_restore(flags);
@@ -163,6 +135,40 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
}
}
+static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (msb_first) {
+ if (bits_per_word <= 8) {
+ *rx_shift = 16;
+ *tx_shift = 24;
+ } else if (bits_per_word <= 16) {
+ *rx_shift = 16;
+ *tx_shift = 16;
+ }
+ } else {
+ if (bits_per_word <= 8)
+ *rx_shift = 8;
+ }
+}
+
+static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (bits_per_word <= 16) {
+ if (msb_first) {
+ *rx_shift = 16; /* LSB in bit 16 */
+ *tx_shift = 32 - bits_per_word; /* MSB in bit 31 */
+ } else {
+ *rx_shift = 16 - bits_per_word; /* MSB in bit 15 */
+ }
+ }
+}
+
static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
struct spi_device *spi,
struct mpc8xxx_spi *mpc8xxx_spi,
@@ -173,31 +179,20 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
if (bits_per_word <= 8) {
cs->get_rx = mpc8xxx_spi_rx_buf_u8;
cs->get_tx = mpc8xxx_spi_tx_buf_u8;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- cs->rx_shift = 16;
- cs->tx_shift = 24;
- }
} else if (bits_per_word <= 16) {
cs->get_rx = mpc8xxx_spi_rx_buf_u16;
cs->get_tx = mpc8xxx_spi_tx_buf_u16;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- cs->rx_shift = 16;
- cs->tx_shift = 16;
- }
} else if (bits_per_word <= 32) {
cs->get_rx = mpc8xxx_spi_rx_buf_u32;
cs->get_tx = mpc8xxx_spi_tx_buf_u32;
} else
return -EINVAL;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
- spi->mode & SPI_LSB_FIRST) {
- cs->tx_shift = 0;
- if (bits_per_word <= 8)
- cs->rx_shift = 8;
- else
- cs->rx_shift = 0;
- }
+ if (mpc8xxx_spi->set_shifts)
+ mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift,
+ bits_per_word,
+ !(spi->mode & SPI_LSB_FIRST));
+
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
@@ -246,7 +241,8 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
/* Make sure its a bit width we support [4..16, 32] */
if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32)))
+ || ((bits_per_word > 16) && (bits_per_word != 32))
+ || (bits_per_word > mpc8xxx_spi->max_bits_per_word))
return -EINVAL;
if (!hz)
@@ -295,112 +291,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
return 0;
}
-static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
-{
- struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
- struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
- unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
- unsigned int xfer_ofs;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
-
- xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
-
- if (mspi->rx_dma == mspi->dma_dummy_rx)
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
- else
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
- out_be16(&rx_bd->cbd_datlen, 0);
- out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
-
- if (mspi->tx_dma == mspi->dma_dummy_tx)
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
- else
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
- out_be16(&tx_bd->cbd_datlen, xfer_len);
- out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
- BD_SC_LAST);
-
- /* start transfer */
- mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
-}
-
-static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
- struct spi_transfer *t, bool is_dma_mapped)
-{
- struct device *dev = mspi->dev;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
-
- if (is_dma_mapped) {
- mspi->map_tx_dma = 0;
- mspi->map_rx_dma = 0;
- } else {
- mspi->map_tx_dma = 1;
- mspi->map_rx_dma = 1;
- }
-
- if (!t->tx_buf) {
- mspi->tx_dma = mspi->dma_dummy_tx;
- mspi->map_tx_dma = 0;
- }
-
- if (!t->rx_buf) {
- mspi->rx_dma = mspi->dma_dummy_rx;
- mspi->map_rx_dma = 0;
- }
-
- if (mspi->map_tx_dma) {
- void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
-
- mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, mspi->tx_dma)) {
- dev_err(dev, "unable to map tx dma\n");
- return -ENOMEM;
- }
- } else if (t->tx_buf) {
- mspi->tx_dma = t->tx_dma;
- }
-
- if (mspi->map_rx_dma) {
- mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, mspi->rx_dma)) {
- dev_err(dev, "unable to map rx dma\n");
- goto err_rx_dma;
- }
- } else if (t->rx_buf) {
- mspi->rx_dma = t->rx_dma;
- }
-
- /* enable rx ints */
- mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
-
- mspi->xfer_in_progress = t;
- mspi->count = t->len;
-
- /* start CPM transfers */
- fsl_spi_cpm_bufs_start(mspi);
-
- return 0;
-
-err_rx_dma:
- if (mspi->map_tx_dma)
- dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
- return -ENOMEM;
-}
-
-static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
-{
- struct device *dev = mspi->dev;
- struct spi_transfer *t = mspi->xfer_in_progress;
-
- if (mspi->map_tx_dma)
- dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
- if (mspi->map_rx_dma)
- dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
- mspi->xfer_in_progress = NULL;
-}
-
static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, unsigned int len)
{
@@ -565,31 +455,45 @@ static int fsl_spi_setup(struct spi_device *spi)
cs->hw_mode = hw_mode; /* Restore settings */
return retval;
}
- return 0;
-}
-static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
-{
- u16 len;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ if (mpc8xxx_spi->type == TYPE_GRLIB) {
+ if (gpio_is_valid(spi->cs_gpio)) {
+ int desel;
- dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
- in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+ retval = gpio_request(spi->cs_gpio,
+ dev_name(&spi->dev));
+ if (retval)
+ return retval;
- len = in_be16(&mspi->rx_bd->cbd_datlen);
- if (len > mspi->count) {
- WARN_ON(1);
- len = mspi->count;
+ desel = !(spi->mode & SPI_CS_HIGH);
+ retval = gpio_direction_output(spi->cs_gpio, desel);
+ if (retval) {
+ gpio_free(spi->cs_gpio);
+ return retval;
+ }
+ } else if (spi->cs_gpio != -ENOENT) {
+ if (spi->cs_gpio < 0)
+ return spi->cs_gpio;
+ return -EINVAL;
+ }
+ /* When spi->cs_gpio == -ENOENT, a hole in the phandle list
+ * means the native chipselect is used if present, or that the
+ * chip is always selected.
+ */
}
- /* Clear the events */
- mpc8xxx_spi_write_reg(&reg_base->event, events);
+ /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
+ fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- mspi->count -= len;
- if (mspi->count)
- fsl_spi_cpm_bufs_start(mspi);
- else
- complete(&mspi->done);
+ return 0;
+}
+
+static void fsl_spi_cleanup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
}
static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
@@ -646,201 +550,51 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
return ret;
}
-static void *fsl_spi_alloc_dummy_rx(void)
-{
- mutex_lock(&fsl_dummy_rx_lock);
-
- if (!fsl_dummy_rx)
- fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
- if (fsl_dummy_rx)
- fsl_dummy_rx_refcnt++;
-
- mutex_unlock(&fsl_dummy_rx_lock);
-
- return fsl_dummy_rx;
-}
-
-static void fsl_spi_free_dummy_rx(void)
+static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
{
- mutex_lock(&fsl_dummy_rx_lock);
-
- switch (fsl_dummy_rx_refcnt) {
- case 0:
- WARN_ON(1);
- break;
- case 1:
- kfree(fsl_dummy_rx);
- fsl_dummy_rx = NULL;
- /* fall through */
- default:
- fsl_dummy_rx_refcnt--;
- break;
- }
-
- mutex_unlock(&fsl_dummy_rx_lock);
+ iounmap(mspi->reg_base);
+ fsl_spi_cpm_free(mspi);
}
-static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
- struct device *dev = mspi->dev;
- struct device_node *np = dev->of_node;
- const u32 *iprop;
- int size;
- void __iomem *spi_base;
- unsigned long pram_ofs = -ENOMEM;
-
- /* Can't use of_address_to_resource(), QE muram isn't at 0. */
- iprop = of_get_property(np, "reg", &size);
-
- /* QE with a fixed pram location? */
- if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
- return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
-
- /* QE but with a dynamic pram location? */
- if (mspi->flags & SPI_QE) {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
- QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
- return pram_ofs;
- }
-
- spi_base = of_iomap(np, 1);
- if (spi_base == NULL)
- return -EINVAL;
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ u32 slvsel;
+ u16 cs = spi->chip_select;
- if (mspi->flags & SPI_CPM2) {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- } else {
- struct spi_pram __iomem *pram = spi_base;
- u16 rpbase = in_be16(&pram->rpbase);
-
- /* Microcode relocation patch applied? */
- if (rpbase)
- pram_ofs = rpbase;
- else {
- pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- out_be16(spi_base, pram_ofs);
- }
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_set_value(spi->cs_gpio, on);
+ } else if (cs < mpc8xxx_spi->native_chipselects) {
+ slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
+ slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
}
-
- iounmap(spi_base);
- return pram_ofs;
}
-static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+static void fsl_spi_grlib_probe(struct device *dev)
{
- struct device *dev = mspi->dev;
- struct device_node *np = dev->of_node;
- const u32 *iprop;
- int size;
- unsigned long pram_ofs;
- unsigned long bds_ofs;
-
- if (!(mspi->flags & SPI_CPM_MODE))
- return 0;
-
- if (!fsl_spi_alloc_dummy_rx())
- return -ENOMEM;
-
- if (mspi->flags & SPI_QE) {
- iprop = of_get_property(np, "cell-index", &size);
- if (iprop && size == sizeof(*iprop))
- mspi->subblock = *iprop;
-
- switch (mspi->subblock) {
- default:
- dev_warn(dev, "cell-index unspecified, assuming SPI1");
- /* fall through */
- case 0:
- mspi->subblock = QE_CR_SUBBLOCK_SPI1;
- break;
- case 1:
- mspi->subblock = QE_CR_SUBBLOCK_SPI2;
- break;
- }
- }
-
- pram_ofs = fsl_spi_cpm_get_pram(mspi);
- if (IS_ERR_VALUE(pram_ofs)) {
- dev_err(dev, "can't allocate spi parameter ram\n");
- goto err_pram;
- }
+ struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ int mbits;
+ u32 capabilities;
- bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
- sizeof(*mspi->rx_bd), 8);
- if (IS_ERR_VALUE(bds_ofs)) {
- dev_err(dev, "can't allocate bds\n");
- goto err_bds;
- }
+ capabilities = mpc8xxx_spi_read_reg(&reg_base->cap);
- mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
- dev_err(dev, "unable to map dummy tx buffer\n");
- goto err_dummy_tx;
- }
+ mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts;
+ mbits = SPCAP_MAXWLEN(capabilities);
+ if (mbits)
+ mpc8xxx_spi->max_bits_per_word = mbits + 1;
- mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
- dev_err(dev, "unable to map dummy rx buffer\n");
- goto err_dummy_rx;
+ mpc8xxx_spi->native_chipselects = 0;
+ if (SPCAP_SSEN(capabilities)) {
+ mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
-
- mspi->pram = cpm_muram_addr(pram_ofs);
-
- mspi->tx_bd = cpm_muram_addr(bds_ofs);
- mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
-
- /* Initialize parameter ram. */
- out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
- out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
- out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
- out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
- out_be16(&mspi->pram->mrblr, SPI_MRBLR);
- out_be32(&mspi->pram->rstate, 0);
- out_be32(&mspi->pram->rdp, 0);
- out_be16(&mspi->pram->rbptr, 0);
- out_be16(&mspi->pram->rbc, 0);
- out_be32(&mspi->pram->rxtmp, 0);
- out_be32(&mspi->pram->tstate, 0);
- out_be32(&mspi->pram->tdp, 0);
- out_be16(&mspi->pram->tbptr, 0);
- out_be16(&mspi->pram->tbc, 0);
- out_be32(&mspi->pram->txtmp, 0);
-
- return 0;
-
-err_dummy_rx:
- dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
-err_dummy_tx:
- cpm_muram_free(bds_ofs);
-err_bds:
- cpm_muram_free(pram_ofs);
-err_pram:
- fsl_spi_free_dummy_rx();
- return -ENOMEM;
-}
-
-static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
-{
- struct device *dev = mspi->dev;
-
- if (!(mspi->flags & SPI_CPM_MODE))
- return;
-
- dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
- dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
- cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
- cpm_muram_free(cpm_muram_offset(mspi->pram));
- fsl_spi_free_dummy_rx();
-}
-
-static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
-{
- iounmap(mspi->reg_base);
- fsl_spi_cpm_free(mspi);
+ master->num_chipselect = mpc8xxx_spi->native_chipselects;
+ pdata->cs_control = fsl_spi_grlib_cs_control;
}
static struct spi_master * fsl_spi_probe(struct device *dev,
@@ -866,27 +620,35 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
goto err_probe;
master->setup = fsl_spi_setup;
+ master->cleanup = fsl_spi_cleanup;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg;
mpc8xxx_spi->spi_remove = fsl_spi_remove;
-
+ mpc8xxx_spi->max_bits_per_word = 32;
+ mpc8xxx_spi->type = fsl_spi_get_type(dev);
ret = fsl_spi_cpm_init(mpc8xxx_spi);
if (ret)
goto err_cpm_init;
- if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
- mpc8xxx_spi->rx_shift = 16;
- mpc8xxx_spi->tx_shift = 24;
- }
-
mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->reg_base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
}
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
+
+ if (mpc8xxx_spi->set_shifts)
+ /* 8 bits per word and MSB first */
+ mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift,
+ &mpc8xxx_spi->tx_shift, 8, 1);
+
/* Register for SPI Interrupt */
ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
0, "fsl_spi", mpc8xxx_spi);
@@ -904,6 +666,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (mpc8xxx_spi->max_bits_per_word < 8) {
+ regval &= ~SPMODE_LEN(0xF);
+ regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1);
+ }
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
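
With SPMODE_LEN(x) defined as ((x) << 20) in the new header, the hunk above narrows the default 8-bit word length when a GRLIB core reports a smaller maximum. A minimal worked sketch of that bit arithmetic, assuming a hypothetical core capped at 4 bits per word:

#include <assert.h>
#include <stdint.h>

#define SPMODE_LEN(x)	((x) << 20)	/* as defined in spi-fsl-spi.h */

int main(void)
{
	uint32_t regval = SPMODE_LEN(7);	/* 8-bit default from SPMODE_INIT_VAL */

	/* core reports max_bits_per_word = 4 */
	regval &= ~SPMODE_LEN(0xF);		/* clear the length field, bits 23:20 */
	regval |= SPMODE_LEN(4 - 1);		/* program a 4-bit word length */

	assert(regval == SPMODE_LEN(3));
	return 0;
}
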
@@ -1047,28 +813,31 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct spi_master *master;
struct resource mem;
- struct resource irq;
+ int irq, type;
int ret = -ENOMEM;
ret = of_mpc8xxx_spi_probe(ofdev);
if (ret)
return ret;
- ret = of_fsl_spi_get_chipselects(dev);
- if (ret)
- goto err;
+ type = fsl_spi_get_type(&ofdev->dev);
+ if (type == TYPE_FSL) {
+ ret = of_fsl_spi_get_chipselects(dev);
+ if (ret)
+ goto err;
+ }
ret = of_address_to_resource(np, 0, &mem);
if (ret)
goto err;
- ret = of_irq_to_resource(np, 0, &irq);
- if (!ret) {
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
ret = -EINVAL;
goto err;
}
- master = fsl_spi_probe(dev, &mem, irq.start);
+ master = fsl_spi_probe(dev, &mem, irq);
if (IS_ERR(master)) {
ret = PTR_ERR(master);
goto err;
@@ -1077,27 +846,25 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
return 0;
err:
- of_fsl_spi_free_chipselects(dev);
+ if (type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(dev);
return ret;
}
static int of_fsl_spi_remove(struct platform_device *ofdev)
{
+ struct spi_master *master = dev_get_drvdata(&ofdev->dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
int ret;
ret = mpc8xxx_spi_remove(&ofdev->dev);
if (ret)
return ret;
- of_fsl_spi_free_chipselects(&ofdev->dev);
+ if (mpc8xxx_spi->type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(&ofdev->dev);
return 0;
}
-static const struct of_device_id of_fsl_spi_match[] = {
- { .compatible = "fsl,spi" },
- {}
-};
-MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
-
static struct platform_driver of_fsl_spi_driver = {
.driver = {
.name = "fsl_spi",
@@ -1134,9 +901,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return -EINVAL;
master = fsl_spi_probe(&pdev->dev, mem, irq);
- if (IS_ERR(master))
- return PTR_ERR(master);
- return 0;
+ return PTR_RET(master);
}
static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
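
PTR_RET() from <linux/err.h> is essentially the three-line pattern it replaces here, returning the encoded error for an ERR_PTR value and 0 otherwise:

static inline int PTR_RET(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}
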
diff --git a/drivers/spi/spi-fsl-spi.h b/drivers/spi/spi-fsl-spi.h
new file mode 100644
index 00000000000..9a6dae00e3f
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.h
@@ -0,0 +1,72 @@
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_SPI_H__
+#define __SPI_FSL_SPI_H__
+
+/* SPI Controller registers */
+struct fsl_spi_reg {
+ __be32 cap; /* TYPE_GRLIB specific */
+ u8 res1[0x1C];
+ __be32 mode;
+ __be32 event;
+ __be32 mask;
+ __be32 command;
+ __be32 transmit;
+ __be32 receive;
+ __be32 slvsel; /* TYPE_GRLIB specific */
+};
+
+/* SPI Controller mode register definitions */
+#define SPMODE_LOOP (1 << 30)
+#define SPMODE_CI_INACTIVEHIGH (1 << 29)
+#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
+#define SPMODE_DIV16 (1 << 27)
+#define SPMODE_REV (1 << 26)
+#define SPMODE_MS (1 << 25)
+#define SPMODE_ENABLE (1 << 24)
+#define SPMODE_LEN(x) ((x) << 20)
+#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
+#define SPMODE_CG(x) ((x) << 7)
+
+/* TYPE_GRLIB SPI Controller capability register definitions */
+#define SPCAP_SSEN(x) (((x) >> 16) & 0x1)
+#define SPCAP_SSSZ(x) (((x) >> 24) & 0xff)
+#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)
+
+/*
+ * Default for SPI Mode:
+ * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
+ */
+#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
+ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+
+#endif /* __SPI_FSL_SPI_H__ */
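
The SPCAP_* masks above are what fsl_spi_grlib_probe() uses to size the controller. A minimal stand-alone sketch of that decode, with the macros copied from the header (a MAXWLEN of 0 leaves the driver's 32-bit default in place):

#include <stdint.h>
#include <stdio.h>

#define SPCAP_SSEN(x)    (((x) >> 16) & 0x1)
#define SPCAP_SSSZ(x)    (((x) >> 24) & 0xff)
#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)

static void decode_grlib_cap(uint32_t cap)
{
	unsigned int mbits = SPCAP_MAXWLEN(cap);

	/* 0 means the core keeps the default 32 bits per word */
	printf("max bits per word:  %u\n", mbits ? mbits + 1 : 32);
	/* slave selects are only usable when the SSEN bit is set */
	printf("native chipselects: %u\n",
	       SPCAP_SSEN(cap) ? (unsigned int)SPCAP_SSSZ(cap) : 0u);
}
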
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 9ddef55a716..0021fc4c45b 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -265,9 +265,9 @@ static int spi_gpio_setup(struct spi_device *spi)
}
}
if (!status) {
- status = spi_bitbang_setup(spi);
/* in case it was initialized from static board data */
spi_gpio->cs_gpios[spi->chip_select] = cs;
+ status = spi_bitbang_setup(spi);
}
if (status) {
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 3e490ee7f27..dfddf336912 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -28,6 +28,7 @@
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
+#include <linux/gpio.h>
#include <asm/mpc52xx_psc.h>
struct mpc512x_psc_spi {
@@ -113,7 +114,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
out_be32(&psc->ccr, ccr);
mps->bits_per_word = cs->bits_per_word;
- if (mps->cs_control)
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
}
@@ -121,7 +122,7 @@ static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
{
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
- if (mps->cs_control)
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
}
@@ -148,6 +149,9 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
in_8(&psc->mode);
out_8(&psc->mode, 0x0);
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
while (len) {
int count;
int i;
@@ -176,10 +180,6 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
- /* enable transmiter/receiver */
- out_8(&psc->command,
- MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
-
wait_for_completion(&mps->done);
mdelay(1);
@@ -204,9 +204,6 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
while (in_be32(&fifo->rxcnt)) {
in_8(&fifo->rxdata_8);
}
-
- out_8(&psc->command,
- MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
}
/* disable transmiter/receiver and fifo interrupt */
out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
@@ -278,6 +275,7 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
struct mpc512x_psc_spi_cs *cs = spi->controller_state;
unsigned long flags;
+ int ret;
if (spi->bits_per_word % 8)
return -EINVAL;
@@ -286,6 +284,19 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi)
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "can't get CS gpio: %d\n",
+ ret);
+ kfree(cs);
+ return ret;
+ }
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+
spi->controller_state = cs;
}
@@ -319,6 +330,8 @@ static int mpc512x_psc_spi_transfer(struct spi_device *spi,
static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
{
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
kfree(spi->controller_state);
}
@@ -405,6 +418,11 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
+{
+ gpio_set_value(spi->cs_gpio, onoff);
+}
+
/* bus_num is used only for the case dev->platform_data == NULL */
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq,
@@ -425,12 +443,9 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
mps->irq = irq;
if (pdata == NULL) {
- dev_err(dev, "probe called without platform data, no "
- "cs_control function will be called\n");
- mps->cs_control = NULL;
+ mps->cs_control = mpc512x_spi_cs_control;
mps->sysclk = 0;
master->bus_num = bus_num;
- master->num_chipselect = 255;
} else {
mps->cs_control = pdata->cs_control;
mps->sysclk = pdata->sysclk;
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 22a0af0147f..a1d5778e2bb 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -612,6 +612,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp);
if (!ssp->dmach) {
dev_err(ssp->dev, "Failed to request DMA\n");
+ ret = -ENODEV;
goto out_master_free;
}
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index cb2e284bd81..e60a776ed2d 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -393,8 +393,6 @@ static const struct of_device_id tiny_spi_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, tiny_spi_match);
-#else /* CONFIG_OF */
-#define tiny_spi_match NULL
#endif /* CONFIG_OF */
static struct platform_driver tiny_spi_driver = {
@@ -404,7 +402,7 @@ static struct platform_driver tiny_spi_driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.pm = NULL,
- .of_match_table = tiny_spi_match,
+ .of_match_table = of_match_ptr(tiny_spi_match),
},
};
module_platform_driver(tiny_spi_driver);
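
Both this driver and spi-sh-msiof below drop their "#define ..._match NULL" fallbacks in favour of of_match_ptr(). The helper in <linux/of.h> behaves roughly as follows, so the table reference compiles away cleanly when CONFIG_OF is disabled:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif
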
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 893c3d78e42..86d2158946b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -285,8 +285,12 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
timeout = jiffies + msecs_to_jiffies(1000);
while (!(__raw_readl(reg) & bit)) {
- if (time_after(jiffies, timeout))
- return -1;
+ if (time_after(jiffies, timeout)) {
+ if (!(__raw_readl(reg) & bit))
+ return -ETIMEDOUT;
+ else
+ return 0;
+ }
cpu_relax();
}
return 0;
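
mcspi_wait_for_reg_bit() now re-reads the bit once the deadline has passed, so a long preemption between the last poll and the timeout test is no longer misreported as a failure. The same pattern in isolation, as a sketch using the kernel helpers the function already relies on:

static int wait_for_bit(void __iomem *reg, unsigned long bit,
			unsigned long timeout_jiffies)
{
	unsigned long timeout = jiffies + timeout_jiffies;

	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			/* final check: the bit may have been set while we slept */
			return (__raw_readl(reg) & bit) ? 0 : -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}
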
@@ -805,6 +809,10 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
return 0;
}
+/*
+ * Note that we currently allow DMA only if we get a channel
+ * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
+ */
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
struct spi_master *master = spi->master;
@@ -823,21 +831,22 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
dma_cap_set(DMA_SLAVE, mask);
sig = mcspi_dma->dma_rx_sync_dev;
mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
- if (!mcspi_dma->dma_rx) {
- dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
- return -EAGAIN;
- }
+ if (!mcspi_dma->dma_rx)
+ goto no_dma;
sig = mcspi_dma->dma_tx_sync_dev;
mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
if (!mcspi_dma->dma_tx) {
- dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
- return -EAGAIN;
+ goto no_dma;
}
return 0;
+
+no_dma:
+ dev_warn(&spi->dev, "not using DMA for McSPI\n");
+ return -EAGAIN;
}
static int omap2_mcspi_setup(struct spi_device *spi)
@@ -870,7 +879,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
- if (ret < 0)
+ if (ret < 0 && ret != -EAGAIN)
return ret;
}
@@ -928,6 +937,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
struct spi_device *spi;
struct spi_transfer *t = NULL;
struct spi_master *master;
+ struct omap2_mcspi_dma *mcspi_dma;
int cs_active = 0;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
@@ -937,6 +947,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
spi = m->spi;
master = spi->master;
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
cs = spi->controller_state;
cd = spi->controller_data;
@@ -993,7 +1004,8 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
__raw_writel(0, cs->base
+ OMAP2_MCSPI_TX0);
- if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
count = omap2_mcspi_txrx_dma(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1040,10 +1052,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
static int omap2_mcspi_transfer_one_message(struct spi_master *master,
struct spi_message *m)
{
+ struct spi_device *spi;
struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
struct spi_transfer *t;
+ spi = m->spi;
mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
m->actual_length = 0;
m->status = 0;
@@ -1078,7 +1094,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
continue;
- if (tx_buf != NULL) {
+ if (mcspi_dma->dma_tx && tx_buf != NULL) {
t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
len, DMA_TO_DEVICE);
if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
@@ -1087,7 +1103,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
return -EINVAL;
}
}
- if (rx_buf != NULL) {
+ if (mcspi_dma->dma_rx && rx_buf != NULL) {
t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
@@ -1277,7 +1293,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
- if (status || omap2_mcspi_master_setup(mcspi) < 0)
+ status = omap2_mcspi_master_setup(mcspi);
+ if (status < 0)
goto disable_pm;
status = spi_register_master(master);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 364964d2ed0..74bc1877565 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -22,7 +22,7 @@ static int ce4100_spi_probe(struct pci_dev *dev,
return ret;
ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
- if (!ret)
+ if (ret)
return ret;
memset(&spi_pdata, 0, sizeof(spi_pdata));
@@ -47,8 +47,8 @@ static int ce4100_spi_probe(struct pci_dev *dev,
pi.size_data = sizeof(spi_pdata);
pdev = platform_device_register_full(&pi);
- if (!pdev)
- return -ENOMEM;
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
pci_set_drvdata(dev, pdev);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 810413883c7..f5d84d6f822 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -68,6 +69,7 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_TX_HITHRESH_DFLT 224
/* Offset from drv_data->lpss_base */
+#define SSP_REG 0x0c
#define SPI_CS_CONTROL 0x18
#define SPI_CS_CONTROL_SW_MODE BIT(0)
#define SPI_CS_CONTROL_CS_HIGH BIT(1)
@@ -138,6 +140,10 @@ detection_done:
/* Enable software chip select control */
value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+
+ /* Enable multiblock DMA transfers */
+ if (drv_data->master_info->enable_dma)
+ __lpss_ssp_write_priv(drv_data, SSP_REG, 1);
}
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
@@ -1083,11 +1089,9 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
ssp = &pdata->ssp;
ssp->phys_base = res->start;
- ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res);
- if (!ssp->mmio_base) {
- dev_err(&pdev->dev, "failed to ioremap mmio_base\n");
- return NULL;
- }
+ ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ssp->mmio_base))
+ return PTR_ERR(ssp->mmio_base);
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 4188b2faac5..5000586cb98 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -31,9 +32,12 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <mach/dma.h>
#include <linux/platform_data/spi-s3c64xx.h>
+#ifdef CONFIG_S3C_DMA
+#include <mach/dma.h>
+#endif
+
#define MAX_SPI_PORTS 3
/* Registers and bit-fields */
@@ -131,9 +135,9 @@
#define TXBUSY (1<<3)
struct s3c64xx_spi_dma_data {
- unsigned ch;
+ struct dma_chan *ch;
enum dma_transfer_direction direction;
- enum dma_ch dmach;
+ unsigned int dmach;
};
/**
@@ -195,16 +199,14 @@ struct s3c64xx_spi_driver_data {
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
+#ifdef CONFIG_S3C_DMA
struct samsung_dma_ops *ops;
+#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
unsigned long gpios[4];
};
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
- .name = "samsung-spi-dma",
-};
-
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
void __iomem *regs = sdd->regs;
@@ -281,6 +283,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
+#ifdef CONFIG_S3C_DMA
+/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
+
+static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
+ .name = "samsung-spi-dma",
+};
+
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
unsigned len, dma_addr_t buf)
{
@@ -294,14 +303,14 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
config.direction = sdd->rx_dma.direction;
config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
config.width = sdd->cur_bpw / 8;
- sdd->ops->config(sdd->rx_dma.ch, &config);
+ sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
} else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
config.direction = sdd->tx_dma.direction;
config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
config.width = sdd->cur_bpw / 8;
- sdd->ops->config(sdd->tx_dma.ch, &config);
+ sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
}
info.cap = DMA_SLAVE;
@@ -311,8 +320,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
info.direction = dma->direction;
info.buf = buf;
- sdd->ops->prepare(dma->ch, &info);
- sdd->ops->trigger(dma->ch);
+ sdd->ops->prepare((enum dma_ch)dma->ch, &info);
+ sdd->ops->trigger((enum dma_ch)dma->ch);
}
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
@@ -325,12 +334,150 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
req.cap = DMA_SLAVE;
req.client = &s3c64xx_spi_dma_client;
- sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
- sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
+ sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
+ sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
return 1;
}
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ /* Acquire DMA channels */
+ while (!acquire_dma(sdd))
+ usleep_range(10000, 11000);
+
+ pm_runtime_get_sync(&sdd->pdev->dev);
+
+ return 0;
+}
+
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ /* Free DMA channels */
+ sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+ sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
+
+ pm_runtime_put(&sdd->pdev->dev);
+
+ return 0;
+}
+
+static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
+ struct s3c64xx_spi_dma_data *dma)
+{
+ sdd->ops->stop((enum dma_ch)dma->ch);
+}
+#else
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ unsigned len, dma_addr_t buf)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct dma_slave_config config;
+ struct scatterlist sg;
+ struct dma_async_tx_descriptor *desc;
+
+ if (dma->direction == DMA_DEV_TO_MEM) {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ config.direction = dma->direction;
+ config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.src_addr_width = sdd->cur_bpw / 8;
+ config.src_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ } else {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, tx_dma);
+ config.direction = dma->direction;
+ config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.dst_addr_width = sdd->cur_bpw / 8;
+ config.dst_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ }
+
+ sg_init_table(&sg, 1);
+ sg_dma_len(&sg) = len;
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
+ len, offset_in_page(buf));
+ sg_dma_address(&sg) = buf;
+
+ desc = dmaengine_prep_slave_sg(dma->ch,
+ &sg, 1, dma->direction, DMA_PREP_INTERRUPT);
+
+ desc->callback = s3c64xx_spi_dmacb;
+ desc->callback_param = dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->ch);
+}
+
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+ dma_filter_fn filter = sdd->cntrlr_info->filter;
+ struct device *dev = &sdd->pdev->dev;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void*)sdd->rx_dma.dmach, dev, "rx");
+ if (!sdd->rx_dma.ch) {
+ dev_err(dev, "Failed to get RX DMA channel\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void*)sdd->tx_dma.dmach, dev, "tx");
+ if (!sdd->tx_dma.ch) {
+ dev_err(dev, "Failed to get TX DMA channel\n");
+ ret = -EBUSY;
+ goto out_rx;
+ }
+
+ ret = pm_runtime_get_sync(&sdd->pdev->dev);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable device: %d\n", ret);
+ goto out_tx;
+ }
+
+ return 0;
+
+out_tx:
+ dma_release_channel(sdd->tx_dma.ch);
+out_rx:
+ dma_release_channel(sdd->rx_dma.ch);
+out:
+ return ret;
+}
+
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ /* Free DMA channels */
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+
+ pm_runtime_put(&sdd->pdev->dev);
+ return 0;
+}
+
+static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
+ struct s3c64xx_spi_dma_data *dma)
+{
+ dmaengine_terminate_all(dma->ch);
+}
+#endif
+
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
@@ -713,9 +860,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
}
/* Polling method for xfers not bigger than FIFO capacity */
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- use_dma = 0;
- else
+ use_dma = 0;
+ if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+ (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
use_dma = 1;
spin_lock_irqsave(&sdd->lock, flags);
@@ -750,10 +897,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
- sdd->ops->stop(sdd->tx_dma.ch);
+ s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
- sdd->ops->stop(sdd->rx_dma.ch);
+ s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
}
goto out;
@@ -790,34 +937,7 @@ out:
return 0;
}
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Acquire DMA channels */
- while (!acquire_dma(sdd))
- usleep_range(10000, 11000);
-
- pm_runtime_get_sync(&sdd->pdev->dev);
-
- return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
- sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
-
- pm_runtime_put(&sdd->pdev->dev);
-
- return 0;
-}
-
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
- struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
@@ -874,7 +994,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
sdd = spi_master_get_devdata(spi->master);
if (!cs && spi->dev.of_node) {
- cs = s3c64xx_get_slave_ctrldata(sdd, spi);
+ cs = s3c64xx_get_slave_ctrldata(spi);
spi->controller_data = cs;
}
@@ -912,15 +1032,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spin_unlock_irqrestore(&sdd->lock, flags);
- if (spi->bits_per_word != 8
- && spi->bits_per_word != 16
- && spi->bits_per_word != 32) {
- dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
- spi->bits_per_word);
- err = -EINVAL;
- goto setup_exit;
- }
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
@@ -1061,41 +1172,6 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
}
#ifdef CONFIG_OF
-static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
-{
- struct device *dev = &sdd->pdev->dev;
- int idx, gpio, ret;
-
- /* find gpios for mosi, miso and clock lines */
- for (idx = 0; idx < 3; idx++) {
- gpio = of_get_gpio(dev->of_node, idx);
- if (!gpio_is_valid(gpio)) {
- dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
- goto free_gpio;
- }
- sdd->gpios[idx] = gpio;
- ret = gpio_request(gpio, "spi-bus");
- if (ret) {
- dev_err(dev, "gpio [%d] request failed: %d\n",
- gpio, ret);
- goto free_gpio;
- }
- }
- return 0;
-
-free_gpio:
- while (--idx >= 0)
- gpio_free(sdd->gpios[idx]);
- return -EINVAL;
-}
-
-static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
-{
- unsigned int idx;
- for (idx = 0; idx < 3; idx++)
- gpio_free(sdd->gpios[idx]);
-}
-
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
struct s3c64xx_spi_info *sci;
@@ -1128,15 +1204,6 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
return dev->platform_data;
}
-
-static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
-{
- return -EINVAL;
-}
-
-static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
-{
-}
#endif
static const struct of_device_id s3c64xx_spi_dt_match[];
@@ -1247,6 +1314,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
+ master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
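
The open-coded 8/16/32 check removed further down is replaced by this mask, which follows the SPI core convention that a set bit (n - 1) means n bits per word is supported. A minimal sketch of the equivalent test, with BIT() as in the kernel:

#define BIT(nr)		(1UL << (nr))

static int bpw_supported(unsigned long mask, unsigned int bpw)
{
	return (mask & BIT(bpw - 1)) != 0;
}

/* BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1) accepts exactly 8, 16 and 32 */
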
@@ -1256,10 +1324,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
goto err0;
}
- if (!sci->cfg_gpio && pdev->dev.of_node) {
- if (s3c64xx_spi_parse_dt_gpio(sdd))
- return -EBUSY;
- } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
+ if (sci->cfg_gpio && sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err0;
@@ -1270,13 +1335,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
- goto err1;
+ goto err0;
}
if (clk_prepare_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
- goto err1;
+ goto err0;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
@@ -1333,9 +1398,6 @@ err3:
clk_disable_unprepare(sdd->src_clk);
err2:
clk_disable_unprepare(sdd->clk);
-err1:
- if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1358,16 +1420,13 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(sdd->clk);
- if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
-
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1379,9 +1438,6 @@ static int s3c64xx_spi_suspend(struct device *dev)
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
- if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
- s3c64xx_spi_dt_gpio_free(sdd);
-
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
@@ -1393,9 +1449,7 @@ static int s3c64xx_spi_resume(struct device *dev)
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- if (!sci->cfg_gpio && dev->of_node)
- s3c64xx_spi_parse_dt_gpio(sdd);
- else
+ if (sci->cfg_gpio)
sci->cfg_gpio();
/* Enable the clock */
@@ -1408,7 +1462,7 @@ static int s3c64xx_spi_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int s3c64xx_spi_runtime_suspend(struct device *dev)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 8b40d0884f8..2bc5a6b8630 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -764,8 +764,6 @@ static const struct of_device_id sh_msiof_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
-#else
-#define sh_msiof_match NULL
#endif
static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
@@ -780,7 +778,7 @@ static struct platform_driver sh_msiof_spi_drv = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
.pm = &sh_msiof_spi_dev_pm_ops,
- .of_match_table = sh_msiof_match,
+ .of_match_table = of_match_ptr(sh_msiof_match),
},
};
module_platform_driver(sh_msiof_spi_drv);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index f59d4177b41..0808cd56bf8 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -660,7 +660,7 @@ static const struct of_device_id spi_sirfsoc_of_match[] = {
{ .compatible = "sirf,marco-spi", },
{}
};
-MODULE_DEVICE_TABLE(of, sirfsoc_spi_of_match);
+MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
static struct platform_driver spi_sirfsoc_driver = {
.driver = {
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
new file mode 100644
index 00000000000..598eb45e800
--- /dev/null
+++ b/drivers/spi/spi-tegra114.c
@@ -0,0 +1,1246 @@
+/*
+ * SPI driver for NVIDIA's Tegra114 SPI Controller.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND1 0x000
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SPI_PACKED (1 << 5)
+#define SPI_TX_EN (1 << 11)
+#define SPI_RX_EN (1 << 12)
+#define SPI_BOTH_EN_BYTE (1 << 13)
+#define SPI_BOTH_EN_BIT (1 << 14)
+#define SPI_LSBYTE_FE (1 << 15)
+#define SPI_LSBIT_FE (1 << 16)
+#define SPI_BIDIROE (1 << 17)
+#define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
+#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
+#define SPI_IDLE_SDA_MASK (3 << 18)
+#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_HW (1 << 21)
+/* SPI_CS_POL_INACTIVE bits are default high */
+#define SPI_CS_POL_INACTIVE 22
+#define SPI_CS_POL_INACTIVE_0 (1 << 22)
+#define SPI_CS_POL_INACTIVE_1 (1 << 23)
+#define SPI_CS_POL_INACTIVE_2 (1 << 24)
+#define SPI_CS_POL_INACTIVE_3 (1 << 25)
+#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
+
+#define SPI_CS_SEL_0 (0 << 26)
+#define SPI_CS_SEL_1 (1 << 26)
+#define SPI_CS_SEL_2 (2 << 26)
+#define SPI_CS_SEL_3 (3 << 26)
+#define SPI_CS_SEL_MASK (3 << 26)
+#define SPI_CS_SEL(x) (((x) & 0x3) << 26)
+#define SPI_CONTROL_MODE_0 (0 << 28)
+#define SPI_CONTROL_MODE_1 (1 << 28)
+#define SPI_CONTROL_MODE_2 (2 << 28)
+#define SPI_CONTROL_MODE_3 (3 << 28)
+#define SPI_CONTROL_MODE_MASK (3 << 28)
+#define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
+#define SPI_M_S (1 << 30)
+#define SPI_PIO (1 << 31)
+
+#define SPI_COMMAND2 0x004
+#define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
+#define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
+
+#define SPI_CS_TIMING1 0x008
+#define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+#define SPI_CS_SETUP_HOLD(reg, cs, val) \
+ ((((val) & 0xFFu) << ((cs) * 8)) | \
+ ((reg) & ~(0xFFu << ((cs) * 8))))
+
+#define SPI_CS_TIMING2 0x00C
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
+#define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
+#define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
+#define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
+#define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
+#define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
+#define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
+#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
+ ((reg) & ~(1 << ((cs) * 8 + 5))))
+#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0xF) << ((cs) * 8)) | \
+ ((reg) & ~(0xF << ((cs) * 8))))
+
+#define SPI_TRANS_STATUS 0x010
+#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
+#define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
+#define SPI_RDY (1 << 30)
+
+#define SPI_FIFO_STATUS 0x014
+#define SPI_RX_FIFO_EMPTY (1 << 0)
+#define SPI_RX_FIFO_FULL (1 << 1)
+#define SPI_TX_FIFO_EMPTY (1 << 2)
+#define SPI_TX_FIFO_FULL (1 << 3)
+#define SPI_RX_FIFO_UNF (1 << 4)
+#define SPI_RX_FIFO_OVF (1 << 5)
+#define SPI_TX_FIFO_UNF (1 << 6)
+#define SPI_TX_FIFO_OVF (1 << 7)
+#define SPI_ERR (1 << 8)
+#define SPI_TX_FIFO_FLUSH (1 << 14)
+#define SPI_RX_FIFO_FLUSH (1 << 15)
+#define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
+#define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
+#define SPI_FRAME_END (1 << 30)
+#define SPI_CS_INACTIVE (1 << 31)
+
+#define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
+ SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
+#define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
+
+#define SPI_TX_DATA 0x018
+#define SPI_RX_DATA 0x01C
+
+#define SPI_DMA_CTL 0x020
+#define SPI_TX_TRIG_1 (0 << 15)
+#define SPI_TX_TRIG_4 (1 << 15)
+#define SPI_TX_TRIG_8 (2 << 15)
+#define SPI_TX_TRIG_16 (3 << 15)
+#define SPI_TX_TRIG_MASK (3 << 15)
+#define SPI_RX_TRIG_1 (0 << 19)
+#define SPI_RX_TRIG_4 (1 << 19)
+#define SPI_RX_TRIG_8 (2 << 19)
+#define SPI_RX_TRIG_16 (3 << 19)
+#define SPI_RX_TRIG_MASK (3 << 19)
+#define SPI_IE_TX (1 << 28)
+#define SPI_IE_RX (1 << 29)
+#define SPI_CONT (1 << 30)
+#define SPI_DMA (1 << 31)
+#define SPI_DMA_EN SPI_DMA
+
+#define SPI_DMA_BLK 0x024
+#define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
+
+#define SPI_TX_FIFO 0x108
+#define SPI_RX_FIFO 0x188
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 64
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40)
+#define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0)
+#define MAX_HOLD_CYCLES 16
+#define SPI_DEFAULT_SPEED 25000000
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 64
+
+struct tegra_spi_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ int dma_req_sel;
+ u32 spi_max_frequency;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ unsigned long packed_size;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+ u32 spi_cs_timing;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_spi_runtime_suspend(struct device *dev);
+static int tegra_spi_runtime_resume(struct device *dev);
+
+static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SPI_TX_FIFO)
+ readl(tspi->base + SPI_COMMAND1);
+}
+
+static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
+{
+ unsigned long val;
+
+ /* Write 1 to clear status register */
+ val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
+ tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
+
+ /* Clear fifo status error if any */
+ val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (val & SPI_ERR)
+ tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
+ SPI_FIFO_STATUS);
+}
+
+static unsigned tegra_spi_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_spi_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word = t->bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ unsigned long fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (*tx_buf++) << (i*8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= ((*tx_buf++) << i*8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ unsigned long fifo_status;
+ unsigned i, count;
+ unsigned long x;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ unsigned int rx_mask;
+ unsigned int bits_per_word = t->bits_per_word;
+
+ rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < rx_full_count; count++) {
+ x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer accessible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int x;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= ((*tx_buf++) << i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer accessible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ unsigned int x;
+ unsigned int rx_mask;
+ unsigned int bits_per_word = t->bits_per_word;
+
+ rx_mask = (1 << bits_per_word) - 1;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ x = tspi->rx_dma_buf[count];
+ x &= rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_spi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
+{
+ INIT_COMPLETION(tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned int len;
+ int ret = 0;
+ unsigned long status;
+
+ /* Make sure that Rx and Tx fifo are empty */
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ dev_err(tspi->dev,
+ "Rx/Tx fifo are not empty status 0x%08lx\n", status);
+ return -EIO;
+ }
+
+ val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+ if (len & 0xF)
+ val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
+ else
+ val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ ret = tegra_spi_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Hand the DMA buffer over to the DMA engine */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_spi_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ tspi->dma_control_reg = val;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return ret;
+}
+
+static int tegra_spi_start_cpu_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned long val;
+ unsigned cur_words;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+
+ val = SPI_DMA_BLK_SET(cur_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ val = 0;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!dma_chan) {
+ dev_err(tspi->dev,
+ "Dma channel is not available, will try later\n");
+ return -EPROBE_DEFER;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, "Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ dma_sconfig.slave_id = tspi->dma_req_sel;
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ unsigned long command1;
+ int req_mode;
+
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (is_first_of_msg) {
+ tegra_spi_clear_status(tspi);
+
+ command1 = tspi->def_command1_reg;
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~SPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_0)
+ command1 |= SPI_CONTROL_MODE_0;
+ else if (req_mode == SPI_MODE_1)
+ command1 |= SPI_CONTROL_MODE_1;
+ else if (req_mode == SPI_MODE_2)
+ command1 |= SPI_CONTROL_MODE_2;
+ else if (req_mode == SPI_MODE_3)
+ command1 |= SPI_CONTROL_MODE_3;
+
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SS_VAL;
+ else
+ command1 &= ~SPI_CS_SS_VAL;
+
+ tegra_spi_writel(tspi, 0, SPI_COMMAND2);
+ } else {
+ command1 = tspi->command1_reg;
+ command1 &= ~SPI_BIT_LENGTH(~0);
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ if (tspi->is_packed)
+ command1 |= SPI_PACKED;
+
+ command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command1 |= SPI_RX_EN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command1 |= SPI_TX_EN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ command1 |= SPI_CS_SEL(spi->chip_select);
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->command1_reg = command1;
+
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n",
+ tspi->def_command1_reg, command1);
+
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_spi_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_spi_setup(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned long val;
+ unsigned long flags;
+ int ret;
+ unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
+ SPI_CS_POL_INACTIVE_0,
+ SPI_CS_POL_INACTIVE_1,
+ SPI_CS_POL_INACTIVE_2,
+ SPI_CS_POL_INACTIVE_3,
+ };
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
+
+ /* Set the speed to the SPI max frequency if the SPI device has not set it */
+ spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command1_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~cs_pol_bit[spi->chip_select];
+ else
+ val |= cs_pol_bit[spi->chip_select];
+ tspi->def_command1_reg = val;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "runtime PM get failed: %d\n", ret);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+ }
+
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ INIT_COMPLETION(tspi->xfer_completion);
+ ret = tegra_spi_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi transfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ pm_runtime_put(tspi->dev);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_spi_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ tegra_periph_reset_assert(tspi->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tspi->clk);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_spi_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_spi_isr(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
+ tegra_spi_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static void tegra_spi_parse_dt(struct platform_device *pdev,
+ struct tegra_spi_data *tspi)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 of_dma[2];
+
+ if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
+ of_dma, 2) >= 0)
+ tspi->dma_req_sel = of_dma[1];
+
+ if (of_property_read_u32(np, "spi-max-frequency",
+ &tspi->spi_max_frequency))
+ tspi->spi_max_frequency = 25000000; /* 25MHz */
+}
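For reference, a hedged sketch (not taken from this patch) of a device tree node exercising the two properties parsed above; the unit address, APB DMA phandle and request number are invented example values:

/*
 *	spi@7000d400 {
 *		compatible = "nvidia,tegra114-spi";
 *		nvidia,dma-request-selector = <&apbdma 15>;
 *		spi-max-frequency = <25000000>;
 *	};
 */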
+
+static struct of_device_id tegra_spi_of_match[] = {
+ { .compatible = "nvidia,tegra114-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
+
+static int tegra_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_spi_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(&pdev->dev, master);
+ tspi = spi_master_get_devdata(master);
+
+ /* Parse DT */
+ tegra_spi_parse_dt(pdev, tspi);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_spi_setup;
+ master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->bus_num = -1;
+
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ dev_err(&pdev->dev, "ioremap failed: err = %d\n", ret);
+ goto exit_free_master;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ if (tspi->dma_req_sel) {
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
+ goto exit_free_irq;
+ }
+
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
+ goto exit_rx_dma_free;
+ }
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+ }
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_spi_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command1_reg = SPI_M_S;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+ tegra_spi_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_spi_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+ spi_unregister_master(master);
+
+ if (tspi->tx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes which are in the PPSB queue by reading back */
+ tegra_spi_readl(tspi, SPI_COMMAND1);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
+ tegra_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
+};
+static struct platform_driver tegra_spi_driver = {
+ .driver = {
+ .name = "spi-tegra114",
+ .owner = THIS_MODULE,
+ .pm = &tegra_spi_pm_ops,
+ .of_match_table = tegra_spi_of_match,
+ },
+ .probe = tegra_spi_probe,
+ .remove = tegra_spi_remove,
+};
+module_platform_driver(tegra_spi_driver);
+
+MODULE_ALIAS("platform:spi-tegra114");
+MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 3d6a12b2af0..d65c000efe3 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -33,7 +33,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi-tegra.h>
#include <linux/clk/tegra.h>
#define SPI_COMMAND 0x000
@@ -439,23 +438,13 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
return handle_cpu_based_xfer(tsd);
}
-static struct tegra_spi_platform_data *tegra_sflash_parse_dt(
- struct platform_device *pdev)
+static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd)
{
- struct tegra_spi_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
- u32 max_freq;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
- return NULL;
- }
-
- if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
- pdata->spi_max_frequency = max_freq;
+ struct device_node *np = tsd->dev->of_node;
- return pdata;
+ if (of_property_read_u32(np, "spi-max-frequency",
+ &tsd->spi_max_frequency))
+ tsd->spi_max_frequency = 25000000; /* 25MHz */
}
static struct of_device_id tegra_sflash_of_match[] = {
@@ -469,28 +458,15 @@ static int tegra_sflash_probe(struct platform_device *pdev)
struct spi_master *master;
struct tegra_sflash_data *tsd;
struct resource *r;
- struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
int ret;
const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_sflash_of_match),
- &pdev->dev);
+ match = of_match_device(tegra_sflash_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
- if (!pdata && pdev->dev.of_node)
- pdata = tegra_sflash_parse_dt(pdev);
-
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data, exiting\n");
- return -ENODEV;
- }
-
- if (!pdata->spi_max_frequency)
- pdata->spi_max_frequency = 25000000; /* 25MHz */
-
master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
if (!master) {
dev_err(&pdev->dev, "master allocation failed\n");
@@ -510,6 +486,8 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
+ tegra_sflash_parse_dt(tsd);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "No IO memory resource\n");
@@ -538,7 +516,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_irq;
}
- tsd->spi_max_frequency = pdata->spi_max_frequency;
init_completion(&tsd->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
@@ -658,7 +635,7 @@ static struct platform_driver tegra_sflash_driver = {
.name = "spi-tegra-sflash",
.owner = THIS_MODULE,
.pm = &slink_pm_ops,
- .of_match_table = of_match_ptr(tegra_sflash_of_match),
+ .of_match_table = tegra_sflash_of_match,
},
.probe = tegra_sflash_probe,
.remove = tegra_sflash_remove,
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index a829563f471..3faf88d003d 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -34,7 +34,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi-tegra.h>
#include <linux/clk/tegra.h>
#define SLINK_COMMAND 0x000
@@ -189,7 +188,6 @@ struct tegra_slink_data {
unsigned dma_buf_size;
unsigned max_buf_size;
bool is_curr_dma_xfer;
- bool is_hw_based_cs;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
@@ -375,9 +373,6 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
} else {
- unsigned int bits_per_word;
-
- bits_per_word = t->bits_per_word;
for (count = 0; count < rx_full_count; count++) {
x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
for (i = 0; (i < tspi->bytes_per_word); i++)
@@ -720,7 +715,6 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
u8 bits_per_word;
unsigned total_fifo_words;
int ret;
- struct tegra_spi_device_controller_data *cdata = spi->controller_data;
unsigned long command;
unsigned long command2;
@@ -743,39 +737,11 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
command = tspi->def_command_reg;
command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+ command |= SLINK_CS_SW | SLINK_CS_VALUE;
command2 = tspi->def_command2_reg;
command2 |= SLINK_SS_EN_CS(spi->chip_select);
- /* possibly use the hw based chip select */
- tspi->is_hw_based_cs = false;
- if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
- ((tspi->curr_dma_words * tspi->bytes_per_word) ==
- (t->len - tspi->cur_pos))) {
- int setup_count;
- int sts2;
-
- setup_count = cdata->cs_setup_clk_count >> 1;
- setup_count = max(setup_count, 3);
- command2 |= SLINK_SS_SETUP(setup_count);
- if (tspi->chip_data->cs_hold_time) {
- int hold_count;
-
- hold_count = cdata->cs_hold_clk_count;
- hold_count = max(hold_count, 0xF);
- sts2 = tegra_slink_readl(tspi, SLINK_STATUS2);
- sts2 &= ~SLINK_SS_HOLD_TIME(0xF);
- sts2 |= SLINK_SS_HOLD_TIME(hold_count);
- tegra_slink_writel(tspi, sts2, SLINK_STATUS2);
- }
- tspi->is_hw_based_cs = true;
- }
-
- if (tspi->is_hw_based_cs)
- command &= ~SLINK_CS_SW;
- else
- command |= SLINK_CS_SW | SLINK_CS_VALUE;
-
command &= ~SLINK_MODES;
if (spi->mode & SPI_CPHA)
command |= SLINK_CK_SDA;
@@ -1065,36 +1031,25 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static struct tegra_spi_platform_data *tegra_slink_parse_dt(
- struct platform_device *pdev)
+static void tegra_slink_parse_dt(struct tegra_slink_data *tspi)
{
- struct tegra_spi_platform_data *pdata;
- const unsigned int *prop;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = tspi->dev->of_node;
u32 of_dma[2];
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
- return NULL;
- }
-
if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
of_dma, 2) >= 0)
- pdata->dma_req_sel = of_dma[1];
-
- prop = of_get_property(np, "spi-max-frequency", NULL);
- if (prop)
- pdata->spi_max_frequency = be32_to_cpup(prop);
+ tspi->dma_req_sel = of_dma[1];
- return pdata;
+ if (of_property_read_u32(np, "spi-max-frequency",
+ &tspi->spi_max_frequency))
+ tspi->spi_max_frequency = 25000000; /* 25MHz */
}
-const struct tegra_slink_chip_data tegra30_spi_cdata = {
+static const struct tegra_slink_chip_data tegra30_spi_cdata = {
.cs_hold_time = true,
};
-const struct tegra_slink_chip_data tegra20_spi_cdata = {
+static const struct tegra_slink_chip_data tegra20_spi_cdata = {
.cs_hold_time = false,
};
@@ -1110,27 +1065,16 @@ static int tegra_slink_probe(struct platform_device *pdev)
struct spi_master *master;
struct tegra_slink_data *tspi;
struct resource *r;
- struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
int ret, spi_irq;
const struct tegra_slink_chip_data *cdata = NULL;
const struct of_device_id *match;
- match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev);
+ match = of_match_device(tegra_slink_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
cdata = match->data;
- if (!pdata && pdev->dev.of_node)
- pdata = tegra_slink_parse_dt(pdev);
-
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data, exiting\n");
- return -ENODEV;
- }
-
- if (!pdata->spi_max_frequency)
- pdata->spi_max_frequency = 25000000; /* 25MHz */
master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
if (!master) {
@@ -1148,11 +1092,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, master);
tspi = spi_master_get_devdata(master);
tspi->master = master;
- tspi->dma_req_sel = pdata->dma_req_sel;
tspi->dev = &pdev->dev;
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
+ tegra_slink_parse_dt(tspi);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
dev_err(&pdev->dev, "No IO memory resource\n");
@@ -1186,9 +1131,8 @@ static int tegra_slink_probe(struct platform_device *pdev)
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
- tspi->spi_max_frequency = pdata->spi_max_frequency;
- if (pdata->dma_req_sel) {
+ if (tspi->dma_req_sel) {
ret = tegra_slink_init_dma_param(tspi, true);
if (ret < 0) {
dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
@@ -1331,7 +1275,7 @@ static struct platform_driver tegra_slink_driver = {
.name = "spi-tegra-slink",
.owner = THIS_MODULE,
.pm = &slink_pm_ops,
- .of_match_table = of_match_ptr(tegra_slink_of_match),
+ .of_match_table = tegra_slink_of_match,
},
.probe = tegra_slink_probe,
.remove = tegra_slink_remove,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index f756481b0fe..35f60bd252d 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -615,7 +615,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
int size;
u32 n_writes;
int j;
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
const u8 *tx_buf;
const u16 *tx_sbuf;
@@ -656,7 +656,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
if (!data->pkt_rx_buff) {
/* flush queue and set status of all transfers to -ENOMEM */
dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -ENOMEM;
if (pmsg->complete != 0)
@@ -703,7 +703,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
@@ -740,7 +740,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
dev_dbg(&data->master->dev,
"%s suspend/remove initiated, flushing queue\n",
__func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete)
@@ -1187,7 +1187,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
static void pch_spi_process_messages(struct work_struct *pwork)
{
- struct spi_message *pmsg;
+ struct spi_message *pmsg, *tmp;
struct pch_spi_data *data;
int bpw;
@@ -1199,7 +1199,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
"flushing queue\n", __func__);
- list_for_each_entry(pmsg, data->queue.next, queue) {
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
pmsg->status = -EIO;
if (pmsg->complete != 0) {
@@ -1789,8 +1789,10 @@ static int __init pch_spi_init(void)
return ret;
ret = pci_register_driver(&pch_spi_pcidev_driver);
- if (ret)
+ if (ret) {
+ platform_driver_unregister(&pch_spi_pd_driver);
return ret;
+ }
return 0;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 004b10f184d..163fd802b7a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1376,6 +1376,14 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
xfer->bits_per_word = spi->bits_per_word;
if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
+ if (master->bits_per_word_mask) {
+ /* Only 32 bits fit in the mask */
+ if (xfer->bits_per_word > 32)
+ return -EINVAL;
+ if (!(master->bits_per_word_mask &
+ BIT(xfer->bits_per_word - 1)))
+ return -EINVAL;
+ }
}
message->spi = spi;
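For context, a hedged sketch of what a controller driver would declare so this new core check can reject unsupported word sizes; SPI_BPW_MASK is assumed to be the companion helper for building the mask:

	/* controller supports only 8- and 16-bit words */
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);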
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e0655dbe07..911e9e0711d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -603,7 +603,7 @@ static int spidev_probe(struct spi_device *spi)
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
- status = IS_ERR(dev) ? PTR_ERR(dev) : 0;
+ status = PTR_RET(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
diff --git a/include/linux/async.h b/include/linux/async.h
index a2e3f18b2ad..6b0226bdaad 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -16,9 +16,8 @@
#include <linux/list.h>
typedef u64 async_cookie_t;
-typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
+typedef void (*async_func_t) (void *data, async_cookie_t cookie);
struct async_domain {
- struct list_head node;
struct list_head pending;
unsigned registered:1;
};
@@ -27,8 +26,7 @@ struct async_domain {
* domain participates in global async_synchronize_full
*/
#define ASYNC_DOMAIN(_name) \
- struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
- .pending = LIST_HEAD_INIT(_name.pending), \
+ struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
.registered = 1 }
/*
@@ -36,12 +34,11 @@ struct async_domain {
* complete, this domain does not participate in async_synchronize_full
*/
#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
- struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
- .pending = LIST_HEAD_INIT(_name.pending), \
+ struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
.registered = 0 }
-extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
-extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+extern async_cookie_t async_schedule(async_func_t func, void *data);
+extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
struct async_domain *domain);
void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
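A minimal usage sketch of the renamed callback type; my_async_init and my_probe are hypothetical and not part of this patch:

#include <linux/async.h>

static void my_async_init(void *data, async_cookie_t cookie)
{
	/* slow, independent initialization work on 'data' goes here */
}

static int my_probe(struct device *dev)
{
	async_schedule(my_async_init, dev);
	return 0;
}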
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 470073bf93d..d86e215ca2b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -19,6 +19,7 @@
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>
+#include <linux/fs.h>
#ifdef CONFIG_CGROUPS
@@ -30,10 +31,6 @@ struct css_id;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
-extern void cgroup_lock(void);
-extern int cgroup_lock_is_held(void);
-extern bool cgroup_lock_live_group(struct cgroup *cgrp);
-extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
@@ -44,14 +41,25 @@ extern void cgroup_unload_subsys(struct cgroup_subsys *ss);
extern const struct file_operations proc_cgroup_operations;
-/* Define the enumeration of all builtin cgroup subsystems */
+/*
+ * Define the enumeration of all cgroup subsystems.
+ *
+ * We define ids for builtin subsystems and then modular ones.
+ */
#define SUBSYS(_x) _x ## _subsys_id,
-#define IS_SUBSYS_ENABLED(option) IS_ENABLED(option)
enum cgroup_subsys_id {
+#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
+#include <linux/cgroup_subsys.h>
+#undef IS_SUBSYS_ENABLED
+ CGROUP_BUILTIN_SUBSYS_COUNT,
+
+ __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
+
+#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
+#undef IS_SUBSYS_ENABLED
CGROUP_SUBSYS_COUNT,
};
-#undef IS_SUBSYS_ENABLED
#undef SUBSYS
/* Per-subsystem/per-cgroup state maintained by the system. */
@@ -148,6 +156,13 @@ enum {
* specified at mount time and thus is implemented here.
*/
CGRP_CPUSET_CLONE_CHILDREN,
+ /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
+ CGRP_SANE_BEHAVIOR,
+};
+
+struct cgroup_name {
+ struct rcu_head rcu_head;
+ char name[];
};
struct cgroup {
@@ -172,11 +187,23 @@ struct cgroup {
struct cgroup *parent; /* my parent */
struct dentry *dentry; /* cgroup fs entry, RCU protected */
+ /*
+ * This is a copy of dentry->d_name, and it's needed because
+ * we can't use dentry->d_name in cgroup_path().
+ *
+ * You must acquire rcu_read_lock() to access cgrp->name, and
+ * the only place that can change it is rename(), which is
+ * protected by parent dir's i_mutex.
+ *
+ * Normally you should use cgroup_name() wrapper rather than
+ * access it directly.
+ */
+ struct cgroup_name __rcu *name;
+
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
struct cgroupfs_root *root;
- struct cgroup *top_cgroup;
/*
* List of cg_cgroup_links pointing at css_sets with
@@ -213,6 +240,96 @@ struct cgroup {
struct simple_xattrs xattrs;
};
+#define MAX_CGROUP_ROOT_NAMELEN 64
+
+/* cgroupfs_root->flags */
+enum {
+ /*
+ * Unfortunately, cgroup core and various controllers are riddled
+ * with idiosyncrasies and pointless options. The following flag,
+ * when set, will force sane behavior - some options are forced on,
+ * others are disallowed, and some controllers will change their
+ * hierarchical or other behaviors.
+ *
+ * The set of behaviors affected by this flag are still being
+ * determined and developed and the mount option for this flag is
+ * prefixed with __DEVEL__. The prefix will be dropped once we
+ * reach the point where all behaviors are compatible with the
+ * planned unified hierarchy, which will automatically turn on this
+ * flag.
+ *
+ * The following are the behaviors currently affected by this flag.
+ *
+ * - Mount options "noprefix" and "clone_children" are disallowed.
+ * Also, cgroupfs file cgroup.clone_children is not created.
+ *
+ * - When mounting an existing superblock, mount options should
+ * match.
+ *
+ * - Remount is disallowed.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for
+ * the flag is not created.
+ *
+ * The following are planned changes.
+ *
+ * - release_agent will be disallowed once replacement notification
+ * mechanism is implemented.
+ */
+ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),
+
+ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
+ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
+};
+
+/*
+ * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
+ * associated with a superblock to form an active hierarchy. This is
+ * internal to cgroup core. Don't access directly from controllers.
+ */
+struct cgroupfs_root {
+ struct super_block *sb;
+
+ /*
+ * The bitmask of subsystems intended to be attached to this
+ * hierarchy
+ */
+ unsigned long subsys_mask;
+
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
+ /* The bitmask of subsystems currently attached to this hierarchy */
+ unsigned long actual_subsys_mask;
+
+ /* A list running through the attached subsystems */
+ struct list_head subsys_list;
+
+ /* The root cgroup for this hierarchy */
+ struct cgroup top_cgroup;
+
+ /* Tracks how many cgroups are currently defined in hierarchy. */
+ int number_of_cgroups;
+
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+
+ /* All cgroups on this root, cgroup_mutex protected */
+ struct list_head allcg_list;
+
+ /* Hierarchy-specific flags */
+ unsigned long flags;
+
+ /* IDs for cgroups in this hierarchy */
+ struct ida cgroup_ida;
+
+ /* The path to use for release notifications. */
+ char release_agent_path[PATH_MAX];
+
+ /* The name for this hierarchy - may be empty */
+ char name[MAX_CGROUP_ROOT_NAMELEN];
+};
+
/*
* A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
@@ -278,6 +395,7 @@ struct cgroup_map_cb {
/* cftype->flags */
#define CFTYPE_ONLY_ON_ROOT (1U << 0) /* only create on root cg */
#define CFTYPE_NOT_ON_ROOT (1U << 1) /* don't create on root cg */
+#define CFTYPE_INSANE (1U << 2) /* don't create if sane_behavior */
#define MAX_CFTYPE_NAME 64
@@ -304,9 +422,6 @@ struct cftype {
/* CFTYPE_* flags */
unsigned int flags;
- /* file xattrs */
- struct simple_xattrs xattrs;
-
int (*open)(struct inode *inode, struct file *file);
ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
struct file *file,
@@ -404,18 +519,31 @@ struct cgroup_scanner {
void *data;
};
+/*
+ * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
+ * function can be called as long as @cgrp is accessible.
+ */
+static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
+{
+ return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
+}
+
+/* Caller should hold rcu_read_lock() */
+static inline const char *cgroup_name(const struct cgroup *cgrp)
+{
+ return rcu_dereference(cgrp->name)->name;
+}
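A short usage sketch for cgroup_name() (the surrounding function is hypothetical), showing the RCU requirement stated above:

static void my_log_cgroup(struct cgroup *cgrp)
{
	rcu_read_lock();
	pr_info("cgroup: %s\n", cgroup_name(cgrp));
	rcu_read_unlock();
}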
+
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_is_removed(const struct cgroup *cgrp);
+bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int cgroup_task_count(const struct cgroup *cgrp);
-/* Return true if cgrp is a descendant of the task's cgroup */
-int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
-
/*
* Control Group taskset, used to pass around set of tasks to cgroup_subsys
* methods.
@@ -523,10 +651,16 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
* rcu_dereference_check() conditions, such as locks used during the
* cgroup_subsys::attach() methods.
*/
+#ifdef CONFIG_PROVE_RCU
+extern struct mutex cgroup_mutex;
+#define task_subsys_state_check(task, subsys_id, __c) \
+ rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \
+ lockdep_is_held(&(task)->alloc_lock) || \
+ lockdep_is_held(&cgroup_mutex) || (__c))
+#else
#define task_subsys_state_check(task, subsys_id, __c) \
- rcu_dereference_check(task->cgroups->subsys[subsys_id], \
- lockdep_is_held(&task->alloc_lock) || \
- cgroup_lock_is_held() || (__c))
+ rcu_dereference((task)->cgroups->subsys[(subsys_id)])
+#endif
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
@@ -661,8 +795,8 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
-int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index 9c7f5807824..dd7adff76e8 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -152,7 +152,7 @@ struct clk {
}, \
.reg = _reg, \
.shift = _shift, \
- .width = _width, \
+ .mask = BIT(_width) - 1, \
.flags = _mux_flags, \
.lock = _lock, \
}; \
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 7f197d7addb..11860985fec 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -45,6 +45,14 @@ struct clk_hw;
* undo any work done in the @prepare callback. Called with
* prepare_lock held.
*
+ * @is_prepared: Queries the hardware to determine if the clock is prepared.
+ * This function is allowed to sleep. Optional, if this op is not
+ * set then the prepare count will be used.
+ *
+ * @unprepare_unused: Unprepare the clock atomically. Only called from
+ * clk_disable_unused for prepare clocks with special needs.
+ * Called with prepare mutex held. This function may sleep.
+ *
* @enable: Enable the clock atomically. This must not return until the
* clock is generating a valid clock signal, usable by consumer
* devices. Called with enable_lock held. This function must not
@@ -108,6 +116,8 @@ struct clk_hw;
struct clk_ops {
int (*prepare)(struct clk_hw *hw);
void (*unprepare)(struct clk_hw *hw);
+ int (*is_prepared)(struct clk_hw *hw);
+ void (*unprepare_unused)(struct clk_hw *hw);
int (*enable)(struct clk_hw *hw);
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
@@ -239,9 +249,14 @@ struct clk_div_table {
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
- * invalid
+ * invalid, unless CLK_DIVIDER_ALLOW_ZERO is set.
* CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from
* the hardware register
+ * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have
+ * CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor.
+ * Some hardware implementations gracefully handle this case and allow a
+ * zero divisor by not modifying their input clock
+ * (divide by one / bypass).
*/
struct clk_divider {
struct clk_hw hw;
@@ -255,6 +270,7 @@ struct clk_divider {
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
+#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
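A hedged registration sketch for the new flag; the clock names, register offset, field width, iomem base and spinlock are invented for illustration:

	/* one-based divider where a raw value of 0 means "bypass" */
	clk = clk_register_divider(NULL, "periph_div", "pll_periph", 0,
				   base + 0x10, 0, 4,
				   CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				   &periph_lock);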
extern const struct clk_ops clk_divider_ops;
struct clk *clk_register_divider(struct device *dev, const char *name,
@@ -274,7 +290,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
* @reg: register controlling multiplexer
* @shift: shift to multiplexer bit field
* @width: width of multiplexer bit field
- * @num_clks: number of parent clocks
+ * @flags: hardware-specific flags
* @lock: register lock
*
* Clock with multiple selectable parents. Implements .get_parent, .set_parent
@@ -287,8 +303,9 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
+ u32 *table;
+ u32 mask;
u8 shift;
- u8 width;
u8 flags;
spinlock_t *lock;
};
@@ -297,11 +314,19 @@ struct clk_mux {
#define CLK_MUX_INDEX_BIT BIT(1)
extern const struct clk_ops clk_mux_ops;
+
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, u32 *table, spinlock_t *lock);
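A hedged sketch of the table-based variant declared above; the table maps each parent index to a (non-contiguous) register field value, and all names and values are invented:

static u32 uart_mux_table[] = { 0x0, 0x2, 0x3 };
static const char *uart_parents[] = { "osc", "pll_p", "pll_c" };

	clk = clk_register_mux_table(NULL, "uart_mux", uart_parents, 3, 0,
				     base + 0x04, 4, 0x7, 0,
				     uart_mux_table, &uart_lock);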
+
+void of_fixed_factor_clk_setup(struct device_node *node);
+
/**
* struct clk_fixed_factor - fixed multiplier and divider clock
*
@@ -325,6 +350,37 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+/***
+ * struct clk_composite - aggregate clock of mux, divider and gate clocks
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux_hw: handle between composite and hardware-specific mux clock
+ * @rate_hw: handle between composite and hardware-specific rate clock
+ * @gate_hw: handle between composite and hardware-specific gate clock
+ * @mux_ops: clock ops for mux
+ * @rate_ops: clock ops for rate
+ * @gate_ops: clock ops for gate
+ */
+struct clk_composite {
+ struct clk_hw hw;
+ struct clk_ops ops;
+
+ struct clk_hw *mux_hw;
+ struct clk_hw *rate_hw;
+ struct clk_hw *gate_hw;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *rate_ops;
+ const struct clk_ops *gate_ops;
+};
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+ const char **parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
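A hedged call sketch for the aggregate registration helper declared above, assuming mux, div and gate are previously initialised clk_mux, clk_divider and clk_gate instances:

	clk = clk_register_composite(NULL, "periph", periph_parents, 2,
				     &mux->hw, &clk_mux_ops,
				     &div->hw, &clk_divider_ops,
				     &gate->hw, &clk_gate_ops,
				     0);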
+
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
@@ -351,6 +407,7 @@ unsigned int __clk_get_enable_count(struct clk *clk);
unsigned int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
unsigned long __clk_get_flags(struct clk *clk);
+bool __clk_is_prepared(struct clk *clk);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index b3ac22d0fc1..9a6d04524b1 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -28,16 +28,16 @@ struct clk;
* PRE_RATE_CHANGE - called immediately before the clk rate is changed,
* to indicate that the rate change will proceed. Drivers must
* immediately terminate any operations that will be affected by the
- * rate change. Callbacks may either return NOTIFY_DONE or
- * NOTIFY_STOP.
+ * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ * NOTIFY_STOP or NOTIFY_BAD.
*
* ABORT_RATE_CHANGE: called if the rate change failed for some reason
* after PRE_RATE_CHANGE. In this case, all registered notifiers on
* the clk will be called with ABORT_RATE_CHANGE. Callbacks must
- * always return NOTIFY_DONE.
+ * always return NOTIFY_DONE or NOTIFY_OK.
*
* POST_RATE_CHANGE - called after the clk rate change has successfully
- * completed. Callbacks must always return NOTIFY_DONE.
+ * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
*
*/
#define PRE_RATE_CHANGE BIT(0)
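A hedged sketch of a consumer notifier callback following the return-value rules documented above; the rate limit is an arbitrary example:

static int my_clk_rate_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
		return NOTIFY_BAD;	/* veto a rate this consumer cannot follow */

	return NOTIFY_OK;		/* NOTIFY_DONE is equally acceptable */
}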
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
new file mode 100644
index 00000000000..e074fdd5a23
--- /dev/null
+++ b/include/linux/clk/sunxi.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_CLK_SUNXI_H_
+#define __LINUX_CLK_SUNXI_H_
+
+void __init sunxi_init_clocks(void);
+
+#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 032560295fc..d08e4d2a9b9 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -591,6 +591,21 @@ static inline int cpulist_scnprintf(char *buf, int len,
}
/**
+ * cpumask_parse - extract a cpumask from a string
+ * @buf: the buffer to extract from
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+{
+ char *nl = strchr(buf, '\n');
+ int len = nl ? nl - buf : strlen(buf);
+
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+}
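A hedged usage sketch of the new helper (error handling trimmed; the hexadecimal mask string is an arbitrary example):

	cpumask_var_t mask;

	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		if (!cpumask_parse("f0\n", mask))
			pr_info("first cpu in mask: %d\n", cpumask_first(mask));
		free_cpumask_var(mask);
	}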
+
+/**
* cpulist_parse - extract a cpumask from a user string of ranges
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 8c8a60d2940..ccd1de8ad82 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
-#include <linux/cgroup.h>
#include <linux/mm.h>
#ifdef CONFIG_CPUSETS
diff --git a/include/linux/device.h b/include/linux/device.h
index 88615ccaf23..711793b145f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -297,6 +297,8 @@ void subsys_interface_unregister(struct subsys_interface *sif);
int subsys_system_register(struct bus_type *subsys,
const struct attribute_group **groups);
+int subsys_virtual_register(struct bus_type *subsys,
+ const struct attribute_group **groups);
/**
* struct class - device classes
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 52da2a25079..f83e17a40e8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -261,8 +261,10 @@ struct ftrace_probe_ops {
void (*func)(unsigned long ip,
unsigned long parent_ip,
void **data);
- int (*callback)(unsigned long ip, void **data);
- void (*free)(void **data);
+ int (*init)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
+ void (*free)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
int (*print)(struct seq_file *m,
unsigned long ip,
struct ftrace_probe_ops *ops,
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 13a54d0bdfa..34e00fb49be 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -8,6 +8,7 @@
#include <linux/perf_event.h>
struct trace_array;
+struct trace_buffer;
struct tracer;
struct dentry;
@@ -38,6 +39,12 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
+struct trace_iterator;
+struct trace_event;
+
+int ftrace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
+
/*
* The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as:
@@ -61,6 +68,7 @@ struct trace_entry {
struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
+ struct trace_buffer *trace_buffer;
void *private;
int cpu_file;
struct mutex mutex;
@@ -95,8 +103,6 @@ enum trace_iter_flags {
};
-struct trace_event;
-
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
int flags, struct trace_event *event);
@@ -128,6 +134,13 @@ enum print_line_t {
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
+struct ftrace_event_file;
+
+struct ring_buffer_event *
+trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+ struct ftrace_event_file *ftrace_file,
+ int type, unsigned long len,
+ unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
int type, unsigned long len,
@@ -182,53 +195,49 @@ extern int ftrace_event_reg(struct ftrace_event_call *event,
enum trace_reg type, void *data);
enum {
- TRACE_EVENT_FL_ENABLED_BIT,
TRACE_EVENT_FL_FILTERED_BIT,
- TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
+ TRACE_EVENT_FL_WAS_ENABLED_BIT,
};
+/*
+ * Event flags:
+ * FILTERED - The event has a filter attached
+ * CAP_ANY - Any user can enable for perf
+ * NO_SET_FILTER - Set when filter has error and is to be ignored
+ * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * WAS_ENABLED - Set and stays set when an event was ever enabled
+ * (used for module unloading, if a module event is enabled,
+ * it is best to clear the buffers that used it).
+ */
enum {
- TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
- TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
+ TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
};
struct ftrace_event_call {
struct list_head list;
struct ftrace_event_class *class;
char *name;
- struct dentry *dir;
struct trace_event event;
const char *print_fmt;
struct event_filter *filter;
+ struct list_head *files;
void *mod;
void *data;
-
/*
- * 32 bit flags:
- * bit 1: enabled
- * bit 2: filter_active
- * bit 3: enabled cmd record
- * bit 4: allow trace by non root (cap any)
- * bit 5: failed to apply filter
- * bit 6: ftrace internal event (do not enable)
- *
- * Changes to flags must hold the event_mutex.
- *
- * Note: Reads of flags do not hold the event_mutex since
- * they occur in critical sections. But the way flags
- * is currently used, these changes do no affect the code
- * except that when a change is made, it may have a slight
- * delay in propagating the changes to other CPUs due to
- * caching and such.
+ * bit 0: filter_active
+ * bit 1: allow trace by non root (cap any)
+ * bit 2: failed to apply filter
+ * bit 3: ftrace internal event (do not enable)
+ * bit 4: Event was enabled by module
*/
- unsigned int flags;
+ int flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
@@ -236,6 +245,56 @@ struct ftrace_event_call {
#endif
};
+struct trace_array;
+struct ftrace_subsystem_dir;
+
+enum {
+ FTRACE_EVENT_FL_ENABLED_BIT,
+ FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+ FTRACE_EVENT_FL_SOFT_MODE_BIT,
+ FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+};
+
+/*
+ * Ftrace event file flags:
+ * ENABLED - The event is enabled
+ * RECORDED_CMD - The comms should be recorded at sched_switch
+ * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
+ * SOFT_DISABLED - When set, do not trace the event (even though its
+ * tracepoint may be enabled)
+ */
+enum {
+ FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
+ FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+ FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
+ FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
+};
+
+struct ftrace_event_file {
+ struct list_head list;
+ struct ftrace_event_call *event_call;
+ struct dentry *dir;
+ struct trace_array *tr;
+ struct ftrace_subsystem_dir *system;
+
+ /*
+ * 32 bit flags:
+ * bit 0: enabled
+ * bit 1: enabled cmd record
+ * bit 2: enable/disable with the soft disable bit
+ * bit 3: soft disabled
+ *
+ * Note: The bits must be set atomically to prevent races
+ * from other writers. Reads of flags do not need to be in
+ * sync as they occur in critical sections. But the way flags
+ * is currently used, these changes do not affect the code
+ * except that when a change is made, it may have a slight
+ * delay in propagating the changes to other CPUs due to
+ * caching and such. Which is mostly OK ;-)
+ */
+ unsigned long flags;
+};
+
#define __TRACE_EVENT_FLAGS(name, value) \
static int __init trace_init_flags_##name(void) \
{ \
@@ -274,7 +333,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)0)
+#define is_signed_type(type) (((type)(-1)) < (type)1)
int trace_set_clr_event(const char *system, const char *event, int set);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 79fdd80a42d..2dac79c3919 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -486,6 +486,8 @@ enum ftrace_dump_mode {
void tracing_on(void);
void tracing_off(void);
int tracing_is_on(void);
+void tracing_snapshot(void);
+void tracing_snapshot_alloc(void);
extern void tracing_start(void);
extern void tracing_stop(void);
@@ -515,10 +517,32 @@ do { \
*
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
- * your code.
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_printk() is used)
+ *
+ * A little optimization trick is done here. If there's only one
+ * argument, there's no need to scan the string for printf formats.
+ * The trace_puts() will suffice. But how can we take advantage of
+ * using trace_puts() when trace_printk() has only one argument?
+ * By stringifying the args and checking the size we can tell
+ * whether or not there are args. __stringify((__VA_ARGS__)) will
+ * turn into "()\0" with a size of 3 when there are no args, anything
+ * else will be bigger. All we need to do is define a string to this,
+ * and then take its size and compare to 3. If it's bigger, use
+ * do_trace_printk(); otherwise, optimize it to trace_puts(). Then just
+ * let gcc optimize the rest.
*/
-#define trace_printk(fmt, args...) \
+#define trace_printk(fmt, ...) \
+do { \
+ char _______STR[] = __stringify((__VA_ARGS__)); \
+ if (sizeof(_______STR) > 3) \
+ do_trace_printk(fmt, ##__VA_ARGS__); \
+ else \
+ trace_puts(fmt); \
+} while (0)
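To make the sizeof() trick above concrete, here is how the two cases expand (a worked illustration only, assuming the usual __stringify() behavior):

	/* Illustration only:
	 *
	 *   trace_printk("hi\n");
	 *       __stringify(())  == "()"   ->  sizeof("()")  == 3  ->  trace_puts("hi\n")
	 *
	 *   trace_printk("x=%d\n", x);
	 *       __stringify((x)) == "(x)"  ->  sizeof("(x)") == 4  ->  do_trace_printk("x=%d\n", x)
	 */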
+
+#define do_trace_printk(fmt, args...) \
do { \
static const char *trace_printk_fmt \
__attribute__((section("__trace_printk_fmt"))) = \
@@ -538,7 +562,45 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...);
extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...);
-extern void trace_dump_stack(void);
+/**
+ * trace_puts - write a string into the ftrace buffer
+ * @str: the string to record
+ *
+ * Note: __trace_bputs is an internal function for trace_puts and
+ * the @ip is passed in via the trace_puts macro.
+ *
+ * This is similar to trace_printk() but is made for those really fast
+ * paths where a developer wants the least amount of "Heisenbug" effects,
+ * and where even the processing of the print format is too much.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_puts scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_puts() is used)
+ *
+ * Returns: 0 if nothing was written, positive # if string was.
+ * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
+ */
+
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+#define trace_puts(str) ({ \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(str) ? str : NULL; \
+ \
+ if (__builtin_constant_p(str)) \
+ __trace_bputs(_THIS_IP_, trace_printk_fmt); \
+ else \
+ __trace_puts(_THIS_IP_, str, strlen(str)); \
+})
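A short usage sketch (illustrative only, not part of this patch): a string literal resolves to __trace_bputs(), while a runtime buffer falls back to __trace_puts().

	static void example(void)
	{
		char buf[32];

		trace_puts("entering fast path\n");	/* constant -> __trace_bputs() */

		snprintf(buf, sizeof(buf), "state=%d\n", 3);
		trace_puts(buf);			/* non-constant -> __trace_puts() */
	}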
+
+extern void trace_dump_stack(int skip);
/*
* The double __builtin_constant_p is because gcc will give us an error
@@ -573,6 +635,8 @@ static inline void trace_dump_stack(void) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; }
+static inline void tracing_snapshot(void) { }
+static inline void tracing_snapshot_alloc(void) { }
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 9db0bda446a..84f449475c2 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -364,8 +364,7 @@ struct ab8500 {
const int *irq_reg_offset;
};
-struct regulator_reg_init;
-struct regulator_init_data;
+struct ab8500_regulator_platform_data;
struct ab8500_gpio_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -375,19 +374,13 @@ struct ab8500_sysctrl_platform_data;
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @pm_power_off: Should machine pm power off hook be registered or not
* @init: board-specific initialization after detection of ab8500
- * @num_regulator_reg_init: number of regulator init registers
- * @regulator_reg_init: regulator init registers
- * @num_regulator: number of regulators
* @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
int irq_base;
bool pm_power_off;
void (*init) (struct ab8500 *);
- int num_regulator_reg_init;
- struct ab8500_regulator_reg_init *regulator_reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
+ struct ab8500_regulator_platform_data *regulator;
struct abx500_gpio_platform_data *gpio;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3bbda22721e..ecddc5173c7 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -109,19 +109,6 @@ struct palmas_reg_init {
*/
int mode_sleep;
- /* tstep is the timestep loaded to the TSTEP register
- *
- * For SMPS
- *
- * 0: Jump (no slope control)
- * 1: 10mV/us
- * 2: 5mV/us
- * 3: 2.5mV/us
- *
- * For LDO unused
- */
- int tstep;
-
/* voltage_sel is the bitfield loaded onto the SMPSX_VOLTAGE
* register. Set this is the default voltage set in OTP needs
* to be overridden.
@@ -154,6 +141,12 @@ enum palmas_regulators {
PALMAS_REG_LDO9,
PALMAS_REG_LDOLN,
PALMAS_REG_LDOUSB,
+ /* External regulators */
+ PALMAS_REG_REGEN1,
+ PALMAS_REG_REGEN2,
+ PALMAS_REG_REGEN3,
+ PALMAS_REG_SYSEN1,
+ PALMAS_REG_SYSEN2,
/* Total number of regulators */
PALMAS_NUM_REGS,
};
@@ -171,6 +164,9 @@ struct palmas_pmic_platform_data {
/* use LDO6 for vibrator control */
int ldo6_vibrator;
+
+ /* Enable tracking mode of LDO8 */
+ bool enable_ldo8_tracking;
};
struct palmas_usb_platform_data {
@@ -331,6 +327,8 @@ struct palmas_pmic {
int smps457;
int range[PALMAS_REG_SMPS10];
+ unsigned int ramp_delay[PALMAS_REG_SMPS10];
+ unsigned int current_reg_mode[PALMAS_REG_SMPS10];
};
struct palmas_resource {
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
new file mode 100644
index 00000000000..92dabcaf649
--- /dev/null
+++ b/include/linux/platform_data/si5351.h
@@ -0,0 +1,114 @@
+/*
+ * Si5351A/B/C programmable clock generator platform_data.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
+#define __LINUX_PLATFORM_DATA_SI5351_H__
+
+struct clk;
+
+/**
+ * enum si5351_variant - SiLabs Si5351 chip variant
+ * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input)
+ * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input)
+ * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input)
+ * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input)
+ */
+enum si5351_variant {
+ SI5351_VARIANT_A = 1,
+ SI5351_VARIANT_A3 = 2,
+ SI5351_VARIANT_B = 3,
+ SI5351_VARIANT_C = 4,
+};
+
+/**
+ * enum si5351_pll_src - Si5351 pll clock source
+ * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_PLL_SRC_XTAL: pll source clock is XTAL input
+ * @SI5351_PLL_SRC_CLKIN: pll source clock is CLKIN input (Si5351C only)
+ */
+enum si5351_pll_src {
+ SI5351_PLL_SRC_DEFAULT = 0,
+ SI5351_PLL_SRC_XTAL = 1,
+ SI5351_PLL_SRC_CLKIN = 2,
+};
+
+/**
+ * enum si5351_multisynth_src - Si5351 multisynth clock source
+ * @SI5351_MULTISYNTH_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_MULTISYNTH_SRC_VCO0: multisynth source clock is VCO0
+ * @SI5351_MULTISYNTH_SRC_VCO1: multisynth source clock is VCO1/VXCO
+ */
+enum si5351_multisynth_src {
+ SI5351_MULTISYNTH_SRC_DEFAULT = 0,
+ SI5351_MULTISYNTH_SRC_VCO0 = 1,
+ SI5351_MULTISYNTH_SRC_VCO1 = 2,
+};
+
+/**
+ * enum si5351_clkout_src - Si5351 clock output clock source
+ * @SI5351_CLKOUT_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_CLKOUT_SRC_MSYNTH_N: clkout N source clock is multisynth N
+ * @SI5351_CLKOUT_SRC_MSYNTH_0_4: clkout N source clock is multisynth 0 (N<4)
+ * or 4 (N>=4)
+ * @SI5351_CLKOUT_SRC_XTAL: clkout N source clock is XTAL
+ * @SI5351_CLKOUT_SRC_CLKIN: clkout N source clock is CLKIN (Si5351C only)
+ */
+enum si5351_clkout_src {
+ SI5351_CLKOUT_SRC_DEFAULT = 0,
+ SI5351_CLKOUT_SRC_MSYNTH_N = 1,
+ SI5351_CLKOUT_SRC_MSYNTH_0_4 = 2,
+ SI5351_CLKOUT_SRC_XTAL = 3,
+ SI5351_CLKOUT_SRC_CLKIN = 4,
+};
+
+/**
+ * enum si5351_drive_strength - Si5351 clock output drive strength
+ * @SI5351_DRIVE_DEFAULT: default, do not change eeprom config
+ * @SI5351_DRIVE_2MA: 2mA clock output drive strength
+ * @SI5351_DRIVE_4MA: 4mA clock output drive strength
+ * @SI5351_DRIVE_6MA: 6mA clock output drive strength
+ * @SI5351_DRIVE_8MA: 8mA clock output drive strength
+ */
+enum si5351_drive_strength {
+ SI5351_DRIVE_DEFAULT = 0,
+ SI5351_DRIVE_2MA = 2,
+ SI5351_DRIVE_4MA = 4,
+ SI5351_DRIVE_6MA = 6,
+ SI5351_DRIVE_8MA = 8,
+};
+
+/**
+ * struct si5351_clkout_config - Si5351 clock output configuration
+ * @clkout: clkout number
+ * @multisynth_src: multisynth source clock
+ * @clkout_src: clkout source clock
+ * @pll_master: if true, clkout can also change pll rate
+ * @drive: output drive strength
+ * @rate: initial clkout rate, or default if 0
+ */
+struct si5351_clkout_config {
+ enum si5351_multisynth_src multisynth_src;
+ enum si5351_clkout_src clkout_src;
+ enum si5351_drive_strength drive;
+ bool pll_master;
+ unsigned long rate;
+};
+
+/**
+ * struct si5351_platform_data - Platform data for the Si5351 clock driver
+ * @variant: Si5351 chip variant
+ * @clk_xtal: xtal input clock
+ * @clk_clkin: clkin input clock
+ * @pll_src: array of pll source clock setting
+ * @clkout: array of clkout configuration
+ */
+struct si5351_platform_data {
+ enum si5351_variant variant;
+ struct clk *clk_xtal;
+ struct clk *clk_clkin;
+ enum si5351_pll_src pll_src[2];
+ struct si5351_clkout_config clkout[8];
+};
+
+#endif
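A hedged sketch of board code consuming this header; the variant, source selections, drive strength, and rate below are made-up example values:

	/* Illustration only: hypothetical board support code. */
	static struct si5351_platform_data example_si5351_pdata = {
		.variant	= SI5351_VARIANT_A3,
		.pll_src	= { SI5351_PLL_SRC_XTAL, SI5351_PLL_SRC_XTAL },
		.clkout		= {
			[0] = {
				.multisynth_src	= SI5351_MULTISYNTH_SRC_VCO0,
				.clkout_src	= SI5351_CLKOUT_SRC_MSYNTH_N,
				.drive		= SI5351_DRIVE_8MA,
				.pll_master	= true,
				.rate		= 74250000,	/* example: 74.25 MHz */
			},
		},
	};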
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index ceba18d23a5..8447f634c7f 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -11,6 +11,8 @@
#ifndef __S3C64XX_PLAT_SPI_H
#define __S3C64XX_PLAT_SPI_H
+#include <linux/dmaengine.h>
+
struct platform_device;
/**
@@ -38,6 +40,7 @@ struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
int (*cfg_gpio)(void);
+ dma_filter_fn filter;
};
/**
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index bf77dfdabef..02d84e24b7c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -389,6 +389,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
bool *change);
int regmap_get_val_bytes(struct regmap *map);
int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index 7bd73bbdfd1..7c5ff0c5577 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -5,11 +5,14 @@
*
* Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
* Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
*/
#ifndef __LINUX_MFD_AB8500_REGULATOR_H
#define __LINUX_MFD_AB8500_REGULATOR_H
+#include <linux/platform_device.h>
+
/* AB8500 regulators */
enum ab8500_regulator_id {
AB8500_LDO_AUX1,
@@ -17,7 +20,6 @@ enum ab8500_regulator_id {
AB8500_LDO_AUX3,
AB8500_LDO_INTCORE,
AB8500_LDO_TVOUT,
- AB8500_LDO_USB,
AB8500_LDO_AUDIO,
AB8500_LDO_ANAMIC1,
AB8500_LDO_ANAMIC2,
@@ -26,7 +28,28 @@ enum ab8500_regulator_id {
AB8500_NUM_REGULATORS,
};
-/* AB9450 regulators */
+/* AB8505 regulators */
+enum ab8505_regulator_id {
+ AB8505_LDO_AUX1,
+ AB8505_LDO_AUX2,
+ AB8505_LDO_AUX3,
+ AB8505_LDO_AUX4,
+ AB8505_LDO_AUX5,
+ AB8505_LDO_AUX6,
+ AB8505_LDO_INTCORE,
+ AB8505_LDO_ADC,
+ AB8505_LDO_USB,
+ AB8505_LDO_AUDIO,
+ AB8505_LDO_ANAMIC1,
+ AB8505_LDO_ANAMIC2,
+ AB8505_LDO_AUX8,
+ AB8505_LDO_ANA,
+ AB8505_SYSCLKREQ_2,
+ AB8505_SYSCLKREQ_4,
+ AB8505_NUM_REGULATORS,
+};
+
+/* AB9540 regulators */
enum ab9540_regulator_id {
AB9540_LDO_AUX1,
AB9540_LDO_AUX2,
@@ -45,16 +68,39 @@ enum ab9540_regulator_id {
AB9540_NUM_REGULATORS,
};
-/* AB8500 and AB9540 register initialization */
+/* AB8540 regulators */
+enum ab8540_regulator_id {
+ AB8540_LDO_AUX1,
+ AB8540_LDO_AUX2,
+ AB8540_LDO_AUX3,
+ AB8540_LDO_AUX4,
+ AB8540_LDO_AUX5,
+ AB8540_LDO_AUX6,
+ AB8540_LDO_INTCORE,
+ AB8540_LDO_TVOUT,
+ AB8540_LDO_AUDIO,
+ AB8540_LDO_ANAMIC1,
+ AB8540_LDO_ANAMIC2,
+ AB8540_LDO_DMIC,
+ AB8540_LDO_ANA,
+ AB8540_LDO_SDIO,
+ AB8540_SYSCLKREQ_2,
+ AB8540_SYSCLKREQ_4,
+ AB8540_NUM_REGULATORS,
+};
+
+/* AB8500, AB8505, and AB9540 register initialization */
struct ab8500_regulator_reg_init {
int id;
+ u8 mask;
u8 value;
};
-#define INIT_REGULATOR_REGISTER(_id, _value) \
- { \
- .id = _id, \
- .value = _value, \
+#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
+ { \
+ .id = _id, \
+ .mask = _mask, \
+ .value = _value, \
}
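With the added mask an init entry now states exactly which bits it touches; an illustrative entry follows (the register choice, mask, and value are made up for the example):

	/* Illustration only: update just bit 0 of the discharge control register. */
	static struct ab8500_regulator_reg_init example_reg_init[] = {
		INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0x01, 0x01),
	};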
/* AB8500 registers */
@@ -86,10 +132,58 @@ enum ab8500_regulator_reg {
AB8500_REGUCTRL2SPARE,
AB8500_REGUCTRLDISCH,
AB8500_REGUCTRLDISCH2,
- AB8500_VSMPS1SEL1,
AB8500_NUM_REGULATOR_REGISTERS,
};
+/* AB8505 registers */
+enum ab8505_regulator_reg {
+ AB8505_REGUREQUESTCTRL1,
+ AB8505_REGUREQUESTCTRL2,
+ AB8505_REGUREQUESTCTRL3,
+ AB8505_REGUREQUESTCTRL4,
+ AB8505_REGUSYSCLKREQ1HPVALID1,
+ AB8505_REGUSYSCLKREQ1HPVALID2,
+ AB8505_REGUHWHPREQ1VALID1,
+ AB8505_REGUHWHPREQ1VALID2,
+ AB8505_REGUHWHPREQ2VALID1,
+ AB8505_REGUHWHPREQ2VALID2,
+ AB8505_REGUSWHPREQVALID1,
+ AB8505_REGUSWHPREQVALID2,
+ AB8505_REGUSYSCLKREQVALID1,
+ AB8505_REGUSYSCLKREQVALID2,
+ AB8505_REGUVAUX4REQVALID,
+ AB8505_REGUMISC1,
+ AB8505_VAUDIOSUPPLY,
+ AB8505_REGUCTRL1VAMIC,
+ AB8505_VSMPSAREGU,
+ AB8505_VSMPSBREGU,
+ AB8505_VSAFEREGU, /* NOTE! PRCMU register */
+ AB8505_VPLLVANAREGU,
+ AB8505_EXTSUPPLYREGU,
+ AB8505_VAUX12REGU,
+ AB8505_VRF1VAUX3REGU,
+ AB8505_VSMPSASEL1,
+ AB8505_VSMPSASEL2,
+ AB8505_VSMPSASEL3,
+ AB8505_VSMPSBSEL1,
+ AB8505_VSMPSBSEL2,
+ AB8505_VSMPSBSEL3,
+ AB8505_VSAFESEL1, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL2, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL3, /* NOTE! PRCMU register */
+ AB8505_VAUX1SEL,
+ AB8505_VAUX2SEL,
+ AB8505_VRF1VAUX3SEL,
+ AB8505_VAUX4REQCTRL,
+ AB8505_VAUX4REGU,
+ AB8505_VAUX4SEL,
+ AB8505_REGUCTRLDISCH,
+ AB8505_REGUCTRLDISCH2,
+ AB8505_REGUCTRLDISCH3,
+ AB8505_CTRLVAUX5,
+ AB8505_CTRLVAUX6,
+ AB8505_NUM_REGULATOR_REGISTERS,
+};
/* AB9540 registers */
enum ab9540_regulator_reg {
@@ -139,4 +233,111 @@ enum ab9540_regulator_reg {
AB9540_NUM_REGULATOR_REGISTERS,
};
+/* AB8540 registers */
+enum ab8540_regulator_reg {
+ AB8540_REGUREQUESTCTRL1,
+ AB8540_REGUREQUESTCTRL2,
+ AB8540_REGUREQUESTCTRL3,
+ AB8540_REGUREQUESTCTRL4,
+ AB8540_REGUSYSCLKREQ1HPVALID1,
+ AB8540_REGUSYSCLKREQ1HPVALID2,
+ AB8540_REGUHWHPREQ1VALID1,
+ AB8540_REGUHWHPREQ1VALID2,
+ AB8540_REGUHWHPREQ2VALID1,
+ AB8540_REGUHWHPREQ2VALID2,
+ AB8540_REGUSWHPREQVALID1,
+ AB8540_REGUSWHPREQVALID2,
+ AB8540_REGUSYSCLKREQVALID1,
+ AB8540_REGUSYSCLKREQVALID2,
+ AB8540_REGUVAUX4REQVALID,
+ AB8540_REGUVAUX5REQVALID,
+ AB8540_REGUVAUX6REQVALID,
+ AB8540_REGUVCLKBREQVALID,
+ AB8540_REGUVRF1REQVALID,
+ AB8540_REGUMISC1,
+ AB8540_VAUDIOSUPPLY,
+ AB8540_REGUCTRL1VAMIC,
+ AB8540_VHSIC,
+ AB8540_VSDIO,
+ AB8540_VSMPS1REGU,
+ AB8540_VSMPS2REGU,
+ AB8540_VSMPS3REGU,
+ AB8540_VPLLVANAREGU,
+ AB8540_EXTSUPPLYREGU,
+ AB8540_VAUX12REGU,
+ AB8540_VRF1VAUX3REGU,
+ AB8540_VSMPS1SEL1,
+ AB8540_VSMPS1SEL2,
+ AB8540_VSMPS1SEL3,
+ AB8540_VSMPS2SEL1,
+ AB8540_VSMPS2SEL2,
+ AB8540_VSMPS2SEL3,
+ AB8540_VSMPS3SEL1,
+ AB8540_VSMPS3SEL2,
+ AB8540_VAUX1SEL,
+ AB8540_VAUX2SEL,
+ AB8540_VRF1VAUX3SEL,
+ AB8540_REGUCTRL2SPARE,
+ AB8540_VAUX4REQCTRL,
+ AB8540_VAUX4REGU,
+ AB8540_VAUX4SEL,
+ AB8540_VAUX5REQCTRL,
+ AB8540_VAUX5REGU,
+ AB8540_VAUX5SEL,
+ AB8540_VAUX6REQCTRL,
+ AB8540_VAUX6REGU,
+ AB8540_VAUX6SEL,
+ AB8540_VCLKBREQCTRL,
+ AB8540_VCLKBREGU,
+ AB8540_VCLKBSEL,
+ AB8540_VRF1REQCTRL,
+ AB8540_REGUCTRLDISCH,
+ AB8540_REGUCTRLDISCH2,
+ AB8540_REGUCTRLDISCH3,
+ AB8540_REGUCTRLDISCH4,
+ AB8540_VSIMSYSCLKCTRL,
+ AB8540_VANAVPLLSEL,
+ AB8540_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB8500 external regulators */
+struct ab8500_ext_regulator_cfg {
+ bool hwreq; /* requires hw mode or high power mode */
+};
+
+enum ab8500_ext_regulator_id {
+ AB8500_EXT_SUPPLY1,
+ AB8500_EXT_SUPPLY2,
+ AB8500_EXT_SUPPLY3,
+ AB8500_NUM_EXT_REGULATORS,
+};
+
+/* AB8500 regulator platform data */
+struct ab8500_regulator_platform_data {
+ int num_reg_init;
+ struct ab8500_regulator_reg_init *reg_init;
+ int num_regulator;
+ struct regulator_init_data *regulator;
+ int num_ext_regulator;
+ struct regulator_init_data *ext_regulator;
+};
+
+#ifdef CONFIG_REGULATOR_AB8500_DEBUG
+int ab8500_regulator_debug_init(struct platform_device *pdev);
+int ab8500_regulator_debug_exit(struct platform_device *pdev);
+#else
+static inline int ab8500_regulator_debug_init(struct platform_device *pdev)
+{
+ return 0;
+}
+static inline int ab8500_regulator_debug_exit(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+/* AB8500 external regulator functions. */
+int ab8500_ext_regulator_init(struct platform_device *pdev);
+void ab8500_ext_regulator_exit(struct platform_device *pdev);
+
#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 7bc732ce6e5..145022a8308 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -141,18 +141,18 @@ void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
/* regulator output control and status */
-int regulator_enable(struct regulator *regulator);
+int __must_check regulator_enable(struct regulator *regulator);
int regulator_disable(struct regulator *regulator);
int regulator_force_disable(struct regulator *regulator);
int regulator_is_enabled(struct regulator *regulator);
int regulator_disable_deferred(struct regulator *regulator, int ms);
-int regulator_bulk_get(struct device *dev, int num_consumers,
- struct regulator_bulk_data *consumers);
-int devm_regulator_bulk_get(struct device *dev, int num_consumers,
- struct regulator_bulk_data *consumers);
-int regulator_bulk_enable(int num_consumers,
- struct regulator_bulk_data *consumers);
+int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check regulator_bulk_enable(int num_consumers,
+ struct regulator_bulk_data *consumers);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 7df93f52db0..6700cc94bdd 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -22,6 +22,7 @@
struct regmap;
struct regulator_dev;
struct regulator_init_data;
+struct regulator_enable_gpio;
enum regulator_status {
REGULATOR_STATUS_OFF,
@@ -199,6 +200,8 @@ enum regulator_type {
* output when using regulator_set_voltage_sel_regmap
* @enable_reg: Register for control when using regmap enable/disable ops
* @enable_mask: Mask for control when using regmap enable/disable ops
+ * @enable_is_inverted: A flag indicating the @enable_mask bits are set to
+ *                      disable and cleared to enable, when using regulator_enable_regmap and friends.
* @bypass_reg: Register for control when using regmap set_bypass
* @bypass_mask: Mask for control when using regmap set_bypass
*
@@ -228,6 +231,7 @@ struct regulator_desc {
unsigned int apply_bit;
unsigned int enable_reg;
unsigned int enable_mask;
+ bool enable_is_inverted;
unsigned int bypass_reg;
unsigned int bypass_mask;
@@ -302,8 +306,7 @@ struct regulator_dev {
struct dentry *debugfs;
- int ena_gpio;
- unsigned int ena_gpio_invert:1;
+ struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
};
@@ -329,6 +332,8 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_map_voltage_ascend(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
int regulator_is_enabled_regmap(struct regulator_dev *rdev);
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 45e42855ad0..4dbb63a1d4a 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -122,13 +122,13 @@ struct max8952_platform_data {
int gpio_vid1;
int gpio_en;
- u8 default_mode;
- u8 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
+ u32 default_mode;
+ u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
- u8 sync_freq;
- u8 ramp_speed;
+ u32 sync_freq;
+ u32 ramp_speed;
- struct regulator_init_data reg_data;
+ struct regulator_init_data *reg_data;
};
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index c23099413ad..96a509b6be0 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -13,7 +13,7 @@
* info about what this counter is.
*/
-#include <linux/cgroup.h>
+#include <linux/spinlock.h>
#include <linux/errno.h>
/*
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1342e69542f..d69cf637a15 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -4,6 +4,7 @@
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
+#include <linux/poll.h>
struct ring_buffer;
struct ring_buffer_iter;
@@ -96,6 +97,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table);
+
+
#define RING_BUFFER_ALL_CPUS -1
void ring_buffer_free(struct ring_buffer *buffer);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2d02c76a01b..bcbc30397f2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1793,7 +1793,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
-#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
diff --git a/include/linux/spi/spi-tegra.h b/include/linux/spi/spi-tegra.h
deleted file mode 100644
index 786932c62ed..00000000000
--- a/include/linux/spi/spi-tegra.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * spi-tegra.h: SPI interface for Nvidia Tegra20 SLINK controller.
- *
- * Copyright (C) 2011 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef _LINUX_SPI_TEGRA_H
-#define _LINUX_SPI_TEGRA_H
-
-struct tegra_spi_platform_data {
- int dma_req_sel;
- unsigned int spi_max_frequency;
-};
-
-/*
- * Controller data from device to pass some info like
- * hw based chip select can be used or not and if yes
- * then CS hold and setup time.
- */
-struct tegra_spi_device_controller_data {
- bool is_hw_based_cs;
- int cs_setup_clk_count;
- int cs_hold_clk_count;
-};
-
-#endif /* _LINUX_SPI_TEGRA_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 38c2b925923..733eb5ee31c 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -228,6 +228,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* every chipselect is connected to a slave.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ * supported by the driver. Bit n indicates that a bits_per_word of n+1 is
+ * supported. If set, the SPI core will reject any transfer with an
+ * unsupported bits_per_word. If not set, this value is simply ignored,
+ * and it's up to the individual driver to perform any validation.
* @flags: other constraints relevant to this driver
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
@@ -301,6 +306,9 @@ struct spi_master {
/* spi_device.mode flags understood by this controller driver */
u16 mode_bits;
+ /* bitmask of supported bits_per_word for transfers */
+ u32 bits_per_word_mask;
+
/* other constraints relevant to this driver */
u16 flags;
#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
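Bit n of the new mask stands for a word length of n+1 bits, so a controller driver supporting only 8- and 16-bit words might advertise that as in this sketch (illustrative only, using the generic BIT() macro):

	/* Illustration only: advertise 8-bit and 16-bit transfers. */
	static void example_set_word_sizes(struct spi_master *master)
	{
		master->bits_per_word_mask = BIT(8 - 1) | BIT(16 - 1);
	}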
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index d563f37e1a1..1d7ca273927 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -16,6 +16,7 @@
extern u64 notrace trace_clock_local(void);
extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_jiffies(void);
extern u64 notrace trace_clock_global(void);
extern u64 notrace trace_clock_counter(void);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8afab27cdbc..71797563937 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,6 +11,7 @@
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
+#include <linux/cpumask.h>
struct workqueue_struct;
@@ -68,7 +69,7 @@ enum {
WORK_STRUCT_COLOR_BITS,
/* data contains off-queue information when !WORK_STRUCT_PWQ */
- WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
+ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
@@ -115,6 +116,20 @@ struct delayed_work {
int cpu;
};
+/*
+ * A struct for workqueue attributes. This can be used to change
+ * attributes of an unbound workqueue.
+ *
+ * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
+ * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
+ * participate in pool hash calculations or equality comparisons.
+ */
+struct workqueue_attrs {
+ int nice; /* nice level */
+ cpumask_var_t cpumask; /* allowed CPUs */
+ bool no_numa; /* disable NUMA affinity */
+};
+
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
@@ -283,9 +298,10 @@ enum {
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
+ WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
- WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
- WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -388,7 +404,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
- alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
@@ -399,30 +415,23 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
extern void destroy_workqueue(struct workqueue_struct *wq);
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs);
+
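Based on the declarations above, the intended calling sequence for retuning an unbound workqueue looks roughly like this (a sketch, not taken from this patch; the nice level and CPU choice are arbitrary examples):

	/* Illustration only: move an unbound workqueue to nice 10 on CPUs 0-1. */
	static int example_tune_wq(struct workqueue_struct *wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);
		if (!attrs)
			return -ENOMEM;

		attrs->nice = 10;
		cpumask_clear(attrs->cpumask);
		cpumask_set_cpu(0, attrs->cpumask);
		cpumask_set_cpu(1, attrs->cpumask);

		ret = apply_workqueue_attrs(wq, attrs);
		free_workqueue_attrs(attrs);
		return ret;
	}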
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
-extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
-extern bool queue_delayed_work(struct workqueue_struct *wq,
- struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
-extern bool mod_delayed_work(struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
-extern bool schedule_work_on(int cpu, struct work_struct *work);
-extern bool schedule_work(struct work_struct *work);
-extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
- unsigned long delay);
-extern bool schedule_delayed_work(struct delayed_work *work,
- unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
-extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
@@ -435,9 +444,121 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
-extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern bool current_is_workqueue_rescuer(void);
+extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+static inline bool queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ return queue_work_on(WORK_CPU_UNBOUND, wq, work);
+}
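A minimal usage sketch for the now-inline queue_work() (illustrative only):

	static void example_fn(struct work_struct *work)
	{
		pr_info("example work ran\n");
	}

	static DECLARE_WORK(example_work, example_fn);

	/* Illustration only: queue_work() returns false if already pending. */
	static void example_kick(struct workqueue_struct *wq)
	{
		if (!queue_work(wq, &example_work))
			pr_debug("example work already queued\n");
	}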
+
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * mod_delayed_work_on() on local CPU.
+ */
+static inline bool mod_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+ return queue_work_on(cpu, system_wq, work);
+}
+
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
+ */
+static inline bool schedule_work(struct work_struct *work)
+{
+ return queue_work(system_wq, work);
+}
+
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
+static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+}
+
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+static inline bool schedule_delayed_work(struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work(system_wq, dwork, delay);
+}
+
+/**
+ * keventd_up - is workqueue initialized yet?
+ */
+static inline bool keventd_up(void)
+{
+ return system_wq != NULL;
+}
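The schedule_*() wrappers above all target system_wq; an illustrative pairing of immediate and delayed scheduling (sketch only, the function names are made up):

	static void example_now_fn(struct work_struct *work) { }
	static void example_later_fn(struct work_struct *work) { }

	static DECLARE_WORK(example_now, example_now_fn);
	static DECLARE_DELAYED_WORK(example_later, example_later_fn);

	/* Illustration only: run one job right away, another in about a second. */
	static void example_schedule(void)
	{
		schedule_work(&example_now);
		schedule_delayed_work(&example_later, HZ);
	}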
+
/*
* Like above, but uses del_timer() instead of del_timer_sync(). This means,
* if it returns 0 the timer function may be running and the queueing is in
@@ -466,12 +587,12 @@ static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwo
}
#ifndef CONFIG_SMP
-static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
#else
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
@@ -480,4 +601,11 @@ extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */
+#ifdef CONFIG_SYSFS
+int workqueue_sysfs_register(struct workqueue_struct *wq);
+#else /* CONFIG_SYSFS */
+static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
+{ return 0; }
+#endif /* CONFIG_SYSFS */
+
#endif
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 41a7dbd570e..a43a2f67bd8 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -175,6 +175,54 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
);
+DECLARE_EVENT_CLASS(regmap_async,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+ TP_PROTO(struct device *dev),
+
+ TP_ARGS(dev)
+
+);
+
#endif /* _TRACE_REGMAP_H */
/* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe34..19edd7facaa 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *trace_event) \
{ \
- struct ftrace_event_call *event; \
struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
struct ftrace_raw_##call *field; \
- struct trace_entry *entry; \
- struct trace_seq *p = &iter->tmp_seq; \
int ret; \
\
- event = container_of(trace_event, struct ftrace_event_call, \
- event); \
- \
- entry = iter->ent; \
- \
- if (entry->type != event->event.type) { \
- WARN_ON_ONCE(1); \
- return TRACE_TYPE_UNHANDLED; \
- } \
- \
- field = (typeof(field))entry; \
+ field = (typeof(field))iter->ent; \
\
- trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", event->name); \
+ ret = ftrace_raw_output_prep(iter, trace_event); \
if (ret) \
- ret = trace_seq_printf(s, print); \
+ return ret; \
+ \
+ ret = trace_seq_printf(s, print); \
if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \
\
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace \
+static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
struct ftrace_raw_##call field; \
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
*
* static void ftrace_raw_event_<call>(void *__data, proto)
* {
- * struct ftrace_event_call *event_call = __data;
+ * struct ftrace_event_file *ftrace_file = __data;
+ * struct ftrace_event_call *event_call = ftrace_file->event_call;
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
* int __data_size;
* int pc;
*
+ * if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ * &ftrace_file->flags))
+ * return;
+ *
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
*
- * event = trace_current_buffer_lock_reserve(&buffer,
+ * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
* event_<call>->event.type,
* sizeof(*entry) + __data_size,
* irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* __array macros.
*
* if (!filter_current_check_discard(buffer, event_call, entry, event))
- * trace_current_buffer_unlock_commit(buffer,
+ * trace_nowake_buffer_unlock_commit(buffer,
* event, irq_flags, pc);
* }
*
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
- struct ftrace_event_call *event_call = __data; \
+ struct ftrace_event_file *ftrace_file = __data; \
+ struct ftrace_event_call *event_call = ftrace_file->event_call; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
int __data_size; \
int pc; \
\
+ if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+ &ftrace_file->flags)) \
+ return; \
+ \
local_save_flags(irq_flags); \
pc = preempt_count(); \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\
- event = trace_current_buffer_lock_reserve(&buffer, \
+ event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
event_call->event.type, \
sizeof(*entry) + __data_size, \
irq_flags, pc); \
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used event_class_##call = { \
+static struct ftrace_event_class __used __refdata event_class_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */
-#undef _TRACE_PROFILE_INIT
-
diff --git a/kernel/async.c b/kernel/async.c
index 8ddee2c3e5b..61f023ce022 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -73,7 +73,7 @@ struct async_entry {
struct list_head global_list;
struct work_struct work;
async_cookie_t cookie;
- async_func_ptr *func;
+ async_func_t func;
void *data;
struct async_domain *domain;
};
@@ -84,24 +84,20 @@ static atomic_t entry_count;
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
- struct async_entry *first = NULL;
+ struct list_head *pending;
async_cookie_t ret = ASYNC_COOKIE_MAX;
unsigned long flags;
spin_lock_irqsave(&async_lock, flags);
- if (domain) {
- if (!list_empty(&domain->pending))
- first = list_first_entry(&domain->pending,
- struct async_entry, domain_list);
- } else {
- if (!list_empty(&async_global_pending))
- first = list_first_entry(&async_global_pending,
- struct async_entry, global_list);
- }
+ if (domain)
+ pending = &domain->pending;
+ else
+ pending = &async_global_pending;
- if (first)
- ret = first->cookie;
+ if (!list_empty(pending))
+ ret = list_first_entry(pending, struct async_entry,
+ domain_list)->cookie;
spin_unlock_irqrestore(&async_lock, flags);
return ret;
@@ -149,7 +145,7 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
+static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain)
{
struct async_entry *entry;
unsigned long flags;
@@ -169,13 +165,13 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
spin_unlock_irqrestore(&async_lock, flags);
/* low on memory.. run synchronously */
- ptr(data, newcookie);
+ func(data, newcookie);
return newcookie;
}
INIT_LIST_HEAD(&entry->domain_list);
INIT_LIST_HEAD(&entry->global_list);
INIT_WORK(&entry->work, async_run_entry_fn);
- entry->func = ptr;
+ entry->func = func;
entry->data = data;
entry->domain = domain;
@@ -202,21 +198,21 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
/**
* async_schedule - schedule a function for asynchronous execution
- * @ptr: function to execute asynchronously
+ * @func: function to execute asynchronously
* @data: data pointer to pass to the function
*
* Returns an async_cookie_t that may be used for checkpointing later.
* Note: This function may be called from atomic or non-atomic contexts.
*/
-async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
+async_cookie_t async_schedule(async_func_t func, void *data)
{
- return __async_schedule(ptr, data, &async_dfl_domain);
+ return __async_schedule(func, data, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule);
/**
* async_schedule_domain - schedule a function for asynchronous execution within a certain domain
- * @ptr: function to execute asynchronously
+ * @func: function to execute asynchronously
* @data: data pointer to pass to the function
* @domain: the domain
*
@@ -226,10 +222,10 @@ EXPORT_SYMBOL_GPL(async_schedule);
* synchronization domain is specified via @domain. Note: This function
* may be called from atomic or non-atomic contexts.
*/
-async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+async_cookie_t async_schedule_domain(async_func_t func, void *data,
struct async_domain *domain)
{
- return __async_schedule(ptr, data, domain);
+ return __async_schedule(func, data, domain);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index dfaf50d4705..eeb7e49946b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -30,7 +30,6 @@
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -59,7 +58,7 @@
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
-#include <linux/flex_array.h> /* used in cgroup_attach_proc */
+#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>
#include <linux/atomic.h>
@@ -83,7 +82,13 @@
* B happens only through cgroup_show_options() and using cgroup_root_mutex
* breaks it.
*/
+#ifdef CONFIG_PROVE_RCU
+DEFINE_MUTEX(cgroup_mutex);
+EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for task_subsys_state_check() */
+#else
static DEFINE_MUTEX(cgroup_mutex);
+#endif
+
static DEFINE_MUTEX(cgroup_root_mutex);
/*
@@ -98,56 +103,6 @@ static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};
-#define MAX_CGROUP_ROOT_NAMELEN 64
-
-/*
- * A cgroupfs_root represents the root of a cgroup hierarchy,
- * and may be associated with a superblock to form an active
- * hierarchy
- */
-struct cgroupfs_root {
- struct super_block *sb;
-
- /*
- * The bitmask of subsystems intended to be attached to this
- * hierarchy
- */
- unsigned long subsys_mask;
-
- /* Unique id for this hierarchy. */
- int hierarchy_id;
-
- /* The bitmask of subsystems currently attached to this hierarchy */
- unsigned long actual_subsys_mask;
-
- /* A list running through the attached subsystems */
- struct list_head subsys_list;
-
- /* The root cgroup for this hierarchy */
- struct cgroup top_cgroup;
-
- /* Tracks how many cgroups are currently defined in hierarchy.*/
- int number_of_cgroups;
-
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
- /* All cgroups on this root, cgroup_mutex protected */
- struct list_head allcg_list;
-
- /* Hierarchy-specific flags */
- unsigned long flags;
-
- /* IDs for cgroups in this hierarchy */
- struct ida cgroup_ida;
-
- /* The path to use for release notifications. */
- char release_agent_path[PATH_MAX];
-
- /* The name for this hierarchy - may be empty */
- char name[MAX_CGROUP_ROOT_NAMELEN];
-};
-
/*
* The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
* subsystems that are otherwise unattached - it never has more than a
@@ -162,6 +117,9 @@ struct cfent {
struct list_head node;
struct dentry *dentry;
struct cftype *type;
+
+ /* file xattrs */
+ struct simple_xattrs xattrs;
};
/*
@@ -238,6 +196,8 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)
+static struct cgroup_name root_cgroup_name = { .name = "/" };
+
/* This flag indicates whether tasks in the fork and exit paths should
* check for fork/exit handlers to call. This avoids us having to do
* extra work in the fork/exit path if none of the subsystems need to
@@ -249,20 +209,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp);
static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
struct cftype cfts[], bool is_add);
-#ifdef CONFIG_PROVE_LOCKING
-int cgroup_lock_is_held(void)
-{
- return lockdep_is_held(&cgroup_mutex);
-}
-#else /* #ifdef CONFIG_PROVE_LOCKING */
-int cgroup_lock_is_held(void)
-{
- return mutex_is_locked(&cgroup_mutex);
-}
-#endif /* #else #ifdef CONFIG_PROVE_LOCKING */
-
-EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
-
static int css_unbias_refcnt(int refcnt)
{
return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
@@ -282,11 +228,25 @@ inline int cgroup_is_removed(const struct cgroup *cgrp)
return test_bit(CGRP_REMOVED, &cgrp->flags);
}
-/* bits in struct cgroupfs_root flags field */
-enum {
- ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
- ROOT_XATTR, /* supports extended attributes */
-};
+/**
+ * cgroup_is_descendant - test ancestry
+ * @cgrp: the cgroup to be tested
+ * @ancestor: possible ancestor of @cgrp
+ *
+ * Test whether @cgrp is a descendant of @ancestor. It also returns %true
+ * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
+ * and @ancestor are accessible.
+ */
+bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
+{
+ while (cgrp) {
+ if (cgrp == ancestor)
+ return true;
+ cgrp = cgrp->parent;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(cgroup_is_descendant);
static int cgroup_is_releasable(const struct cgroup *cgrp)
{
@@ -327,6 +287,23 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
return __d_cfe(dentry)->type;
}
+/**
+ * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
+ * @cgrp: the cgroup to be checked for liveness
+ *
+ * On success, returns true with cgroup_mutex held; the caller must unlock
+ * it later. On failure, returns false with no lock held.
+ */
+static bool cgroup_lock_live_group(struct cgroup *cgrp)
+{
+ mutex_lock(&cgroup_mutex);
+ if (cgroup_is_removed(cgrp)) {
+ mutex_unlock(&cgroup_mutex);
+ return false;
+ }
+ return true;
+}
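The expected calling pattern for the helper above, sketched for illustration (not part of the patch): the caller owns cgroup_mutex only when the function returns true.

	/* Illustration only. */
	static int example_op(struct cgroup *cgrp)
	{
		if (!cgroup_lock_live_group(cgrp))
			return -ENODEV;

		/* ... the cgroup is guaranteed alive here ... */

		mutex_unlock(&cgroup_mutex);
		return 0;
	}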
+
/* the list of cgroups eligible for automatic release. Protected by
* release_list_lock */
static LIST_HEAD(release_list);
@@ -800,27 +777,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
* update of a tasks cgroup pointer by cgroup_attach_task()
*/
-/**
- * cgroup_lock - lock out any changes to cgroup structures
- *
- */
-void cgroup_lock(void)
-{
- mutex_lock(&cgroup_mutex);
-}
-EXPORT_SYMBOL_GPL(cgroup_lock);
-
-/**
- * cgroup_unlock - release lock on cgroup changes
- *
- * Undo the lock taken in a previous cgroup_lock() call.
- */
-void cgroup_unlock(void)
-{
- mutex_unlock(&cgroup_mutex);
-}
-EXPORT_SYMBOL_GPL(cgroup_unlock);
-
/*
* A couple of forward declarations required, due to cyclic reference loop:
* cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
@@ -859,6 +815,17 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
return inode;
}
+static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
+{
+ struct cgroup_name *name;
+
+ name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL);
+ if (!name)
+ return NULL;
+ strcpy(name->name, dentry->d_name.name);
+ return name;
+}
+
static void cgroup_free_fn(struct work_struct *work)
{
struct cgroup *cgrp = container_of(work, struct cgroup, free_work);
@@ -875,8 +842,18 @@ static void cgroup_free_fn(struct work_struct *work)
mutex_unlock(&cgroup_mutex);
/*
+ * We get a ref to the parent's dentry, and put the ref when
+ * this cgroup is being freed, so it's guaranteed that the
+ * parent won't be destroyed before its children.
+ */
+ dput(cgrp->parent->dentry);
+
+ ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
+
+ /*
* Drop the active superblock reference that we took when we
- * created the cgroup
+ * created the cgroup. This will free cgrp->root, if we are
+ * holding the last reference to @sb.
*/
deactivate_super(cgrp->root->sb);
@@ -888,7 +865,7 @@ static void cgroup_free_fn(struct work_struct *work)
simple_xattrs_free(&cgrp->xattrs);
- ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
+ kfree(rcu_dereference_raw(cgrp->name));
kfree(cgrp);
}
@@ -910,13 +887,12 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
} else {
struct cfent *cfe = __d_cfe(dentry);
struct cgroup *cgrp = dentry->d_parent->d_fsdata;
- struct cftype *cft = cfe->type;
WARN_ONCE(!list_empty(&cfe->node) &&
cgrp != &cgrp->root->top_cgroup,
"cfe still linked for %s\n", cfe->type->name);
+ simple_xattrs_free(&cfe->xattrs);
kfree(cfe);
- simple_xattrs_free(&cft->xattrs);
}
iput(inode);
}
@@ -1108,9 +1084,11 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
mutex_lock(&cgroup_root_mutex);
for_each_subsys(root, ss)
seq_printf(seq, ",%s", ss->name);
- if (test_bit(ROOT_NOPREFIX, &root->flags))
+ if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
+ seq_puts(seq, ",sane_behavior");
+ if (root->flags & CGRP_ROOT_NOPREFIX)
seq_puts(seq, ",noprefix");
- if (test_bit(ROOT_XATTR, &root->flags))
+ if (root->flags & CGRP_ROOT_XATTR)
seq_puts(seq, ",xattr");
if (strlen(root->release_agent_path))
seq_printf(seq, ",release_agent=%s", root->release_agent_path);
@@ -1172,8 +1150,12 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
all_ss = true;
continue;
}
+ if (!strcmp(token, "__DEVEL__sane_behavior")) {
+ opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
+ continue;
+ }
if (!strcmp(token, "noprefix")) {
- set_bit(ROOT_NOPREFIX, &opts->flags);
+ opts->flags |= CGRP_ROOT_NOPREFIX;
continue;
}
if (!strcmp(token, "clone_children")) {
@@ -1181,7 +1163,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
continue;
}
if (!strcmp(token, "xattr")) {
- set_bit(ROOT_XATTR, &opts->flags);
+ opts->flags |= CGRP_ROOT_XATTR;
continue;
}
if (!strncmp(token, "release_agent=", 14)) {
@@ -1259,13 +1241,26 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
/* Consistency checks */
+ if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
+ pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
+
+ if (opts->flags & CGRP_ROOT_NOPREFIX) {
+ pr_err("cgroup: sane_behavior: noprefix is not allowed\n");
+ return -EINVAL;
+ }
+
+ if (opts->cpuset_clone_children) {
+ pr_err("cgroup: sane_behavior: clone_children is not allowed\n");
+ return -EINVAL;
+ }
+ }
+
/*
* Option noprefix was introduced just for backward compatibility
* with the old cpuset, so we allow noprefix only if mounting just
* the cpuset subsystem.
*/
- if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
- (opts->subsys_mask & mask))
+ if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
return -EINVAL;
@@ -1336,6 +1331,11 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
struct cgroup_sb_opts opts;
unsigned long added_mask, removed_mask;
+ if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
+ pr_err("cgroup: sane_behavior: remount is not allowed\n");
+ return -EINVAL;
+ }
+
mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
@@ -1421,7 +1421,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
INIT_LIST_HEAD(&root->allcg_list);
root->number_of_cgroups = 1;
cgrp->root = root;
- cgrp->top_cgroup = cgrp;
+ cgrp->name = &root_cgroup_name;
init_cgroup_housekeeping(cgrp);
list_add_tail(&cgrp->allcg_node, &root->allcg_list);
}
@@ -1685,6 +1685,14 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
* any) is not needed
*/
cgroup_drop_root(opts.new_root);
+
+ if (((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) &&
+ root->flags != opts.flags) {
+ pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n");
+ ret = -EINVAL;
+ goto drop_new_super;
+ }
+
/* no subsys rebinding, so refcounts don't change */
drop_parsed_module_refcounts(opts.subsys_mask);
}
@@ -1769,49 +1777,48 @@ static struct kobject *cgroup_kobj;
* @buf: the buffer to write the path into
* @buflen: the length of the buffer
*
- * Called with cgroup_mutex held or else with an RCU-protected cgroup
- * reference. Writes path of cgroup into buf. Returns 0 on success,
- * -errno on error.
+ * Writes path of cgroup into buf. Returns 0 on success, -errno on error.
+ *
+ * We can't generate cgroup path using dentry->d_name, as accessing
+ * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
+ * inode's i_mutex, while on the other hand cgroup_path() can be called
+ * with some irq-safe spinlocks held.
*/
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
- struct dentry *dentry = cgrp->dentry;
+ int ret = -ENAMETOOLONG;
char *start;
- rcu_lockdep_assert(rcu_read_lock_held() || cgroup_lock_is_held(),
- "cgroup_path() called without proper locking");
-
- if (cgrp == dummytop) {
- /*
- * Inactive subsystems have no dentry for their root
- * cgroup
- */
- strcpy(buf, "/");
+ if (!cgrp->parent) {
+ if (strlcpy(buf, "/", buflen) >= buflen)
+ return -ENAMETOOLONG;
return 0;
}
start = buf + buflen - 1;
-
*start = '\0';
- for (;;) {
- int len = dentry->d_name.len;
+ rcu_read_lock();
+ do {
+ const char *name = cgroup_name(cgrp);
+ int len;
+
+ len = strlen(name);
if ((start -= len) < buf)
- return -ENAMETOOLONG;
- memcpy(start, dentry->d_name.name, len);
- cgrp = cgrp->parent;
- if (!cgrp)
- break;
+ goto out;
+ memcpy(start, name, len);
- dentry = cgrp->dentry;
- if (!cgrp->parent)
- continue;
if (--start < buf)
- return -ENAMETOOLONG;
+ goto out;
*start = '/';
- }
+
+ cgrp = cgrp->parent;
+ } while (cgrp->parent);
+ ret = 0;
memmove(buf, start, buf + buflen - start);
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
EXPORT_SYMBOL_GPL(cgroup_path);
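With the rewrite, callers only need to keep the cgroup accessible; no cgroup_mutex or explicit RCU locking is required around the call. A usage sketch (illustrative only):

	/* Illustration only. */
	static void example_log_path(struct cgroup *cgrp)
	{
		char buf[256];

		if (!cgroup_path(cgrp, buf, sizeof(buf)))
			pr_info("cgroup path: %s\n", buf);
	}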
@@ -1900,7 +1907,7 @@ EXPORT_SYMBOL_GPL(cgroup_taskset_size);
*
* Must be called with cgroup_mutex and threadgroup locked.
*/
-static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+static void cgroup_task_migrate(struct cgroup *oldcgrp,
struct task_struct *tsk, struct css_set *newcg)
{
struct css_set *oldcg;
@@ -1933,121 +1940,22 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
}
/**
- * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
- * @cgrp: the cgroup the task is attaching to
- * @tsk: the task to be attached
- *
- * Call with cgroup_mutex and threadgroup locked. May take task_lock of
- * @tsk during call.
- */
-int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
-{
- int retval = 0;
- struct cgroup_subsys *ss, *failed_ss = NULL;
- struct cgroup *oldcgrp;
- struct cgroupfs_root *root = cgrp->root;
- struct cgroup_taskset tset = { };
- struct css_set *newcg;
-
- /* @tsk either already exited or can't exit until the end */
- if (tsk->flags & PF_EXITING)
- return -ESRCH;
-
- /* Nothing to do if the task is already in that cgroup */
- oldcgrp = task_cgroup_from_root(tsk, root);
- if (cgrp == oldcgrp)
- return 0;
-
- tset.single.task = tsk;
- tset.single.cgrp = oldcgrp;
-
- for_each_subsys(root, ss) {
- if (ss->can_attach) {
- retval = ss->can_attach(cgrp, &tset);
- if (retval) {
- /*
- * Remember on which subsystem the can_attach()
- * failed, so that we only call cancel_attach()
- * against the subsystems whose can_attach()
- * succeeded. (See below)
- */
- failed_ss = ss;
- goto out;
- }
- }
- }
-
- newcg = find_css_set(tsk->cgroups, cgrp);
- if (!newcg) {
- retval = -ENOMEM;
- goto out;
- }
-
- cgroup_task_migrate(cgrp, oldcgrp, tsk, newcg);
-
- for_each_subsys(root, ss) {
- if (ss->attach)
- ss->attach(cgrp, &tset);
- }
-
-out:
- if (retval) {
- for_each_subsys(root, ss) {
- if (ss == failed_ss)
- /*
- * This subsystem was the one that failed the
- * can_attach() check earlier, so we don't need
- * to call cancel_attach() against it or any
- * remaining subsystems.
- */
- break;
- if (ss->cancel_attach)
- ss->cancel_attach(cgrp, &tset);
- }
- }
- return retval;
-}
-
-/**
- * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
- * @from: attach to all cgroups of a given task
- * @tsk: the task to be attached
- */
-int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
-{
- struct cgroupfs_root *root;
- int retval = 0;
-
- cgroup_lock();
- for_each_active_root(root) {
- struct cgroup *from_cg = task_cgroup_from_root(from, root);
-
- retval = cgroup_attach_task(from_cg, tsk);
- if (retval)
- break;
- }
- cgroup_unlock();
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
-
-/**
- * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
+ * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
* @cgrp: the cgroup to attach to
- * @leader: the threadgroup leader task_struct of the group to be attached
+ * @tsk: the task or the leader of the threadgroup to be attached
+ * @threadgroup: attach the whole threadgroup?
*
* Call holding cgroup_mutex and the group_rwsem of the leader. Will take
- * task_lock of each thread in leader's threadgroup individually in turn.
+ * task_lock of @tsk or each thread in the threadgroup individually in turn.
*/
-static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
+ bool threadgroup)
{
int retval, i, group_size;
struct cgroup_subsys *ss, *failed_ss = NULL;
- /* guaranteed to be initialized later, but the compiler needs this */
struct cgroupfs_root *root = cgrp->root;
/* threadgroup list cursor and array */
- struct task_struct *tsk;
+ struct task_struct *leader = tsk;
struct task_and_cgroup *tc;
struct flex_array *group;
struct cgroup_taskset tset = { };
@@ -2059,17 +1967,19 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
* group - group_rwsem prevents new threads from appearing, and if
* threads exit, this will just be an over-estimate.
*/
- group_size = get_nr_threads(leader);
+ if (threadgroup)
+ group_size = get_nr_threads(tsk);
+ else
+ group_size = 1;
/* flex_array supports very large thread-groups better than kmalloc. */
group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL);
if (!group)
return -ENOMEM;
/* pre-allocate to guarantee space while iterating in rcu read-side. */
- retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
+ retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
if (retval)
goto out_free_group_list;
- tsk = leader;
i = 0;
/*
* Prevent freeing of tasks while we take a snapshot. Tasks that are
@@ -2098,6 +2008,9 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
+
+ if (!threadgroup)
+ break;
} while_each_thread(leader, tsk);
rcu_read_unlock();
/* remember the number of threads in the array for later. */
@@ -2143,7 +2056,7 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
*/
for (i = 0; i < group_size; i++) {
tc = flex_array_get(group, i);
- cgroup_task_migrate(cgrp, tc->cgrp, tc->task, tc->cg);
+ cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
}
/* nothing is sensitive to fork() after this point. */
@@ -2224,11 +2137,11 @@ retry_find_task:
tsk = tsk->group_leader;
/*
- * Workqueue threads may acquire PF_THREAD_BOUND and become
+ * Workqueue threads may acquire PF_NO_SETAFFINITY and become
* trapped in a cpuset, or RT worker may be born in a cgroup
* with no rt_runtime allocated. Just say no.
*/
- if (tsk == kthreadd_task || (tsk->flags & PF_THREAD_BOUND)) {
+ if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
rcu_read_unlock();
goto out_unlock_cgroup;
@@ -2251,17 +2164,42 @@ retry_find_task:
put_task_struct(tsk);
goto retry_find_task;
}
- ret = cgroup_attach_proc(cgrp, tsk);
- } else
- ret = cgroup_attach_task(cgrp, tsk);
+ }
+
+ ret = cgroup_attach_task(cgrp, tsk, threadgroup);
+
threadgroup_unlock(tsk);
put_task_struct(tsk);
out_unlock_cgroup:
- cgroup_unlock();
+ mutex_unlock(&cgroup_mutex);
return ret;
}
+/**
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
+ * @tsk: the task to be attached
+ */
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+{
+ struct cgroupfs_root *root;
+ int retval = 0;
+
+ mutex_lock(&cgroup_mutex);
+ for_each_active_root(root) {
+ struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+ retval = cgroup_attach_task(from_cg, tsk, false);
+ if (retval)
+ break;
+ }
+ mutex_unlock(&cgroup_mutex);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
+
static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
return attach_task_by_pid(cgrp, pid, false);
@@ -2272,24 +2210,6 @@ static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
return attach_task_by_pid(cgrp, tgid, true);
}
-/**
- * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
- * @cgrp: the cgroup to be checked for liveness
- *
- * On success, returns true; the lock should be later released with
- * cgroup_unlock(). On failure returns false with no lock held.
- */
-bool cgroup_lock_live_group(struct cgroup *cgrp)
-{
- mutex_lock(&cgroup_mutex);
- if (cgroup_is_removed(cgrp)) {
- mutex_unlock(&cgroup_mutex);
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL_GPL(cgroup_lock_live_group);
-
static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
const char *buffer)
{
@@ -2301,7 +2221,7 @@ static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
mutex_lock(&cgroup_root_mutex);
strcpy(cgrp->root->release_agent_path, buffer);
mutex_unlock(&cgroup_root_mutex);
- cgroup_unlock();
+ mutex_unlock(&cgroup_mutex);
return 0;
}
@@ -2312,7 +2232,14 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
return -ENODEV;
seq_puts(seq, cgrp->root->release_agent_path);
seq_putc(seq, '\n');
- cgroup_unlock();
+ mutex_unlock(&cgroup_mutex);
+ return 0;
+}
+
+static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *seq)
+{
+ seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
return 0;
}
@@ -2537,13 +2464,40 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
+ int ret;
+ struct cgroup_name *name, *old_name;
+ struct cgroup *cgrp;
+
+ /*
+ * It's convenient to use the parent dir's i_mutex to protect
+ * cgrp->name.
+ */
+ lockdep_assert_held(&old_dir->i_mutex);
+
if (!S_ISDIR(old_dentry->d_inode->i_mode))
return -ENOTDIR;
if (new_dentry->d_inode)
return -EEXIST;
if (old_dir != new_dir)
return -EIO;
- return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+
+ cgrp = __d_cgrp(old_dentry);
+
+ name = cgroup_alloc_name(new_dentry);
+ if (!name)
+ return -ENOMEM;
+
+ ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+ if (ret) {
+ kfree(name);
+ return ret;
+ }
+
+ old_name = cgrp->name;
+ rcu_assign_pointer(cgrp->name, name);
+
+ kfree_rcu(old_name, rcu_head);
+ return 0;
}
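For illustration (not part of the patch): the rename path above follows the usual RCU publish pattern, allocating the replacement first, switching the pointer with rcu_assign_pointer(), and deferring the free of the old name with kfree_rcu() so readers under rcu_read_lock() never see the string vanish beneath them. A condensed sketch of that idiom with hypothetical type and function names, not the patch's actual structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical types, only to show the idiom. */
struct name_obj {
	struct rcu_head rcu_head;
	char str[];			/* flexible array holding the string */
};

struct object {
	struct name_obj __rcu *name;
};

/* Writers are serialized by the caller (the patch uses the parent dir's i_mutex). */
static void object_set_name(struct object *obj, struct name_obj *new_name)
{
	struct name_obj *old_name;

	old_name = rcu_dereference_protected(obj->name, 1);
	rcu_assign_pointer(obj->name, new_name);	/* publish to RCU readers */
	kfree_rcu(old_name, rcu_head);			/* free after a grace period */
}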
static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
@@ -2551,13 +2505,13 @@ static struct simple_xattrs *__d_xattrs(struct dentry *dentry)
if (S_ISDIR(dentry->d_inode->i_mode))
return &__d_cgrp(dentry)->xattrs;
else
- return &__d_cft(dentry)->xattrs;
+ return &__d_cfe(dentry)->xattrs;
}
static inline int xattr_enabled(struct dentry *dentry)
{
struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
- return test_bit(ROOT_XATTR, &root->flags);
+ return root->flags & CGRP_ROOT_XATTR;
}
static bool is_valid_xattr(const char *name)
@@ -2727,9 +2681,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
umode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
- simple_xattrs_init(&cft->xattrs);
-
- if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
+ if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
strcpy(name, subsys->name);
strcat(name, ".");
}
@@ -2753,6 +2705,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
cfe->type = (void *)cft;
cfe->dentry = dentry;
dentry->d_fsdata = cfe;
+ simple_xattrs_init(&cfe->xattrs);
list_add_tail(&cfe->node, &parent->files);
cfe = NULL;
}
@@ -2770,6 +2723,8 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
for (cft = cfts; cft->name[0] != '\0'; cft++) {
/* does cft->flags tell us to skip this file on @cgrp? */
+ if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
+ continue;
if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent)
continue;
if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
@@ -3300,6 +3255,34 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
return 0;
}
+static void cgroup_transfer_one_task(struct task_struct *task,
+ struct cgroup_scanner *scan)
+{
+ struct cgroup *new_cgroup = scan->data;
+
+ mutex_lock(&cgroup_mutex);
+ cgroup_attach_task(new_cgroup, task, false);
+ mutex_unlock(&cgroup_mutex);
+}
+
+/**
+ * cgroup_transfer_tasks - move tasks from one cgroup to another
+ * @to: cgroup to which the tasks will be moved
+ * @from: cgroup in which the tasks currently reside
+ */
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+{
+ struct cgroup_scanner scan;
+
+ scan.cg = from;
+ scan.test_task = NULL; /* select all tasks in cgroup */
+ scan.process_task = cgroup_transfer_one_task;
+ scan.heap = NULL;
+ scan.data = to;
+
+ return cgroup_scan_tasks(&scan);
+}
+
/*
* Stuff for reading the 'tasks'/'procs' files.
*
@@ -3362,35 +3345,14 @@ static void pidlist_free(void *p)
else
kfree(p);
}
-static void *pidlist_resize(void *p, int newcount)
-{
- void *newlist;
- /* note: if new alloc fails, old p will still be valid either way */
- if (is_vmalloc_addr(p)) {
- newlist = vmalloc(newcount * sizeof(pid_t));
- if (!newlist)
- return NULL;
- memcpy(newlist, p, newcount * sizeof(pid_t));
- vfree(p);
- } else {
- newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
- }
- return newlist;
-}
/*
* pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
- * If the new stripped list is sufficiently smaller and there's enough memory
- * to allocate a new buffer, will let go of the unneeded memory. Returns the
- * number of unique elements.
+ * Returns the number of unique elements.
*/
-/* is the size difference enough that we should re-allocate the array? */
-#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
-static int pidlist_uniq(pid_t **p, int length)
+static int pidlist_uniq(pid_t *list, int length)
{
int src, dest = 1;
- pid_t *list = *p;
- pid_t *newlist;
/*
* we presume the 0th element is unique, so i starts at 1. trivial
@@ -3411,16 +3373,6 @@ static int pidlist_uniq(pid_t **p, int length)
dest++;
}
after:
- /*
- * if the length difference is large enough, we want to allocate a
- * smaller buffer to save memory. if this fails due to out of memory,
- * we'll just stay with what we've got.
- */
- if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
- newlist = pidlist_resize(list, dest);
- if (newlist)
- *p = newlist;
- }
return dest;
}
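For illustration (not part of the patch): pidlist_uniq now only compacts duplicates in place and no longer tries to shrink the allocation. The in-place dedup over a sorted array is the standard two-index walk; a small self-contained sketch of the same technique, assuming an already-sorted input as the kernel code does:

#include <stdio.h>

/* Remove duplicates from a sorted array in place; returns the new length. */
static int uniq(int *list, int length)
{
	int src, dest = 1;

	if (length == 0)
		return 0;
	for (src = 1; src < length; src++) {
		if (list[src] != list[dest - 1])
			list[dest++] = list[src];
	}
	return dest;
}

int main(void)
{
	int pids[] = { 3, 3, 5, 7, 7, 7, 9 };
	int n = uniq(pids, 7);
	int i;

	for (i = 0; i < n; i++)
		printf("%d ", pids[i]);		/* prints "3 5 7 9 " */
	printf("\n");
	return 0;
}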
@@ -3516,7 +3468,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
/* now sort & (if procs) strip out duplicates */
sort(array, length, sizeof(pid_t), cmppid, NULL);
if (type == CGROUP_FILE_PROCS)
- length = pidlist_uniq(&array, length);
+ length = pidlist_uniq(array, length);
l = cgroup_pidlist_find(cgrp, type);
if (!l) {
pidlist_free(array);
@@ -3930,11 +3882,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
if (ret)
goto fail;
- if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
- event->cft->unregister_event(cgrp, event->cft, event->eventfd);
- ret = 0;
- goto fail;
- }
+ efile->f_op->poll(efile, &event->pt);
/*
* Events should be removed after rmdir of cgroup directory, but before
@@ -4016,10 +3964,16 @@ static struct cftype files[] = {
},
{
.name = "cgroup.clone_children",
+ .flags = CFTYPE_INSANE,
.read_u64 = cgroup_clone_children_read,
.write_u64 = cgroup_clone_children_write,
},
{
+ .name = "cgroup.sane_behavior",
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ .read_seq_string = cgroup_sane_behavior_show,
+ },
+ {
.name = "release_agent",
.flags = CFTYPE_ONLY_ON_ROOT,
.read_seq_string = cgroup_release_agent_show,
@@ -4131,17 +4085,8 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
if (!(css->flags & CSS_ONLINE))
return;
- /*
- * css_offline() should be called with cgroup_mutex unlocked. See
- * 3fa59dfbc3 ("cgroup: fix potential deadlock in pre_destroy") for
- * details. This temporary unlocking should go away once
- * cgroup_mutex is unexported from controllers.
- */
- if (ss->css_offline) {
- mutex_unlock(&cgroup_mutex);
+ if (ss->css_offline)
ss->css_offline(cgrp);
- mutex_lock(&cgroup_mutex);
- }
cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
}
@@ -4158,6 +4103,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
umode_t mode)
{
struct cgroup *cgrp;
+ struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
int err = 0;
struct cgroup_subsys *ss;
@@ -4168,9 +4114,14 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (!cgrp)
return -ENOMEM;
+ name = cgroup_alloc_name(dentry);
+ if (!name)
+ goto err_free_cgrp;
+ rcu_assign_pointer(cgrp->name, name);
+
cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
if (cgrp->id < 0)
- goto err_free_cgrp;
+ goto err_free_name;
/*
* Only live parents can have children. Note that the liveliness
@@ -4198,7 +4149,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
cgrp->parent = parent;
cgrp->root = parent->root;
- cgrp->top_cgroup = parent->top_cgroup;
if (notify_on_release(parent))
set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -4241,6 +4191,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
for_each_subsys(root, ss)
dget(dentry);
+ /* hold a ref to the parent's dentry */
+ dget(parent->dentry);
+
/* creation succeeded, notify subsystems */
for_each_subsys(root, ss) {
err = online_css(ss, cgrp);
@@ -4276,6 +4229,8 @@ err_free_all:
deactivate_super(sb);
err_free_id:
ida_simple_remove(&root->cgroup_ida, cgrp->id);
+err_free_name:
+ kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
kfree(cgrp);
return err;
@@ -4295,56 +4250,13 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}
-/*
- * Check the reference count on each subsystem. Since we already
- * established that there are no tasks in the cgroup, if the css refcount
- * is also 1, then there should be no outstanding references, so the
- * subsystem is safe to destroy. We scan across all subsystems rather than
- * using the per-hierarchy linked list of mounted subsystems since we can
- * be called via check_for_release() with no synchronization other than
- * RCU, and the subsystem linked list isn't RCU-safe.
- */
-static int cgroup_has_css_refs(struct cgroup *cgrp)
-{
- int i;
-
- /*
- * We won't need to lock the subsys array, because the subsystems
- * we're concerned about aren't going anywhere since our cgroup root
- * has a reference on them.
- */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- struct cgroup_subsys_state *css;
-
- /* Skip subsystems not present or not in this hierarchy */
- if (ss == NULL || ss->root != cgrp->root)
- continue;
-
- css = cgrp->subsys[ss->subsys_id];
- /*
- * When called from check_for_release() it's possible
- * that by this point the cgroup has been removed
- * and the css deleted. But a false-positive doesn't
- * matter, since it can only happen if the cgroup
- * has been deleted and hence no longer needs the
- * release agent to be called anyway.
- */
- if (css && css_refcnt(css) > 1)
- return 1;
- }
- return 0;
-}
-
static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
struct dentry *d = cgrp->dentry;
struct cgroup *parent = cgrp->parent;
- DEFINE_WAIT(wait);
struct cgroup_event *event, *tmp;
struct cgroup_subsys *ss;
- LIST_HEAD(tmp_list);
lockdep_assert_held(&d->d_inode->i_mutex);
lockdep_assert_held(&cgroup_mutex);
@@ -4935,17 +4847,17 @@ void cgroup_post_fork(struct task_struct *child)
* and addition to css_set.
*/
if (need_forkexit_callback) {
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ /*
+ * fork/exit callbacks are supported only for builtin
+ * subsystems, and the builtin section of the subsys
+ * array is immutable, so we don't need to lock the
+ * subsys array here. On the other hand, modular section
+ * of the array can be freed at module unload, so we
+ * can't touch that.
+ */
+ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
- /*
- * fork/exit callbacks are supported only for
- * builtin subsystems and we don't need further
- * synchronization as they never go away.
- */
- if (!ss || ss->module)
- continue;
-
if (ss->fork)
ss->fork(child);
}
@@ -5010,13 +4922,13 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
tsk->cgroups = &init_css_set;
if (run_callbacks && need_forkexit_callback) {
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ /*
+ * fork/exit callbacks are supported only for builtin
+ * subsystems, see cgroup_post_fork() for details.
+ */
+ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
struct cgroup_subsys *ss = subsys[i];
- /* modular subsystems can't use callbacks */
- if (!ss || ss->module)
- continue;
-
if (ss->exit) {
struct cgroup *old_cgrp =
rcu_dereference_raw(cg->subsys[i])->cgroup;
@@ -5030,44 +4942,19 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
put_css_set_taskexit(cg);
}
-/**
- * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
- * @cgrp: the cgroup in question
- * @task: the task in question
- *
- * See if @cgrp is a descendant of @task's cgroup in the appropriate
- * hierarchy.
- *
- * If we are sending in dummytop, then presumably we are creating
- * the top cgroup in the subsystem.
- *
- * Called only by the ns (nsproxy) cgroup.
- */
-int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
-{
- int ret;
- struct cgroup *target;
-
- if (cgrp == dummytop)
- return 1;
-
- target = task_cgroup_from_root(task, cgrp->root);
- while (cgrp != target && cgrp!= cgrp->top_cgroup)
- cgrp = cgrp->parent;
- ret = (cgrp == target);
- return ret;
-}
-
static void check_for_release(struct cgroup *cgrp)
{
/* All of these checks rely on RCU to keep the cgroup
* structure alive */
- if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
- && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
- /* Control Group is currently removeable. If it's not
+ if (cgroup_is_releasable(cgrp) &&
+ !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
+ /*
+ * Control Group is currently removable. If it's not
* already queued for a userspace notification, queue
- * it now */
+ * it now
+ */
int need_schedule_work = 0;
+
raw_spin_lock(&release_list_lock);
if (!cgroup_is_removed(cgrp) &&
list_empty(&cgrp->release_list)) {
@@ -5100,24 +4987,11 @@ EXPORT_SYMBOL_GPL(__css_tryget);
/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css)
{
- struct cgroup *cgrp = css->cgroup;
int v;
- rcu_read_lock();
v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
-
- switch (v) {
- case 1:
- if (notify_on_release(cgrp)) {
- set_bit(CGRP_RELEASABLE, &cgrp->flags);
- check_for_release(cgrp);
- }
- break;
- case 0:
+ if (v == 0)
schedule_work(&css->dput_work);
- break;
- }
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__css_put);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 334d983a36b..12331120767 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -265,17 +265,6 @@ static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);
/*
- * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
- * buffers. They are statically allocated to prevent using excess stack
- * when calling cpuset_print_task_mems_allowed().
- */
-#define CPUSET_NAME_LEN (128)
-#define CPUSET_NODELIST_LEN (256)
-static char cpuset_name[CPUSET_NAME_LEN];
-static char cpuset_nodelist[CPUSET_NODELIST_LEN];
-static DEFINE_SPINLOCK(cpuset_buffer_lock);
-
-/*
* CPU / memory hotplug is handled asynchronously.
*/
static struct workqueue_struct *cpuset_propagate_hotplug_wq;
@@ -780,25 +769,26 @@ static void rebuild_sched_domains_locked(void)
lockdep_assert_held(&cpuset_mutex);
get_online_cpus();
+ /*
+ * We have raced with CPU hotplug. Bail out to avoid passing
+ * doms with an offlined cpu to partition_sched_domains().
+ * The hotplug work item will rebuild the sched domains anyway.
+ */
+ if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
+ goto out;
+
/* Generate domain masks and attrs */
ndoms = generate_sched_domains(&doms, &attr);
/* Have scheduler rebuild the domains */
partition_sched_domains(ndoms, doms, attr);
-
+out:
put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
-
-static int generate_sched_domains(cpumask_var_t **domains,
- struct sched_domain_attr **attributes)
-{
- *domains = NULL;
- return 1;
-}
#endif /* CONFIG_SMP */
void rebuild_sched_domains(void)
@@ -1388,16 +1378,16 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
cgroup_taskset_for_each(task, cgrp, tset) {
/*
- * Kthreads bound to specific cpus cannot be moved to a new
- * cpuset; we cannot change their cpu affinity and
- * isolating such threads by their set of allowed nodes is
- * unnecessary. Thus, cpusets are not applicable for such
- * threads. This prevents checking for success of
- * set_cpus_allowed_ptr() on all attached tasks before
- * cpus_allowed may be changed.
+ * Kthreads which disallow setaffinity shouldn't be moved
+ * to a new cpuset; we don't want to change their cpu
+ * affinity and isolating such threads by their set of
+ * allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for
+ * success of set_cpus_allowed_ptr() on all attached tasks
+ * before cpus_allowed may be changed.
*/
ret = -EINVAL;
- if (task->flags & PF_THREAD_BOUND)
+ if (task->flags & PF_NO_SETAFFINITY)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
@@ -2005,50 +1995,6 @@ int __init cpuset_init(void)
return 0;
}
-/**
- * cpuset_do_move_task - move a given task to another cpuset
- * @tsk: pointer to task_struct the task to move
- * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
- *
- * Called by cgroup_scan_tasks() for each task in a cgroup.
- * Return nonzero to stop the walk through the tasks.
- */
-static void cpuset_do_move_task(struct task_struct *tsk,
- struct cgroup_scanner *scan)
-{
- struct cgroup *new_cgroup = scan->data;
-
- cgroup_lock();
- cgroup_attach_task(new_cgroup, tsk);
- cgroup_unlock();
-}
-
-/**
- * move_member_tasks_to_cpuset - move tasks from one cpuset to another
- * @from: cpuset in which the tasks currently reside
- * @to: cpuset to which the tasks will be moved
- *
- * Called with cpuset_mutex held
- * callback_mutex must not be held, as cpuset_attach() will take it.
- *
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
- * calling callback functions for each.
- */
-static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
-{
- struct cgroup_scanner scan;
-
- scan.cg = from->css.cgroup;
- scan.test_task = NULL; /* select all tasks in cgroup */
- scan.process_task = cpuset_do_move_task;
- scan.heap = NULL;
- scan.data = to->css.cgroup;
-
- if (cgroup_scan_tasks(&scan))
- printk(KERN_ERR "move_member_tasks_to_cpuset: "
- "cgroup_scan_tasks failed\n");
-}
-
/*
* If CPU and/or memory hotplug handlers, below, unplug any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2069,7 +2015,12 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
nodes_empty(parent->mems_allowed))
parent = parent_cs(parent);
- move_member_tasks_to_cpuset(cs, parent);
+ if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
+ rcu_read_lock();
+ printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
+ cgroup_name(cs->css.cgroup));
+ rcu_read_unlock();
+ }
}
/**
@@ -2222,17 +2173,8 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
flush_workqueue(cpuset_propagate_hotplug_wq);
/* rebuild sched domains if cpus_allowed has changed */
- if (cpus_updated) {
- struct sched_domain_attr *attr;
- cpumask_var_t *doms;
- int ndoms;
-
- mutex_lock(&cpuset_mutex);
- ndoms = generate_sched_domains(&doms, &attr);
- mutex_unlock(&cpuset_mutex);
-
- partition_sched_domains(ndoms, doms, attr);
- }
+ if (cpus_updated)
+ rebuild_sched_domains();
}
void cpuset_update_active_cpus(bool cpu_online)
@@ -2594,6 +2536,8 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}
+#define CPUSET_NODELIST_LEN (256)
+
/**
* cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
* @task: pointer to task_struct of some task.
@@ -2604,25 +2548,22 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
*/
void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
- struct dentry *dentry;
+ /* Statically allocated to prevent using excess stack. */
+ static char cpuset_nodelist[CPUSET_NODELIST_LEN];
+ static DEFINE_SPINLOCK(cpuset_buffer_lock);
- dentry = task_cs(tsk)->css.cgroup->dentry;
- spin_lock(&cpuset_buffer_lock);
+ struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
- if (!dentry) {
- strcpy(cpuset_name, "/");
- } else {
- spin_lock(&dentry->d_lock);
- strlcpy(cpuset_name, (const char *)dentry->d_name.name,
- CPUSET_NAME_LEN);
- spin_unlock(&dentry->d_lock);
- }
+ rcu_read_lock();
+ spin_lock(&cpuset_buffer_lock);
nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
tsk->mems_allowed);
printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
- tsk->comm, cpuset_name, cpuset_nodelist);
+ tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
+
spin_unlock(&cpuset_buffer_lock);
+ rcu_read_unlock();
}
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9fcb0944f07..dce6e13cf9d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -251,7 +251,22 @@ perf_cgroup_match(struct perf_event *event)
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
- return !event->cgrp || event->cgrp == cpuctx->cgrp;
+ /* @event doesn't care about cgroup */
+ if (!event->cgrp)
+ return true;
+
+ /* wants specific cgroup scope but @cpuctx isn't associated with any */
+ if (!cpuctx->cgrp)
+ return false;
+
+ /*
+ * Cgroup scoping is recursive. An event enabled for a cgroup is
+ * also enabled for all its descendant cgroups. If @cpuctx's
+ * cgroup is a descendant of @event's (the test covers identity
+ * case), it's a match.
+ */
+ return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
+ event->cgrp->css.cgroup);
}
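For illustration (not part of the patch): the recursive-scoping test above boils down to "is @cpuctx's cgroup equal to, or nested somewhere below, @event's cgroup", which conceptually is just a walk up the parent pointers. A minimal sketch of that idea using a hypothetical node type rather than struct cgroup:

#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *parent;		/* NULL at the root */
};

/* Is @cand equal to @ancestor, or nested somewhere below it? */
static bool is_descendant(const struct node *cand, const struct node *ancestor)
{
	while (cand) {
		if (cand == ancestor)
			return true;	/* covers the identity case mentioned above */
		cand = cand->parent;
	}
	return false;
}

int main(void)
{
	struct node root = { NULL };
	struct node child = { &root };

	return !(is_descendant(&child, &root) &&	/* nested: true */
		 is_descendant(&root, &root) &&		/* identity: true */
		 !is_descendant(&root, &child));	/* reverse: false */
}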
static inline bool perf_tryget_cgroup(struct perf_event *event)
@@ -7517,12 +7532,5 @@ struct cgroup_subsys perf_subsys = {
.css_free = perf_cgroup_css_free,
.exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
-
- /*
- * perf_event cgroup doesn't handle nesting correctly.
- * ctx->nr_cgroups adjustments should be propagated through the
- * cgroup hierarchy. Fix it and remove the following.
- */
- .broken_hierarchy = true,
};
#endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9b12d65186f..16d8ddd268b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -278,7 +278,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
}
/* It's safe because the task is inactive. */
do_set_cpus_allowed(p, cpumask_of(cpu));
- p->flags |= PF_THREAD_BOUND;
+ p->flags |= PF_NO_SETAFFINITY;
}
/**
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 42053547e0f..d8285eb0cde 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4083,6 +4083,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
get_task_struct(p);
rcu_read_unlock();
+ if (p->flags & PF_NO_SETAFFINITY) {
+ retval = -EINVAL;
+ goto out_put_task;
+ }
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
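For illustration (not part of the patch): with the check moved into sched_setaffinity(), an affinity change attempted on a PF_NO_SETAFFINITY kernel thread is rejected right at the syscall. A small user-space sketch, where the pid passed on the command line is assumed to be such a kernel thread:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	cpu_set_t set;
	pid_t pid = argc > 1 ? atoi(argv[1]) : 0;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	/* Fails with EINVAL when @pid has PF_NO_SETAFFINITY set. */
	if (sched_setaffinity(pid, sizeof(set), &set) < 0)
		printf("sched_setaffinity(%d): %s\n", (int)pid, strerror(errno));
	return 0;
}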
@@ -4730,11 +4734,6 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
goto out;
}
- if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
- ret = -EINVAL;
- goto out;
- }
-
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index fc382d6e276..5e9efd4b83a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -176,6 +176,8 @@ config IRQSOFF_TRACER
select GENERIC_TRACER
select TRACER_MAX_TRACE
select RING_BUFFER_ALLOW_SWAP
+ select TRACER_SNAPSHOT
+ select TRACER_SNAPSHOT_PER_CPU_SWAP
help
This option measures the time spent in irqs-off critical
sections, with microsecond accuracy.
@@ -198,6 +200,8 @@ config PREEMPT_TRACER
select GENERIC_TRACER
select TRACER_MAX_TRACE
select RING_BUFFER_ALLOW_SWAP
+ select TRACER_SNAPSHOT
+ select TRACER_SNAPSHOT_PER_CPU_SWAP
help
This option measures the time spent in preemption-off critical
sections, with microsecond accuracy.
@@ -217,6 +221,7 @@ config SCHED_TRACER
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE
+ select TRACER_SNAPSHOT
help
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
@@ -248,6 +253,27 @@ config TRACER_SNAPSHOT
echo 1 > /sys/kernel/debug/tracing/snapshot
cat snapshot
+config TRACER_SNAPSHOT_PER_CPU_SWAP
+ bool "Allow snapshot to swap per CPU"
+ depends on TRACER_SNAPSHOT
+ select RING_BUFFER_ALLOW_SWAP
+ help
+ Allow doing a snapshot of a single CPU buffer instead of a
+ full swap (all buffers). If this is set, then the following is
+ allowed:
+
+ echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
+
+ After which, only the tracing buffer for CPU 2 will be swapped with
+ the main tracing buffer, and the other CPU buffers remain the same.
+
+ When this is enabled, it adds a little more overhead to the
+ trace recording, as it needs to add some checks to synchronize
+ recording with swaps. But this does not affect the performance
+ of the overall system. This is enabled by default when the preempt
+ or irq latency tracers are enabled, as those need to swap as well
+ and already add the overhead (plus a lot more).
+
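For illustration (not part of the patch): the per-CPU snapshot is driven entirely through the debugfs file mentioned in the help text. A small user-space sketch of the same operation done programmatically, assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with this option:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Programmatic equivalent of "echo 1 > .../per_cpu/cpu2/snapshot". */
int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/per_cpu/cpu2/snapshot";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}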
config TRACE_BRANCH_PROFILING
bool
select GENERIC_TRACER
@@ -524,6 +550,29 @@ config RING_BUFFER_BENCHMARK
If unsure, say N.
+config RING_BUFFER_STARTUP_TEST
+ bool "Ring buffer startup self test"
+ depends on RING_BUFFER
+ help
+ Run a simple self test on the ring buffer on boot up. Late in the
+ kernel boot sequence, the test will start and kick off
+ a thread per cpu. Each thread will write events of various sizes
+ into the ring buffer. Another thread is created to send IPIs
+ to each of the threads, where the IPI handler will also write
+ to the ring buffer, to test/stress the nesting ability.
+ If any anomalies are discovered, a warning will be displayed
+ and all ring buffers will be disabled.
+
+ The test runs for 10 seconds. This will slow your boot time
+ by at least 10 more seconds.
+
+ At the end of the test, statistics and more checks are done.
+ It will output the stats of each per cpu buffer: what
+ was written, the sizes, what was read, what was lost, and
+ other similar details.
+
+ If unsure, say N.
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 5a0f781cd72..ed58a3216a6 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -72,7 +72,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
bool blk_tracer = blk_tracer_enabled;
if (blk_tracer) {
- buffer = blk_tr->buffer;
+ buffer = blk_tr->trace_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len,
@@ -218,7 +218,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
if (blk_tracer) {
tracing_record_cmdline(current);
- buffer = blk_tr->buffer;
+ buffer = blk_tr->trace_buffer.buffer;
pc = preempt_count();
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b3fde6d7b7f..8a5c017bb50 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -486,7 +486,6 @@ struct ftrace_profile_stat {
#define PROFILES_PER_PAGE \
(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
-static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;
/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
@@ -494,7 +493,8 @@ static DEFINE_MUTEX(ftrace_profile_lock);
static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
-#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
+#define FTRACE_PROFILE_HASH_BITS 10
+#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void *
function_stat_next(void *v, int idx)
@@ -676,7 +676,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
- for (i = 0; i < pages; i++) {
+ for (i = 1; i < pages; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
if (!pg->next)
goto out_free;
@@ -724,13 +724,6 @@ static int ftrace_profile_init_cpu(int cpu)
if (!stat->hash)
return -ENOMEM;
- if (!ftrace_profile_bits) {
- size--;
-
- for (; size; size >>= 1)
- ftrace_profile_bits++;
- }
-
/* Preallocate the function profiling pages */
if (ftrace_profile_pages_init(stat) < 0) {
kfree(stat->hash);
@@ -763,7 +756,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
struct hlist_head *hhd;
unsigned long key;
- key = hash_long(ip, ftrace_profile_bits);
+ key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
hhd = &stat->hash[key];
if (hlist_empty(hhd))
@@ -782,7 +775,7 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat,
{
unsigned long key;
- key = hash_long(rec->ip, ftrace_profile_bits);
+ key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
@@ -1079,7 +1072,7 @@ struct ftrace_func_probe {
unsigned long flags;
unsigned long ip;
void *data;
- struct rcu_head rcu;
+ struct list_head free_list;
};
struct ftrace_func_entry {
@@ -1329,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct hlist_head *hhd;
struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
- unsigned long key;
int size = src->count;
int bits = 0;
int ret;
@@ -1372,10 +1364,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
- if (bits > 0)
- key = hash_long(entry->ip, bits);
- else
- key = 0;
remove_hash_entry(src, entry);
__add_hash_entry(new_hash, entry);
}
@@ -2973,28 +2961,27 @@ static void __disable_ftrace_function_probe(void)
}
-static void ftrace_free_entry_rcu(struct rcu_head *rhp)
+static void ftrace_free_entry(struct ftrace_func_probe *entry)
{
- struct ftrace_func_probe *entry =
- container_of(rhp, struct ftrace_func_probe, rcu);
-
if (entry->ops->free)
- entry->ops->free(&entry->data);
+ entry->ops->free(entry->ops, entry->ip, &entry->data);
kfree(entry);
}
-
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
struct ftrace_func_probe *entry;
+ struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+ struct ftrace_hash *hash;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
int type, len, not;
unsigned long key;
int count = 0;
char *search;
+ int ret;
type = filter_parse_regex(glob, strlen(glob), &search, &not);
len = strlen(search);
@@ -3005,8 +2992,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&ftrace_lock);
- if (unlikely(ftrace_disabled))
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash) {
+ count = -ENOMEM;
goto out_unlock;
+ }
+
+ if (unlikely(ftrace_disabled)) {
+ count = -ENODEV;
+ goto out_unlock;
+ }
do_for_each_ftrace_rec(pg, rec) {
@@ -3030,14 +3025,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
* for each function we find. We call the callback
* to give the caller an opportunity to do so.
*/
- if (ops->callback) {
- if (ops->callback(rec->ip, &entry->data) < 0) {
+ if (ops->init) {
+ if (ops->init(ops, rec->ip, &entry->data) < 0) {
/* caller does not like this func */
kfree(entry);
continue;
}
}
+ ret = enter_record(hash, rec, 0);
+ if (ret < 0) {
+ kfree(entry);
+ count = ret;
+ goto out_unlock;
+ }
+
entry->ops = ops;
entry->ip = rec->ip;
@@ -3045,10 +3047,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
} while_for_each_ftrace_rec();
+
+ ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+ if (ret < 0)
+ count = ret;
+
__enable_ftrace_function_probe();
out_unlock:
mutex_unlock(&ftrace_lock);
+ free_ftrace_hash(hash);
return count;
}
@@ -3062,7 +3070,12 @@ static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
+ struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
+ struct ftrace_func_probe *p;
+ struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+ struct list_head free_list;
+ struct ftrace_hash *hash;
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int type = MATCH_FULL;
@@ -3083,6 +3096,14 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
}
mutex_lock(&ftrace_lock);
+
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash)
+ /* Hmm, should report this somehow */
+ goto out_unlock;
+
+ INIT_LIST_HEAD(&free_list);
+
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -3103,12 +3124,30 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
continue;
}
+ rec_entry = ftrace_lookup_ip(hash, entry->ip);
+ /* It is possible more than one entry had this ip */
+ if (rec_entry)
+ free_hash_entry(hash, rec_entry);
+
hlist_del_rcu(&entry->node);
- call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
+ list_add(&entry->free_list, &free_list);
}
}
__disable_ftrace_function_probe();
+ /*
+ * Remove after the disable is called. Otherwise, if the last
+ * probe is removed, a null hash means *all enabled*.
+ */
+ ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+ synchronize_sched();
+ list_for_each_entry_safe(entry, p, &free_list, free_list) {
+ list_del(&entry->free_list);
+ ftrace_free_entry(entry);
+ }
+
+ out_unlock:
mutex_unlock(&ftrace_lock);
+ free_ftrace_hash(hash);
}
void
@@ -3736,7 +3775,8 @@ out:
if (fail)
return -EINVAL;
- ftrace_graph_filter_enabled = 1;
+ ftrace_graph_filter_enabled = !!(*idx);
+
return 0;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6989df2ba19..b59aea2c48c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -8,13 +8,16 @@
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
+#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/kthread.h> /* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -444,6 +447,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
return ret;
}
+struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
+ bool waiters_pending;
+};
+
/*
* head_page == tail_page && head == tail then buffer is empty.
*/
@@ -478,6 +487,8 @@ struct ring_buffer_per_cpu {
struct list_head new_pages; /* new pages to add */
struct work_struct update_pages_work;
struct completion update_done;
+
+ struct rb_irq_work irq_work;
};
struct ring_buffer {
@@ -497,6 +508,8 @@ struct ring_buffer {
struct notifier_block cpu_notify;
#endif
u64 (*clock)(void);
+
+ struct rb_irq_work irq_work;
};
struct ring_buffer_iter {
@@ -508,6 +521,118 @@ struct ring_buffer_iter {
u64 read_stamp;
};
+/*
+ * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
+ *
+ * Runs as the irq_work handler and wakes up any task that is blocked on
+ * the ring buffer waiters queue.
+ */
+static void rb_wake_up_waiters(struct irq_work *work)
+{
+ struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
+
+ wake_up_all(&rbwork->waiters);
+}
+
+/**
+ * ring_buffer_wait - wait for input to the ring buffer
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+ *
+ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+ * as data is added to any of the @buffer's cpu buffers. Otherwise
+ * it will wait for data to be added to a specific cpu buffer.
+ */
+void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ DEFINE_WAIT(wait);
+ struct rb_irq_work *work;
+
+ /*
+ * Depending on what the caller is waiting for, either any
+ * data in any cpu buffer, or a specific buffer, put the
+ * caller on the appropriate wait queue.
+ */
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ work = &buffer->irq_work;
+ else {
+ cpu_buffer = buffer->buffers[cpu];
+ work = &cpu_buffer->irq_work;
+ }
+
+
+ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worse
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. Spurious wake ups are OK.
+ */
+ work->waiters_pending = true;
+
+ if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
+ (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
+ schedule();
+
+ finish_wait(&work->waiters, &wait);
+}
+
+/**
+ * ring_buffer_poll_wait - poll on buffer input
+ * @buffer: buffer to wait on
+ * @cpu: the cpu buffer to wait on
+ * @filp: the file descriptor
+ * @poll_table: The poll descriptor
+ *
+ * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
+ * as data is added to any of the @buffer's cpu buffers. Otherwise
+ * it will wait for data to be added to a specific cpu buffer.
+ *
+ * Returns POLLIN | POLLRDNORM if data exists in the buffers,
+ * zero otherwise.
+ */
+int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct rb_irq_work *work;
+
+ if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+ return POLLIN | POLLRDNORM;
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ work = &buffer->irq_work;
+ else {
+ cpu_buffer = buffer->buffers[cpu];
+ work = &cpu_buffer->irq_work;
+ }
+
+ work->waiters_pending = true;
+ poll_wait(filp, &work->waiters, poll_table);
+
+ if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
+ (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
+
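For illustration (not part of the patch): both wait paths above set work->waiters_pending and rely on the producer side (rb_wakeups(), added further down in this patch) to clear the flag and queue the irq_work, whose handler then does the actual wake_up_all() from a safe context. A condensed sketch of this flag-then-defer-wakeup pattern with invented names (initialization via init_irq_work()/init_waitqueue_head() omitted):

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct waker {
	struct irq_work work;		/* init_irq_work(&w->work, waker_fn) */
	wait_queue_head_t waiters;	/* init_waitqueue_head(&w->waiters) */
	bool waiters_pending;
};

/* irq_work handler: runs later, where waking tasks is safe. */
static void waker_fn(struct irq_work *work)
{
	struct waker *w = container_of(work, struct waker, work);

	wake_up_all(&w->waiters);
}

/* Producer: may run where wake_up_all() would deadlock, so defer it. */
static void produce(struct waker *w)
{
	/* ... write the data ... */
	if (w->waiters_pending) {
		w->waiters_pending = false;
		irq_work_queue(&w->work);
	}
}

/* Consumer: set the flag before the final emptiness check, then sleep. */
static void consume_wait(struct waker *w, bool (*empty)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&w->waiters, &wait, TASK_INTERRUPTIBLE);
	w->waiters_pending = true;
	if (empty(arg))
		schedule();
	finish_wait(&w->waiters, &wait);
}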
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond) \
({ \
@@ -1063,6 +1188,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
init_completion(&cpu_buffer->update_done);
+ init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
+ init_waitqueue_head(&cpu_buffer->irq_work.waiters);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
@@ -1158,6 +1285,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
buffer->clock = trace_clock_local;
buffer->reader_lock_key = key;
+ init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
+ init_waitqueue_head(&buffer->irq_work.waiters);
+
/* need at least two pages */
if (nr_pages < 2)
nr_pages = 2;
@@ -1553,11 +1683,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
if (!cpu_buffer->nr_pages_to_update)
continue;
- if (cpu_online(cpu))
+ /* The update must run on the CPU that is being updated. */
+ preempt_disable();
+ if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+ rb_update_pages(cpu_buffer);
+ cpu_buffer->nr_pages_to_update = 0;
+ } else {
+ /*
+ * Can not disable preemption for schedule_work_on()
+ * on PREEMPT_RT.
+ */
+ preempt_enable();
schedule_work_on(cpu,
&cpu_buffer->update_pages_work);
- else
- rb_update_pages(cpu_buffer);
+ preempt_disable();
+ }
+ preempt_enable();
}
/* wait for all the updates to complete */
@@ -1595,12 +1736,22 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
get_online_cpus();
- if (cpu_online(cpu_id)) {
+ preempt_disable();
+ /* The update must run on the CPU that is being updated. */
+ if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+ rb_update_pages(cpu_buffer);
+ else {
+ /*
+ * Can not disable preemption for schedule_work_on()
+ * on PREEMPT_RT.
+ */
+ preempt_enable();
schedule_work_on(cpu_id,
&cpu_buffer->update_pages_work);
wait_for_completion(&cpu_buffer->update_done);
- } else
- rb_update_pages(cpu_buffer);
+ preempt_disable();
+ }
+ preempt_enable();
cpu_buffer->nr_pages_to_update = 0;
put_online_cpus();
@@ -2612,6 +2763,22 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
rb_end_commit(cpu_buffer);
}
+static __always_inline void
+rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+{
+ if (buffer->irq_work.waiters_pending) {
+ buffer->irq_work.waiters_pending = false;
+ /* irq_work_queue() supplies its own memory barriers */
+ irq_work_queue(&buffer->irq_work.work);
+ }
+
+ if (cpu_buffer->irq_work.waiters_pending) {
+ cpu_buffer->irq_work.waiters_pending = false;
+ /* irq_work_queue() supplies its own memory barriers */
+ irq_work_queue(&cpu_buffer->irq_work.work);
+ }
+}
+
/**
* ring_buffer_unlock_commit - commit a reserved
* @buffer: The buffer to commit to
@@ -2631,6 +2798,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
rb_commit(cpu_buffer, event);
+ rb_wakeups(buffer, cpu_buffer);
+
trace_recursive_unlock();
preempt_enable_notrace();
@@ -2803,6 +2972,8 @@ int ring_buffer_write(struct ring_buffer *buffer,
rb_commit(cpu_buffer, event);
+ rb_wakeups(buffer, cpu_buffer);
+
ret = 0;
out:
preempt_enable_notrace();
@@ -4467,3 +4638,320 @@ static int rb_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
#endif
+
+#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
+/*
+ * This is a basic integrity check of the ring buffer.
+ * Late in the boot cycle this test will run when configured in.
+ * It will kick off a thread per CPU that will go into a loop
+ * writing to the per cpu ring buffer various sizes of data.
+ * Some of the data will be large items, some small.
+ *
+ * Another thread is created that goes into a spin, sending out
+ * IPIs to the other CPUs to also write into the ring buffer.
+ * This is to test the nesting ability of the buffer.
+ *
+ * Basic stats are recorded and reported. If something in the
+ * ring buffer should happen that's not expected, a big warning
+ * is displayed and all ring buffers are disabled.
+ */
+static struct task_struct *rb_threads[NR_CPUS] __initdata;
+
+struct rb_test_data {
+ struct ring_buffer *buffer;
+ unsigned long events;
+ unsigned long bytes_written;
+ unsigned long bytes_alloc;
+ unsigned long bytes_dropped;
+ unsigned long events_nested;
+ unsigned long bytes_written_nested;
+ unsigned long bytes_alloc_nested;
+ unsigned long bytes_dropped_nested;
+ int min_size_nested;
+ int max_size_nested;
+ int max_size;
+ int min_size;
+ int cpu;
+ int cnt;
+};
+
+static struct rb_test_data rb_data[NR_CPUS] __initdata;
+
+/* 1 meg per cpu */
+#define RB_TEST_BUFFER_SIZE 1048576
+
+static char rb_string[] __initdata =
+ "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
+ "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
+ "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
+
+static bool rb_test_started __initdata;
+
+struct rb_item {
+ int size;
+ char str[];
+};
+
+static __init int rb_write_something(struct rb_test_data *data, bool nested)
+{
+ struct ring_buffer_event *event;
+ struct rb_item *item;
+ bool started;
+ int event_len;
+ int size;
+ int len;
+ int cnt;
+
+ /* Have nested writes different than what is written */
+ cnt = data->cnt + (nested ? 27 : 0);
+
+ /* Multiply cnt by ~e (68/25 ~= 2.72) to make some unique increment */
+ size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
+
+ len = size + sizeof(struct rb_item);
+
+ started = rb_test_started;
+ /* read rb_test_started before checking buffer enabled */
+ smp_rmb();
+
+ event = ring_buffer_lock_reserve(data->buffer, len);
+ if (!event) {
+ /* Ignore dropped events before test starts. */
+ if (started) {
+ if (nested)
+ data->bytes_dropped_nested += len;
+ else
+ data->bytes_dropped += len;
+ }
+ return len;
+ }
+
+ event_len = ring_buffer_event_length(event);
+
+ if (RB_WARN_ON(data->buffer, event_len < len))
+ goto out;
+
+ item = ring_buffer_event_data(event);
+ item->size = size;
+ memcpy(item->str, rb_string, size);
+
+ if (nested) {
+ data->bytes_alloc_nested += event_len;
+ data->bytes_written_nested += len;
+ data->events_nested++;
+ if (!data->min_size_nested || len < data->min_size_nested)
+ data->min_size_nested = len;
+ if (len > data->max_size_nested)
+ data->max_size_nested = len;
+ } else {
+ data->bytes_alloc += event_len;
+ data->bytes_written += len;
+ data->events++;
+ if (!data->min_size || len < data->min_size)
+ data->min_size = len;
+ if (len > data->max_size)
+ data->max_size = len;
+ }
+
+ out:
+ ring_buffer_unlock_commit(data->buffer, event);
+
+ return 0;
+}
+
+static __init int rb_test(void *arg)
+{
+ struct rb_test_data *data = arg;
+
+ while (!kthread_should_stop()) {
+ rb_write_something(data, false);
+ data->cnt++;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* Now sleep between a min of 100-300us and a max of 1ms */
+ usleep_range(((data->cnt % 3) + 1) * 100, 1000);
+ }
+
+ return 0;
+}
+
+static __init void rb_ipi(void *ignore)
+{
+ struct rb_test_data *data;
+ int cpu = smp_processor_id();
+
+ data = &rb_data[cpu];
+ rb_write_something(data, true);
+}
+
+static __init int rb_hammer_test(void *arg)
+{
+ while (!kthread_should_stop()) {
+
+ /* Send an IPI to all cpus to write data! */
+ smp_call_function(rb_ipi, NULL, 1);
+ /* No sleep, but for non preempt, let others run */
+ schedule();
+ }
+
+ return 0;
+}
+
+static __init int test_ringbuffer(void)
+{
+ struct task_struct *rb_hammer;
+ struct ring_buffer *buffer;
+ int cpu;
+ int ret = 0;
+
+ pr_info("Running ring buffer tests...\n");
+
+ buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
+ if (WARN_ON(!buffer))
+ return 0;
+
+ /* Disable buffer so that threads can't write to it yet */
+ ring_buffer_record_off(buffer);
+
+ for_each_online_cpu(cpu) {
+ rb_data[cpu].buffer = buffer;
+ rb_data[cpu].cpu = cpu;
+ rb_data[cpu].cnt = cpu;
+ rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
+ "rbtester/%d", cpu);
+ if (WARN_ON(!rb_threads[cpu])) {
+ pr_cont("FAILED\n");
+ ret = -1;
+ goto out_free;
+ }
+
+ kthread_bind(rb_threads[cpu], cpu);
+ wake_up_process(rb_threads[cpu]);
+ }
+
+ /* Now create the rb hammer! */
+ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
+ if (WARN_ON(!rb_hammer)) {
+ pr_cont("FAILED\n");
+ ret = -1;
+ goto out_free;
+ }
+
+ ring_buffer_record_on(buffer);
+ /*
+ * Show buffer is enabled before setting rb_test_started.
+ * Yes there's a small race window where events could be
+ * dropped and the thread won't catch it. But when a ring
+ * buffer gets enabled, there will always be some kind of
+ * delay before other CPUs see it. Thus, we don't care about
+ * those dropped events. We care about events dropped after
+ * the threads see that the buffer is active.
+ */
+ smp_wmb();
+ rb_test_started = true;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ /* Just run for 10 seconds */
+ schedule_timeout(10 * HZ);
+
+ kthread_stop(rb_hammer);
+
+ out_free:
+ for_each_online_cpu(cpu) {
+ if (!rb_threads[cpu])
+ break;
+ kthread_stop(rb_threads[cpu]);
+ }
+ if (ret) {
+ ring_buffer_free(buffer);
+ return ret;
+ }
+
+ /* Report! */
+ pr_info("finished\n");
+ for_each_online_cpu(cpu) {
+ struct ring_buffer_event *event;
+ struct rb_test_data *data = &rb_data[cpu];
+ struct rb_item *item;
+ unsigned long total_events;
+ unsigned long total_dropped;
+ unsigned long total_written;
+ unsigned long total_alloc;
+ unsigned long total_read = 0;
+ unsigned long total_size = 0;
+ unsigned long total_len = 0;
+ unsigned long total_lost = 0;
+ unsigned long lost;
+ int big_event_size;
+ int small_event_size;
+
+ ret = -1;
+
+ total_events = data->events + data->events_nested;
+ total_written = data->bytes_written + data->bytes_written_nested;
+ total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
+ total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
+
+ big_event_size = data->max_size + data->max_size_nested;
+ small_event_size = data->min_size + data->min_size_nested;
+
+ pr_info("CPU %d:\n", cpu);
+ pr_info(" events: %ld\n", total_events);
+ pr_info(" dropped bytes: %ld\n", total_dropped);
+ pr_info(" alloced bytes: %ld\n", total_alloc);
+ pr_info(" written bytes: %ld\n", total_written);
+ pr_info(" biggest event: %d\n", big_event_size);
+ pr_info(" smallest event: %d\n", small_event_size);
+
+ if (RB_WARN_ON(buffer, total_dropped))
+ break;
+
+ ret = 0;
+
+ while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
+ total_lost += lost;
+ item = ring_buffer_event_data(event);
+ total_len += ring_buffer_event_length(event);
+ total_size += item->size + sizeof(struct rb_item);
+ if (memcmp(&item->str[0], rb_string, item->size) != 0) {
+ pr_info("FAILED!\n");
+ pr_info("buffer had: %.*s\n", item->size, item->str);
+ pr_info("expected: %.*s\n", item->size, rb_string);
+ RB_WARN_ON(buffer, 1);
+ ret = -1;
+ break;
+ }
+ total_read++;
+ }
+ if (ret)
+ break;
+
+ ret = -1;
+
+ pr_info(" read events: %ld\n", total_read);
+ pr_info(" lost events: %ld\n", total_lost);
+ pr_info(" total events: %ld\n", total_lost + total_read);
+ pr_info(" recorded len bytes: %ld\n", total_len);
+ pr_info(" recorded size bytes: %ld\n", total_size);
+ if (total_lost)
+ pr_info(" With dropped events, record len and size may not match\n"
+ " alloced and written from above\n");
+ if (!total_lost) {
+ if (RB_WARN_ON(buffer, total_len != total_alloc ||
+ total_size != total_written))
+ break;
+ }
+ if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
+ break;
+
+ ret = 0;
+ }
+ if (!ret)
+ pr_info("Ring buffer PASSED!\n");
+
+ ring_buffer_free(buffer);
+ return 0;
+}
+
+late_initcall(test_ringbuffer);
+#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
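For orientation, the reserve/commit/consume cycle that this startup test hammers on boils down to the minimal sketch below. This is an editorial illustration only, not part of the patch: my_rb_smoke_test() and its fixed payload are invented names, and error handling is reduced to the bare minimum.

#include <linux/ring_buffer.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/bug.h>

static int my_rb_smoke_test(void)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	char *body;
	int cpu;

	buffer = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	/* Producer side: reserve space, fill it in, then commit. */
	event = ring_buffer_lock_reserve(buffer, 8);
	if (event) {
		body = ring_buffer_event_data(event);
		memcpy(body, "example", 8);
		ring_buffer_unlock_commit(buffer, event);
	}

	/* Consumer side: drain every CPU's buffer and verify the payload. */
	for_each_online_cpu(cpu) {
		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
			body = ring_buffer_event_data(event);
			WARN_ON(memcmp(body, "example", 8) != 0);
		}
	}

	ring_buffer_free(buffer);
	return 0;
}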
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 66338c4f7f4..581630a6387 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1,7 +1,7 @@
/*
* ring buffer based function tracer
*
- * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*
* Originally taken from the RT patch by:
@@ -19,7 +19,6 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
-#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
@@ -48,7 +47,7 @@
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
*/
-int ring_buffer_expanded;
+bool ring_buffer_expanded;
/*
* We need to change this state when a selftest is running.
@@ -87,14 +86,6 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
static DEFINE_PER_CPU(bool, trace_cmdline_save);
/*
- * When a reader is waiting for data, then this variable is
- * set to true.
- */
-static bool trace_wakeup_needed;
-
-static struct irq_work trace_work_wakeup;
-
-/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
@@ -130,12 +121,14 @@ static int tracing_set_tracer(const char *buf);
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
+static bool allocate_snapshot;
+
static int __init set_cmdline_ftrace(char *str)
{
strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
- ring_buffer_expanded = 1;
+ ring_buffer_expanded = true;
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
@@ -156,6 +149,15 @@ static int __init set_ftrace_dump_on_oops(char *str)
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
+static int __init boot_alloc_snapshot(char *str)
+{
+ allocate_snapshot = true;
+ /* We also need the main ring buffer expanded */
+ ring_buffer_expanded = true;
+ return 1;
+}
+__setup("alloc_snapshot", boot_alloc_snapshot);
+
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;
@@ -189,7 +191,7 @@ unsigned long long ns2usecs(cycle_t nsec)
*/
static struct trace_array global_trace;
-static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
+LIST_HEAD(ftrace_trace_arrays);
int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call, void *rec,
@@ -204,29 +206,15 @@ cycle_t ftrace_now(int cpu)
u64 ts;
/* Early boot up does not have a buffer yet */
- if (!global_trace.buffer)
+ if (!global_trace.trace_buffer.buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
- ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
+ ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
+ ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
return ts;
}
-/*
- * The max_tr is used to snapshot the global_trace when a maximum
- * latency is reached. Some tracers will use this to store a maximum
- * trace while it continues examining live traces.
- *
- * The buffers for the max_tr are set up the same as the global_trace.
- * When a snapshot is taken, the link list of the max_tr is swapped
- * with the link list of the global_trace and the buffers are reset for
- * the global_trace so the tracing can continue.
- */
-static struct trace_array max_tr;
-
-static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-
int tracing_is_enabled(void)
{
return tracing_is_on();
@@ -249,9 +237,6 @@ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;
-/* current_trace points to the tracer that is currently active */
-static struct tracer *current_trace __read_mostly = &nop_trace;
-
/*
* trace_types_lock is used to protect the trace_types list.
*/
@@ -285,13 +270,13 @@ static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
- if (cpu == TRACE_PIPE_ALL_CPU) {
+ if (cpu == RING_BUFFER_ALL_CPUS) {
/* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock);
} else {
/* gain it for accessing a cpu ring buffer. */
- /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
+ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */
@@ -301,7 +286,7 @@ static inline void trace_access_lock(int cpu)
static inline void trace_access_unlock(int cpu)
{
- if (cpu == TRACE_PIPE_ALL_CPU) {
+ if (cpu == RING_BUFFER_ALL_CPUS) {
up_write(&all_cpu_access_lock);
} else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
@@ -339,30 +324,11 @@ static inline void trace_access_lock_init(void)
#endif
-/* trace_wait is a waitqueue for tasks blocked on trace_poll */
-static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
-
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
- TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;
-
-static int trace_stop_count;
-static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-static void trace_wake_up(struct irq_work *work)
-{
- wake_up_all(&trace_wait);
-
-}
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
/**
* tracing_on - enable tracing buffers
@@ -372,8 +338,8 @@ static void trace_wake_up(struct irq_work *work)
*/
void tracing_on(void)
{
- if (global_trace.buffer)
- ring_buffer_record_on(global_trace.buffer);
+ if (global_trace.trace_buffer.buffer)
+ ring_buffer_record_on(global_trace.trace_buffer.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
@@ -385,6 +351,196 @@ void tracing_on(void)
EXPORT_SYMBOL_GPL(tracing_on);
/**
+ * __trace_puts - write a constant string into the trace buffer.
+ * @ip: The address of the caller
+ * @str: The constant string to write
+ * @size: The size of the string.
+ */
+int __trace_puts(unsigned long ip, const char *str, int size)
+{
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ struct print_entry *entry;
+ unsigned long irq_flags;
+ int alloc;
+
+ alloc = sizeof(*entry) + size + 2; /* possible \n added */
+
+ local_save_flags(irq_flags);
+ buffer = global_trace.trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ irq_flags, preempt_count());
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+
+ memcpy(&entry->buf, str, size);
+
+ /* Add a newline if necessary */
+ if (entry->buf[size - 1] != '\n') {
+ entry->buf[size] = '\n';
+ entry->buf[size + 1] = '\0';
+ } else
+ entry->buf[size] = '\0';
+
+ __buffer_unlock_commit(buffer, event);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(__trace_puts);
+
+/**
+ * __trace_bputs - write the pointer to a constant string into the trace buffer
+ * @ip: The address of the caller
+ * @str: The constant string to write to the buffer
+ */
+int __trace_bputs(unsigned long ip, const char *str)
+{
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ struct bputs_entry *entry;
+ unsigned long irq_flags;
+ int size = sizeof(struct bputs_entry);
+
+ local_save_flags(irq_flags);
+ buffer = global_trace.trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+ irq_flags, preempt_count());
+ if (!event)
+ return 0;
+
+ entry = ring_buffer_event_data(event);
+ entry->ip = ip;
+ entry->str = str;
+
+ __buffer_unlock_commit(buffer, event);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(__trace_bputs);
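These two exports back the trace_puts() convenience helper added in the same series; a hedged sketch of the intended call site follows (it assumes that helper is available via <linux/kernel.h>; my_checkpoint() is an invented name):

#include <linux/kernel.h>

/*
 * Sketch only: with the trace_puts() helper, a built-in constant string
 * should compile down to __trace_bputs() (only the pointer is recorded),
 * while anything else falls back to __trace_puts().
 */
static void my_checkpoint(void)
{
	trace_puts("my_checkpoint: reached the fast path\n");
}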
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+/**
+ * tracing_snapshot - take a snapshot of the current buffer.
+ *
+ * This causes a swap between the snapshot buffer and the current live
+ * tracing buffer. You can use this to take snapshots of the live
+ * trace when some condition is triggered, but continue to trace.
+ *
+ * Note, make sure to allocate the snapshot with either
+ * a tracing_snapshot_alloc(), or by doing it manually
+ * with: echo 1 > /sys/kernel/debug/tracing/snapshot
+ *
+ * If the snapshot buffer is not allocated, it will stop tracing,
+ * basically making a permanent snapshot.
+ */
+void tracing_snapshot(void)
+{
+ struct trace_array *tr = &global_trace;
+ struct tracer *tracer = tr->current_trace;
+ unsigned long flags;
+
+ if (in_nmi()) {
+ internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+ internal_trace_puts("*** snapshot is being ignored ***\n");
+ return;
+ }
+
+ if (!tr->allocated_snapshot) {
+ internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+ internal_trace_puts("*** stopping trace here! ***\n");
+ tracing_off();
+ return;
+ }
+
+ /* Note, snapshot can not be used when the tracer uses it */
+ if (tracer->use_max_tr) {
+ internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+ internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
+ return;
+ }
+
+ local_irq_save(flags);
+ update_max_tr(tr, current, smp_processor_id());
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
+
+static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
+ struct trace_buffer *size_buf, int cpu_id);
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+
+static int alloc_snapshot(struct trace_array *tr)
+{
+ int ret;
+
+ if (!tr->allocated_snapshot) {
+
+ /* allocate spare buffer */
+ ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ return ret;
+
+ tr->allocated_snapshot = true;
+ }
+
+ return 0;
+}
+
+void free_snapshot(struct trace_array *tr)
+{
+ /*
+ * We don't free the ring buffer; instead, we resize it because
+ * the max_tr ring buffer has some state (e.g. ring->clock) and
+ * we want to preserve it.
+ */
+ ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
+ set_buffer_entries(&tr->max_buffer, 1);
+ tracing_reset_online_cpus(&tr->max_buffer);
+ tr->allocated_snapshot = false;
+}
+
+/**
+ * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
+ *
+ * This is similar to tracing_snapshot(), but it will allocate the
+ * snapshot buffer if it isn't already allocated. Use this only
+ * where it is safe to sleep, as the allocation may sleep.
+ *
+ * This causes a swap between the snapshot buffer and the current live
+ * tracing buffer. You can use this to take snapshots of the live
+ * trace when some condition is triggered, but continue to trace.
+ */
+void tracing_snapshot_alloc(void)
+{
+ struct trace_array *tr = &global_trace;
+ int ret;
+
+ ret = alloc_snapshot(tr);
+ if (WARN_ON(ret < 0))
+ return;
+
+ tracing_snapshot();
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+#else
+void tracing_snapshot(void)
+{
+ WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot);
+void tracing_snapshot_alloc(void)
+{
+ /* Give warning */
+ tracing_snapshot();
+}
+EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+#endif /* CONFIG_TRACER_SNAPSHOT */
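The exported tracing_snapshot()/tracing_snapshot_alloc() pair is meant to be called from kernel code that wants to freeze an interesting trace while tracing continues; a hypothetical usage sketch follows (the my_dev_* names are invented, and the spare buffer can alternatively be allocated at boot with the alloc_snapshot command-line parameter added earlier in this patch):

/*
 * Illustrative only.  Allocate the spare buffer once from a sleepable
 * context, then snapshot the live trace from the (possibly atomic)
 * error path.
 */
static int my_dev_probe(void)
{
	tracing_snapshot_alloc();	/* may sleep */
	return 0;
}

static void my_dev_handle_fault(void)
{
	tracing_snapshot();		/* swap live and snapshot buffers */
}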
+
+/**
* tracing_off - turn off tracing buffers
*
* This function stops the tracing buffers from recording data.
@@ -394,8 +550,8 @@ EXPORT_SYMBOL_GPL(tracing_on);
*/
void tracing_off(void)
{
- if (global_trace.buffer)
- ring_buffer_record_off(global_trace.buffer);
+ if (global_trace.trace_buffer.buffer)
+ ring_buffer_record_off(global_trace.trace_buffer.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
@@ -411,8 +567,8 @@ EXPORT_SYMBOL_GPL(tracing_off);
*/
int tracing_is_on(void)
{
- if (global_trace.buffer)
- return ring_buffer_record_is_on(global_trace.buffer);
+ if (global_trace.trace_buffer.buffer)
+ return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
@@ -479,6 +635,7 @@ static const char *trace_options[] = {
"disable_on_free",
"irq-info",
"markers",
+ "function-trace",
NULL
};
@@ -490,6 +647,8 @@ static struct {
{ trace_clock_local, "local", 1 },
{ trace_clock_global, "global", 1 },
{ trace_clock_counter, "counter", 0 },
+ { trace_clock_jiffies, "uptime", 1 },
+ { trace_clock, "perf", 1 },
ARCH_TRACE_CLOCKS
};
@@ -670,13 +829,14 @@ unsigned long __read_mostly tracing_max_latency;
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct trace_array_cpu *data = tr->data[cpu];
- struct trace_array_cpu *max_data;
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
+ struct trace_buffer *max_buf = &tr->max_buffer;
+ struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
+ struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
- max_tr.cpu = cpu;
- max_tr.time_start = data->preempt_timestamp;
+ max_buf->cpu = cpu;
+ max_buf->time_start = data->preempt_timestamp;
- max_data = max_tr.data[cpu];
max_data->saved_latency = tracing_max_latency;
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
@@ -706,22 +866,22 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct ring_buffer *buf;
- if (trace_stop_count)
+ if (tr->stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (!current_trace->allocated_snapshot) {
+ if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
- WARN_ON_ONCE(current_trace != &nop_trace);
+ WARN_ON_ONCE(tr->current_trace != &nop_trace);
return;
}
arch_spin_lock(&ftrace_max_lock);
- buf = tr->buffer;
- tr->buffer = max_tr.buffer;
- max_tr.buffer = buf;
+ buf = tr->trace_buffer.buffer;
+ tr->trace_buffer.buffer = tr->max_buffer.buffer;
+ tr->max_buffer.buffer = buf;
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&ftrace_max_lock);
@@ -740,19 +900,19 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int ret;
- if (trace_stop_count)
+ if (tr->stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (!current_trace->allocated_snapshot) {
+ if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */
- WARN_ON_ONCE(current_trace != &nop_trace);
+ WARN_ON_ONCE(tr->current_trace != &nop_trace);
return;
}
arch_spin_lock(&ftrace_max_lock);
- ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
+ ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
if (ret == -EBUSY) {
/*
@@ -761,7 +921,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
*/
- trace_array_printk(&max_tr, _THIS_IP_,
+ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
"Failed to swap buffers due to commit in progress\n");
}
@@ -774,37 +934,78 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
static void default_wait_pipe(struct trace_iterator *iter)
{
- DEFINE_WAIT(wait);
+ /* Iterators are static, they should be filled or empty */
+ if (trace_buffer_iter(iter, iter->cpu_file))
+ return;
+
+ ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
+}
+
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+static int run_tracer_selftest(struct tracer *type)
+{
+ struct trace_array *tr = &global_trace;
+ struct tracer *saved_tracer = tr->current_trace;
+ int ret;
- prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+ if (!type->selftest || tracing_selftest_disabled)
+ return 0;
/*
- * The events can happen in critical sections where
- * checking a work queue can cause deadlocks.
- * After adding a task to the queue, this flag is set
- * only to notify events to try to wake up the queue
- * using irq_work.
- *
- * We don't clear it even if the buffer is no longer
- * empty. The flag only causes the next event to run
- * irq_work to do the work queue wake up. The worse
- * that can happen if we race with !trace_empty() is that
- * an event will cause an irq_work to try to wake up
- * an empty queue.
- *
- * There's no reason to protect this flag either, as
- * the work queue and irq_work logic will do the necessary
- * synchronization for the wake ups. The only thing
- * that is necessary is that the wake up happens after
- * a task has been queued. It's OK for spurious wake ups.
+ * Run a selftest on this tracer.
+ * Here we reset the trace buffer, and set the current
+ * tracer to be this tracer. The tracer can then run some
+ * internal tracing to verify that everything is in order.
+ * If we fail, we do not register this tracer.
*/
- trace_wakeup_needed = true;
+ tracing_reset_online_cpus(&tr->trace_buffer);
- if (trace_empty(iter))
- schedule();
+ tr->current_trace = type;
- finish_wait(&trace_wait, &wait);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (type->use_max_tr) {
+ /* If we expanded the buffers, make sure the max is expanded too */
+ if (ring_buffer_expanded)
+ ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
+ RING_BUFFER_ALL_CPUS);
+ tr->allocated_snapshot = true;
+ }
+#endif
+
+ /* the test is responsible for initializing and enabling */
+ pr_info("Testing tracer %s: ", type->name);
+ ret = type->selftest(type, tr);
+ /* the test is responsible for resetting too */
+ tr->current_trace = saved_tracer;
+ if (ret) {
+ printk(KERN_CONT "FAILED!\n");
+ /* Add the warning after printing 'FAILED' */
+ WARN_ON(1);
+ return -1;
+ }
+ /* Only reset on passing, to avoid touching corrupted buffers */
+ tracing_reset_online_cpus(&tr->trace_buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (type->use_max_tr) {
+ tr->allocated_snapshot = false;
+
+ /* Shrink the max buffer again */
+ if (ring_buffer_expanded)
+ ring_buffer_resize(tr->max_buffer.buffer, 1,
+ RING_BUFFER_ALL_CPUS);
+ }
+#endif
+
+ printk(KERN_CONT "PASSED\n");
+ return 0;
+}
+#else
+static inline int run_tracer_selftest(struct tracer *type)
+{
+ return 0;
}
+#endif /* CONFIG_FTRACE_STARTUP_TEST */
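run_tracer_selftest() is now the single path register_tracer() takes for startup tests; the shape of a tracer that opts in looks roughly like the sketch below (names and values are placeholders, not taken from this patch):

static int my_tracer_init(struct trace_array *tr)
{
	return 0;		/* start tracing */
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* stop tracing, free per-tracer state */
}

static struct tracer my_tracer __read_mostly = {
	.name		= "mytracer",
	.init		= my_tracer_init,
	.reset		= my_tracer_reset,
#ifdef CONFIG_FTRACE_STARTUP_TEST
	.selftest	= NULL,	/* e.g. a trace_selftest_startup_*() hook */
#endif
};

/* register_tracer(&my_tracer) will then run the selftest, if one is set. */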
/**
* register_tracer - register a tracer with the ftrace system.
@@ -851,57 +1052,9 @@ int register_tracer(struct tracer *type)
if (!type->wait_pipe)
type->wait_pipe = default_wait_pipe;
-
-#ifdef CONFIG_FTRACE_STARTUP_TEST
- if (type->selftest && !tracing_selftest_disabled) {
- struct tracer *saved_tracer = current_trace;
- struct trace_array *tr = &global_trace;
-
- /*
- * Run a selftest on this tracer.
- * Here we reset the trace buffer, and set the current
- * tracer to be this tracer. The tracer can then run some
- * internal tracing to verify that everything is in order.
- * If we fail, we do not register this tracer.
- */
- tracing_reset_online_cpus(tr);
-
- current_trace = type;
-
- if (type->use_max_tr) {
- /* If we expanded the buffers, make sure the max is expanded too */
- if (ring_buffer_expanded)
- ring_buffer_resize(max_tr.buffer, trace_buf_size,
- RING_BUFFER_ALL_CPUS);
- type->allocated_snapshot = true;
- }
-
- /* the test is responsible for initializing and enabling */
- pr_info("Testing tracer %s: ", type->name);
- ret = type->selftest(type, tr);
- /* the test is responsible for resetting too */
- current_trace = saved_tracer;
- if (ret) {
- printk(KERN_CONT "FAILED!\n");
- /* Add the warning after printing 'FAILED' */
- WARN_ON(1);
- goto out;
- }
- /* Only reset on passing, to avoid touching corrupted buffers */
- tracing_reset_online_cpus(tr);
-
- if (type->use_max_tr) {
- type->allocated_snapshot = false;
-
- /* Shrink the max buffer again */
- if (ring_buffer_expanded)
- ring_buffer_resize(max_tr.buffer, 1,
- RING_BUFFER_ALL_CPUS);
- }
-
- printk(KERN_CONT "PASSED\n");
- }
-#endif
+ ret = run_tracer_selftest(type);
+ if (ret < 0)
+ goto out;
type->next = trace_types;
trace_types = type;
@@ -921,7 +1074,7 @@ int register_tracer(struct tracer *type)
tracing_set_tracer(type->name);
default_bootup_tracer = NULL;
/* disable other selftests, since this will break it. */
- tracing_selftest_disabled = 1;
+ tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
type->name);
@@ -931,9 +1084,9 @@ int register_tracer(struct tracer *type)
return ret;
}
-void tracing_reset(struct trace_array *tr, int cpu)
+void tracing_reset(struct trace_buffer *buf, int cpu)
{
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = buf->buffer;
if (!buffer)
return;
@@ -947,9 +1100,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
ring_buffer_record_enable(buffer);
}
-void tracing_reset_online_cpus(struct trace_array *tr)
+void tracing_reset_online_cpus(struct trace_buffer *buf)
{
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = buf->buffer;
int cpu;
if (!buffer)
@@ -960,7 +1113,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
/* Make sure all commits have finished */
synchronize_sched();
- tr->time_start = ftrace_now(tr->cpu);
+ buf->time_start = ftrace_now(buf->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
@@ -970,12 +1123,21 @@ void tracing_reset_online_cpus(struct trace_array *tr)
void tracing_reset_current(int cpu)
{
- tracing_reset(&global_trace, cpu);
+ tracing_reset(&global_trace.trace_buffer, cpu);
}
-void tracing_reset_current_online_cpus(void)
+void tracing_reset_all_online_cpus(void)
{
- tracing_reset_online_cpus(&global_trace);
+ struct trace_array *tr;
+
+ mutex_lock(&trace_types_lock);
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ tracing_reset_online_cpus(&tr->trace_buffer);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ tracing_reset_online_cpus(&tr->max_buffer);
+#endif
+ }
+ mutex_unlock(&trace_types_lock);
}
#define SAVED_CMDLINES 128
@@ -998,7 +1160,7 @@ static void trace_init_cmdlines(void)
int is_tracing_stopped(void)
{
- return trace_stop_count;
+ return global_trace.stop_count;
}
/**
@@ -1030,12 +1192,12 @@ void tracing_start(void)
if (tracing_disabled)
return;
- raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (--trace_stop_count) {
- if (trace_stop_count < 0) {
+ raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+ if (--global_trace.stop_count) {
+ if (global_trace.stop_count < 0) {
/* Someone screwed up their debugging */
WARN_ON_ONCE(1);
- trace_stop_count = 0;
+ global_trace.stop_count = 0;
}
goto out;
}
@@ -1043,19 +1205,52 @@ void tracing_start(void)
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
- buffer = global_trace.buffer;
+ buffer = global_trace.trace_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
- buffer = max_tr.buffer;
+#ifdef CONFIG_TRACER_MAX_TRACE
+ buffer = global_trace.max_buffer.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
+#endif
arch_spin_unlock(&ftrace_max_lock);
ftrace_start();
out:
- raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_start_tr(struct trace_array *tr)
+{
+ struct ring_buffer *buffer;
+ unsigned long flags;
+
+ if (tracing_disabled)
+ return;
+
+ /* If global, we need to also start the max tracer */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return tracing_start();
+
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+
+ if (--tr->stop_count) {
+ if (tr->stop_count < 0) {
+ /* Someone screwed up their debugging */
+ WARN_ON_ONCE(1);
+ tr->stop_count = 0;
+ }
+ goto out;
+ }
+
+ buffer = tr->trace_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_enable(buffer);
+
+ out:
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
@@ -1070,25 +1265,48 @@ void tracing_stop(void)
unsigned long flags;
ftrace_stop();
- raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (trace_stop_count++)
+ raw_spin_lock_irqsave(&global_trace.start_lock, flags);
+ if (global_trace.stop_count++)
goto out;
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
- buffer = global_trace.buffer;
+ buffer = global_trace.trace_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
- buffer = max_tr.buffer;
+#ifdef CONFIG_TRACER_MAX_TRACE
+ buffer = global_trace.max_buffer.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
+#endif
arch_spin_unlock(&ftrace_max_lock);
out:
- raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
+ struct ring_buffer *buffer;
+ unsigned long flags;
+
+ /* If global, we need to also stop the max tracer */
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ return tracing_stop();
+
+ raw_spin_lock_irqsave(&tr->start_lock, flags);
+ if (tr->stop_count++)
+ goto out;
+
+ buffer = tr->trace_buffer.buffer;
+ if (buffer)
+ ring_buffer_record_disable(buffer);
+
+ out:
+ raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
void trace_stop_cmdline_recording(void);
@@ -1221,11 +1439,6 @@ void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
__this_cpu_write(trace_cmdline_save, true);
- if (trace_wakeup_needed) {
- trace_wakeup_needed = false;
- /* irq_work_queue() supplies it's own memory barriers */
- irq_work_queue(&trace_work_wakeup);
- }
ring_buffer_unlock_commit(buffer, event);
}
@@ -1249,11 +1462,23 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
+trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+ struct ftrace_event_file *ftrace_file,
+ int type, unsigned long len,
+ unsigned long flags, int pc)
+{
+ *current_rb = ftrace_file->tr->trace_buffer.buffer;
+ return trace_buffer_lock_reserve(*current_rb,
+ type, len, flags, pc);
+}
+EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
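A hedged sketch of how an event probe is expected to use the new ftrace_event_file-aware reserve helper, paired with the already exported trace_buffer_unlock_commit(). Here struct my_entry and my_event_probe() are invented for illustration; real probes are generated by the TRACE_EVENT() macros.

struct my_entry {			/* hypothetical event layout */
	struct trace_entry	ent;
	unsigned long		value;
};

static void my_event_probe(struct ftrace_event_file *file, unsigned long value)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct my_entry *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_event_buffer_lock_reserve(&buffer, file,
						file->event_call->event.type,
						sizeof(*entry), irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}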
+
+struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
int type, unsigned long len,
unsigned long flags, int pc)
{
- *current_rb = global_trace.buffer;
+ *current_rb = global_trace.trace_buffer.buffer;
return trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
}
@@ -1292,7 +1517,7 @@ trace_function(struct trace_array *tr,
int pc)
{
struct ftrace_event_call *call = &event_function;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
@@ -1433,13 +1658,14 @@ void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
- __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
+ __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
/**
* trace_dump_stack - record a stack back trace in the trace buffer
+ * @skip: Number of functions to skip (helper handlers)
*/
-void trace_dump_stack(void)
+void trace_dump_stack(int skip)
{
unsigned long flags;
@@ -1448,8 +1674,13 @@ void trace_dump_stack(void)
local_save_flags(flags);
- /* skipping 3 traces, seems to get us at the caller of this function */
- __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
+ /*
+ * Skip 3 more, seems to get us at the caller of
+ * this function.
+ */
+ skip += 3;
+ __ftrace_trace_stack(global_trace.trace_buffer.buffer,
+ flags, skip, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
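With the new skip argument, a caller buried behind helper functions can trim its own frames from the recorded backtrace; a small hypothetical example (my_dump_here() is not part of this patch):

static noinline void my_dump_here(void)
{
	trace_dump_stack(1);	/* skip my_dump_here() itself */
}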
@@ -1619,7 +1850,7 @@ void trace_printk_init_buffers(void)
* directly here. If the global_trace.buffer is already
* allocated here, then this was called by module code.
*/
- if (global_trace.buffer)
+ if (global_trace.trace_buffer.buffer)
tracing_start_cmdline_record();
}
@@ -1679,7 +1910,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
- buffer = tr->buffer;
+ buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
flags, pc);
if (!event)
@@ -1702,27 +1933,12 @@ out:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
-int trace_array_printk(struct trace_array *tr,
- unsigned long ip, const char *fmt, ...)
-{
- int ret;
- va_list ap;
-
- if (!(trace_flags & TRACE_ITER_PRINTK))
- return 0;
-
- va_start(ap, fmt);
- ret = trace_array_vprintk(tr, ip, fmt, ap);
- va_end(ap);
- return ret;
-}
-
-int trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args)
+static int
+__trace_array_vprintk(struct ring_buffer *buffer,
+ unsigned long ip, const char *fmt, va_list args)
{
struct ftrace_event_call *call = &event_print;
struct ring_buffer_event *event;
- struct ring_buffer *buffer;
int len = 0, size, pc;
struct print_entry *entry;
unsigned long flags;
@@ -1750,7 +1966,6 @@ int trace_array_vprintk(struct trace_array *tr,
local_save_flags(flags);
size = sizeof(*entry) + len + 1;
- buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
flags, pc);
if (!event)
@@ -1771,6 +1986,42 @@ int trace_array_vprintk(struct trace_array *tr,
return len;
}
+int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args)
+{
+ return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+}
+
+int trace_array_printk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = trace_array_vprintk(tr, ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+int trace_array_printk_buf(struct ring_buffer *buffer,
+ unsigned long ip, const char *fmt, ...)
+{
+ int ret;
+ va_list ap;
+
+ if (!(trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
+ va_start(ap, fmt);
+ ret = __trace_array_vprintk(buffer, ip, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -1796,7 +2047,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
- event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+ event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
lost_events);
if (event) {
@@ -1811,7 +2062,7 @@ static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
unsigned long *missing_events, u64 *ent_ts)
{
- struct ring_buffer *buffer = iter->tr->buffer;
+ struct ring_buffer *buffer = iter->trace_buffer->buffer;
struct trace_entry *ent, *next = NULL;
unsigned long lost_events = 0, next_lost = 0;
int cpu_file = iter->cpu_file;
@@ -1824,7 +2075,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
* If we are in a per_cpu trace file, don't bother by iterating over
* all cpu and peek directly.
*/
- if (cpu_file > TRACE_PIPE_ALL_CPU) {
+ if (cpu_file > RING_BUFFER_ALL_CPUS) {
if (ring_buffer_empty_cpu(buffer, cpu_file))
return NULL;
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
@@ -1888,7 +2139,7 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
static void trace_consume(struct trace_iterator *iter)
{
- ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+ ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
&iter->lost_events);
}
@@ -1921,13 +2172,12 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
- struct trace_array *tr = iter->tr;
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter;
unsigned long entries = 0;
u64 ts;
- tr->data[cpu]->skipped_entries = 0;
+ per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
buf_iter = trace_buffer_iter(iter, cpu);
if (!buf_iter)
@@ -1941,13 +2191,13 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
* by the timestamp being before the start of the buffer.
*/
while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
- if (ts >= iter->tr->time_start)
+ if (ts >= iter->trace_buffer->time_start)
break;
entries++;
ring_buffer_read(buf_iter, NULL);
}
- tr->data[cpu]->skipped_entries = entries;
+ per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
/*
@@ -1957,6 +2207,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
@@ -1969,12 +2220,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
- if (unlikely(current_trace && iter->trace->name != current_trace->name))
- *iter->trace = *current_trace;
+ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+ *iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
+#ifdef CONFIG_TRACER_MAX_TRACE
if (iter->snapshot && iter->trace->use_max_tr)
return ERR_PTR(-EBUSY);
+#endif
if (!iter->snapshot)
atomic_inc(&trace_record_cmdline_disabled);
@@ -1984,7 +2237,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
iter->cpu = 0;
iter->idx = -1;
- if (cpu_file == TRACE_PIPE_ALL_CPU) {
+ if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu)
tracing_iter_reset(iter, cpu);
} else
@@ -2016,17 +2269,21 @@ static void s_stop(struct seq_file *m, void *p)
{
struct trace_iterator *iter = m->private;
+#ifdef CONFIG_TRACER_MAX_TRACE
if (iter->snapshot && iter->trace->use_max_tr)
return;
+#endif
if (!iter->snapshot)
atomic_dec(&trace_record_cmdline_disabled);
+
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
static void
-get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
+get_total_entries(struct trace_buffer *buf,
+ unsigned long *total, unsigned long *entries)
{
unsigned long count;
int cpu;
@@ -2035,19 +2292,19 @@ get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *e
*entries = 0;
for_each_tracing_cpu(cpu) {
- count = ring_buffer_entries_cpu(tr->buffer, cpu);
+ count = ring_buffer_entries_cpu(buf->buffer, cpu);
/*
* If this buffer has skipped entries, then we hold all
* entries for the trace and we need to ignore the
* ones before the time stamp.
*/
- if (tr->data[cpu]->skipped_entries) {
- count -= tr->data[cpu]->skipped_entries;
+ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
+ count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
/* total is the same as the entries */
*total += count;
} else
*total += count +
- ring_buffer_overrun_cpu(tr->buffer, cpu);
+ ring_buffer_overrun_cpu(buf->buffer, cpu);
*entries += count;
}
}
@@ -2064,27 +2321,27 @@ static void print_lat_help_header(struct seq_file *m)
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
-static void print_event_info(struct trace_array *tr, struct seq_file *m)
+static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
unsigned long total;
unsigned long entries;
- get_total_entries(tr, &total, &entries);
+ get_total_entries(buf, &total, &entries);
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
entries, total, num_online_cpus());
seq_puts(m, "#\n");
}
-static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
+static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
- print_event_info(tr, m);
+ print_event_info(buf, m);
seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
-static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
+static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
- print_event_info(tr, m);
+ print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n");
seq_puts(m, "# / _----=> need-resched\n");
seq_puts(m, "# | / _---=> hardirq/softirq\n");
@@ -2098,16 +2355,16 @@ void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_array *tr = iter->tr;
- struct trace_array_cpu *data = tr->data[tr->cpu];
- struct tracer *type = current_trace;
+ struct trace_buffer *buf = iter->trace_buffer;
+ struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
+ struct tracer *type = iter->trace;
unsigned long entries;
unsigned long total;
const char *name = "preemption";
name = type->name;
- get_total_entries(tr, &total, &entries);
+ get_total_entries(buf, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
@@ -2118,7 +2375,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
nsecs_to_usecs(data->saved_latency),
entries,
total,
- tr->cpu,
+ buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
"server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
@@ -2169,7 +2426,7 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
if (cpumask_test_cpu(iter->cpu, iter->started))
return;
- if (iter->tr->data[iter->cpu]->skipped_entries)
+ if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
return;
cpumask_set_cpu(iter->cpu, iter->started);
@@ -2292,14 +2549,14 @@ int trace_empty(struct trace_iterator *iter)
int cpu;
/* If we are looking at one CPU buffer, only check that one */
- if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
cpu = iter->cpu_file;
buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter) {
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
- if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+ if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
return 0;
}
return 1;
@@ -2311,7 +2568,7 @@ int trace_empty(struct trace_iterator *iter)
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
- if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+ if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
return 0;
}
}
@@ -2335,6 +2592,11 @@ enum print_line_t print_trace_line(struct trace_iterator *iter)
return ret;
}
+ if (iter->ent->type == TRACE_BPUTS &&
+ trace_flags & TRACE_ITER_PRINTK &&
+ trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+ return trace_print_bputs_msg_only(iter);
+
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
@@ -2389,9 +2651,9 @@ void trace_default_header(struct seq_file *m)
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
- print_func_help_header_irq(iter->tr, m);
+ print_func_help_header_irq(iter->trace_buffer, m);
else
- print_func_help_header(iter->tr, m);
+ print_func_help_header(iter->trace_buffer, m);
}
}
}
@@ -2405,14 +2667,8 @@ static void test_ftrace_alive(struct seq_file *m)
}
#ifdef CONFIG_TRACER_MAX_TRACE
-static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+static void show_snapshot_main_help(struct seq_file *m)
{
- if (iter->trace->allocated_snapshot)
- seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
- else
- seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
-
- seq_printf(m, "# Snapshot commands:\n");
seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
seq_printf(m, "# Takes a snapshot of the main buffer.\n");
@@ -2420,6 +2676,35 @@ static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
seq_printf(m, "# is not a '0' or '1')\n");
}
+
+static void show_snapshot_percpu_help(struct seq_file *m)
+{
+ seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+ seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+ seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
+#else
+ seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
+ seq_printf(m, "# Must use main snapshot file to allocate.\n");
+#endif
+ seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
+ seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
+ seq_printf(m, "# is not a '0' or '1')\n");
+}
+
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+ if (iter->tr->allocated_snapshot)
+ seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+ else
+ seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+ seq_printf(m, "# Snapshot commands:\n");
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ show_snapshot_main_help(m);
+ else
+ show_snapshot_percpu_help(m);
+}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
@@ -2479,7 +2764,8 @@ static const struct seq_operations tracer_seq_ops = {
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
- long cpu_file = (long) inode->i_private;
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
struct trace_iterator *iter;
int cpu;
@@ -2504,26 +2790,31 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (!iter->trace)
goto fail;
- *iter->trace = *current_trace;
+ *iter->trace = *tr->current_trace;
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- if (current_trace->print_max || snapshot)
- iter->tr = &max_tr;
+ iter->tr = tr;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ /* Currently only the top directory has a snapshot */
+ if (tr->current_trace->print_max || snapshot)
+ iter->trace_buffer = &tr->max_buffer;
else
- iter->tr = &global_trace;
+#endif
+ iter->trace_buffer = &tr->trace_buffer;
iter->snapshot = snapshot;
iter->pos = -1;
mutex_init(&iter->mutex);
- iter->cpu_file = cpu_file;
+ iter->cpu_file = tc->cpu;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
- if (ring_buffer_overruns(iter->tr->buffer))
+ if (ring_buffer_overruns(iter->trace_buffer->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* Output in nanoseconds only if we are using a clock in nanoseconds. */
@@ -2532,12 +2823,12 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
/* stop the trace while dumping if we are not opening "snapshot" */
if (!iter->snapshot)
- tracing_stop();
+ tracing_stop_tr(tr);
- if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->tr->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
@@ -2547,12 +2838,14 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->tr->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
+ tr->ref++;
+
mutex_unlock(&trace_types_lock);
return iter;
@@ -2579,14 +2872,20 @@ static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct trace_iterator *iter;
+ struct trace_array *tr;
int cpu;
if (!(file->f_mode & FMODE_READ))
return 0;
iter = m->private;
+ tr = iter->tr;
mutex_lock(&trace_types_lock);
+
+ WARN_ON(!tr->ref);
+ tr->ref--;
+
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -2597,7 +2896,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (!iter->snapshot)
/* reenable tracing if it was previously enabled */
- tracing_start();
+ tracing_start_tr(tr);
mutex_unlock(&trace_types_lock);
mutex_destroy(&iter->mutex);
@@ -2616,12 +2915,13 @@ static int tracing_open(struct inode *inode, struct file *file)
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
- long cpu = (long) inode->i_private;
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
- if (cpu == TRACE_PIPE_ALL_CPU)
- tracing_reset_online_cpus(&global_trace);
+ if (tc->cpu == RING_BUFFER_ALL_CPUS)
+ tracing_reset_online_cpus(&tr->trace_buffer);
else
- tracing_reset(&global_trace, cpu);
+ tracing_reset(&tr->trace_buffer, tc->cpu);
}
if (file->f_mode & FMODE_READ) {
@@ -2768,8 +3068,9 @@ static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- int err, cpu;
+ struct trace_array *tr = filp->private_data;
cpumask_var_t tracing_cpumask_new;
+ int err, cpu;
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
@@ -2789,13 +3090,13 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
*/
if (cpumask_test_cpu(cpu, tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_inc(&global_trace.data[cpu]->disabled);
- ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
+ atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
+ ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
}
if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
- atomic_dec(&global_trace.data[cpu]->disabled);
- ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
+ atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
+ ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
}
}
arch_spin_unlock(&ftrace_max_lock);
@@ -2824,12 +3125,13 @@ static const struct file_operations tracing_cpumask_fops = {
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
struct tracer_opt *trace_opts;
+ struct trace_array *tr = m->private;
u32 tracer_flags;
int i;
mutex_lock(&trace_types_lock);
- tracer_flags = current_trace->flags->val;
- trace_opts = current_trace->flags->opts;
+ tracer_flags = tr->current_trace->flags->val;
+ trace_opts = tr->current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
@@ -2893,15 +3195,15 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
return 0;
}
-int set_tracer_flag(unsigned int mask, int enabled)
+int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
return 0;
/* Give the tracer a chance to approve the change */
- if (current_trace->flag_changed)
- if (current_trace->flag_changed(current_trace, mask, !!enabled))
+ if (tr->current_trace->flag_changed)
+ if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
return -EINVAL;
if (enabled)
@@ -2913,9 +3215,9 @@ int set_tracer_flag(unsigned int mask, int enabled)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_OVERWRITE) {
- ring_buffer_change_overwrite(global_trace.buffer, enabled);
+ ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
- ring_buffer_change_overwrite(max_tr.buffer, enabled);
+ ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
}
@@ -2925,7 +3227,7 @@ int set_tracer_flag(unsigned int mask, int enabled)
return 0;
}
-static int trace_set_options(char *option)
+static int trace_set_options(struct trace_array *tr, char *option)
{
char *cmp;
int neg = 0;
@@ -2943,14 +3245,14 @@ static int trace_set_options(char *option)
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
- ret = set_tracer_flag(1 << i, !neg);
+ ret = set_tracer_flag(tr, 1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
if (!trace_options[i])
- ret = set_tracer_option(current_trace, cmp, neg);
+ ret = set_tracer_option(tr->current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
@@ -2961,6 +3263,8 @@ static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
+ struct seq_file *m = filp->private_data;
+ struct trace_array *tr = m->private;
char buf[64];
int ret;
@@ -2972,7 +3276,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
buf[cnt] = 0;
- ret = trace_set_options(buf);
+ ret = trace_set_options(tr, buf);
if (ret < 0)
return ret;
@@ -2985,7 +3289,8 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
- return single_open(file, tracing_trace_options_show, NULL);
+
+ return single_open(file, tracing_trace_options_show, inode->i_private);
}
static const struct file_operations tracing_iter_fops = {
@@ -2998,20 +3303,84 @@ static const struct file_operations tracing_iter_fops = {
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
- "# mount -t debugfs nodev /sys/kernel/debug\n\n"
- "# cat /sys/kernel/debug/tracing/available_tracers\n"
- "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
- "# cat /sys/kernel/debug/tracing/current_tracer\n"
- "nop\n"
- "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
- "# cat /sys/kernel/debug/tracing/current_tracer\n"
- "wakeup\n"
- "# cat /sys/kernel/debug/tracing/trace_options\n"
- "noprint-parent nosym-offset nosym-addr noverbose\n"
- "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
- "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
- "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
- "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
+ "# echo 0 > tracing_on : quick way to disable tracing\n"
+ "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
+ " Important files:\n"
+ " trace\t\t\t- The static contents of the buffer\n"
+ "\t\t\t To clear the buffer write into this file: echo > trace\n"
+ " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
+ " current_tracer\t- function and latency tracers\n"
+ " available_tracers\t- list of configured tracers for current_tracer\n"
+ " buffer_size_kb\t- view and modify size of per cpu buffer\n"
+ " buffer_total_size_kb - view total size of all cpu buffers\n\n"
+ " trace_clock\t\t-change the clock used to order events\n"
+ " local: Per cpu clock but may not be synced across CPUs\n"
+ " global: Synced across CPUs but slows tracing down.\n"
+ " counter: Not a clock, but just an increment\n"
+ " uptime: Jiffy counter from time of boot\n"
+ " perf: Same clock that perf events use\n"
+#ifdef CONFIG_X86_64
+ " x86-tsc: TSC cycle counter\n"
+#endif
+ "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
+ " tracing_cpumask\t- Limit which CPUs to trace\n"
+ " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
+ "\t\t\t Remove sub-buffer with rmdir\n"
+ " trace_options\t\t- Set format or modify how tracing happens\n"
+ "\t\t\t Disable an option by adding a suffix 'no' to the option name\n"
+#ifdef CONFIG_DYNAMIC_FTRACE
+ "\n available_filter_functions - list of functions that can be filtered on\n"
+ " set_ftrace_filter\t- echo function name in here to only trace these functions\n"
+ " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+ " modules: Can select a group via module\n"
+ " Format: :mod:<module-name>\n"
+ " example: echo :mod:ext3 > set_ftrace_filter\n"
+ " triggers: a command to perform when function is hit\n"
+ " Format: <function>:<trigger>[:count]\n"
+ " trigger: traceon, traceoff\n"
+ " enable_event:<system>:<event>\n"
+ " disable_event:<system>:<event>\n"
+#ifdef CONFIG_STACKTRACE
+ " stacktrace\n"
+#endif
+#ifdef CONFIG_TRACER_SNAPSHOT
+ " snapshot\n"
+#endif
+ " example: echo do_fault:traceoff > set_ftrace_filter\n"
+ " echo do_trap:traceoff:3 > set_ftrace_filter\n"
+ " The first one will disable tracing every time do_fault is hit\n"
+ " The second will disable tracing at most 3 times when do_trap is hit\n"
+ " The first time do trap is hit and it disables tracing, the counter\n"
+ " will decrement to 2. If tracing is already disabled, the counter\n"
+ " will not decrement. It only decrements when the trigger did work\n"
+ " To remove trigger without count:\n"
+ " echo '!<function>:<trigger> > set_ftrace_filter\n"
+ " To remove trigger with a count:\n"
+ " echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
+ " set_ftrace_notrace\t- echo function name in here to never trace.\n"
+ " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+ " modules: Can select a group via module command :mod:\n"
+ " Does not accept triggers\n"
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_TRACER
+ " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+ " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
+#endif
+#ifdef CONFIG_TRACER_SNAPSHOT
+ "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
+ "\t\t\t Read the contents for more information\n"
+#endif
+#ifdef CONFIG_STACKTRACE
+ " stack_trace\t\t- Shows the max stack trace when active\n"
+ " stack_max_size\t- Shows current max stack size that was traced\n"
+ "\t\t\t Write into this file to reset the max size (trigger a new trace)\n"
+#ifdef CONFIG_DYNAMIC_FTRACE
+ " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
+#endif
+#endif /* CONFIG_STACKTRACE */
;
static ssize_t
@@ -3083,11 +3452,12 @@ static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
+ struct trace_array *tr = filp->private_data;
char buf[MAX_TRACER_SIZE+2];
int r;
mutex_lock(&trace_types_lock);
- r = sprintf(buf, "%s\n", current_trace->name);
+ r = sprintf(buf, "%s\n", tr->current_trace->name);
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3095,43 +3465,48 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
int tracer_init(struct tracer *t, struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
return t->init(tr);
}
-static void set_buffer_entries(struct trace_array *tr, unsigned long val)
+static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
int cpu;
+
for_each_tracing_cpu(cpu)
- tr->data[cpu]->entries = val;
+ per_cpu_ptr(buf->data, cpu)->entries = val;
}
+#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
-static int resize_buffer_duplicate_size(struct trace_array *tr,
- struct trace_array *size_tr, int cpu_id)
+static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
+ struct trace_buffer *size_buf, int cpu_id)
{
int cpu, ret = 0;
if (cpu_id == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
- ret = ring_buffer_resize(tr->buffer,
- size_tr->data[cpu]->entries, cpu);
+ ret = ring_buffer_resize(trace_buf->buffer,
+ per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
if (ret < 0)
break;
- tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+ per_cpu_ptr(trace_buf->data, cpu)->entries =
+ per_cpu_ptr(size_buf->data, cpu)->entries;
}
} else {
- ret = ring_buffer_resize(tr->buffer,
- size_tr->data[cpu_id]->entries, cpu_id);
+ ret = ring_buffer_resize(trace_buf->buffer,
+ per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
if (ret == 0)
- tr->data[cpu_id]->entries =
- size_tr->data[cpu_id]->entries;
+ per_cpu_ptr(trace_buf->data, cpu_id)->entries =
+ per_cpu_ptr(size_buf->data, cpu_id)->entries;
}
return ret;
}
+#endif /* CONFIG_TRACER_MAX_TRACE */
-static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
+static int __tracing_resize_ring_buffer(struct trace_array *tr,
+ unsigned long size, int cpu)
{
int ret;
@@ -3140,23 +3515,25 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
* we use the size that was given, and we can forget about
* expanding it later.
*/
- ring_buffer_expanded = 1;
+ ring_buffer_expanded = true;
/* May be called before buffers are initialized */
- if (!global_trace.buffer)
+ if (!tr->trace_buffer.buffer)
return 0;
- ret = ring_buffer_resize(global_trace.buffer, size, cpu);
+ ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
if (ret < 0)
return ret;
- if (!current_trace->use_max_tr)
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
+ !tr->current_trace->use_max_tr)
goto out;
- ret = ring_buffer_resize(max_tr.buffer, size, cpu);
+ ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
if (ret < 0) {
- int r = resize_buffer_duplicate_size(&global_trace,
- &global_trace, cpu);
+ int r = resize_buffer_duplicate_size(&tr->trace_buffer,
+ &tr->trace_buffer, cpu);
if (r < 0) {
/*
* AARGH! We are left with different
@@ -3179,20 +3556,23 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
}
if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&max_tr, size);
+ set_buffer_entries(&tr->max_buffer, size);
else
- max_tr.data[cpu]->entries = size;
+ per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
out:
+#endif /* CONFIG_TRACER_MAX_TRACE */
+
if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&global_trace, size);
+ set_buffer_entries(&tr->trace_buffer, size);
else
- global_trace.data[cpu]->entries = size;
+ per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
return ret;
}
-static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
+static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
+ unsigned long size, int cpu_id)
{
int ret = size;
@@ -3206,7 +3586,7 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
}
}
- ret = __tracing_resize_ring_buffer(size, cpu_id);
+ ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
if (ret < 0)
ret = -ENOMEM;
@@ -3233,7 +3613,7 @@ int tracing_update_buffers(void)
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
- ret = __tracing_resize_ring_buffer(trace_buf_size,
+ ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
RING_BUFFER_ALL_CPUS);
mutex_unlock(&trace_types_lock);
@@ -3243,7 +3623,7 @@ int tracing_update_buffers(void)
struct trace_option_dentry;
static struct trace_option_dentry *
-create_trace_option_files(struct tracer *tracer);
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
static void
destroy_trace_option_files(struct trace_option_dentry *topts);
@@ -3253,13 +3633,15 @@ static int tracing_set_tracer(const char *buf)
static struct trace_option_dentry *topts;
struct trace_array *tr = &global_trace;
struct tracer *t;
+#ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
+#endif
int ret = 0;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded) {
- ret = __tracing_resize_ring_buffer(trace_buf_size,
+ ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
@@ -3274,18 +3656,21 @@ static int tracing_set_tracer(const char *buf)
ret = -EINVAL;
goto out;
}
- if (t == current_trace)
+ if (t == tr->current_trace)
goto out;
trace_branch_disable();
- current_trace->enabled = false;
+ tr->current_trace->enabled = false;
- if (current_trace->reset)
- current_trace->reset(tr);
+ if (tr->current_trace->reset)
+ tr->current_trace->reset(tr);
- had_max_tr = current_trace->allocated_snapshot;
- current_trace = &nop_trace;
+ /* Current trace needs to be nop_trace before synchronize_sched */
+ tr->current_trace = &nop_trace;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ had_max_tr = tr->allocated_snapshot;
if (had_max_tr && !t->use_max_tr) {
/*
@@ -3296,27 +3681,20 @@ static int tracing_set_tracer(const char *buf)
* so a synchronize_sched() is sufficient.
*/
synchronize_sched();
- /*
- * We don't free the ring buffer. instead, resize it because
- * The max_tr ring buffer has some state (e.g. ring->clock) and
- * we want preserve it.
- */
- ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
- set_buffer_entries(&max_tr, 1);
- tracing_reset_online_cpus(&max_tr);
- current_trace->allocated_snapshot = false;
+ free_snapshot(tr);
}
+#endif
destroy_trace_option_files(topts);
- topts = create_trace_option_files(t);
+ topts = create_trace_option_files(tr, t);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) {
- /* we need to make per cpu buffer sizes equivalent */
- ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
- RING_BUFFER_ALL_CPUS);
+ ret = alloc_snapshot(tr);
if (ret < 0)
goto out;
- t->allocated_snapshot = true;
}
+#endif
if (t->init) {
ret = tracer_init(t, tr);
@@ -3324,8 +3702,8 @@ static int tracing_set_tracer(const char *buf)
goto out;
}
- current_trace = t;
- current_trace->enabled = true;
+ tr->current_trace = t;
+ tr->current_trace->enabled = true;
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
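The ordering in the hunks above (point current_trace at nop_trace first, then synchronize_sched(), then free the snapshot) is the usual publish-then-wait idiom: update_max_tr() runs with interrupts disabled, so one sched grace period guarantees no buffer swap is still in flight when the spare buffer goes away. A stripped-down sketch of the same idiom, with invented names (struct handler, free_handler) standing in for the tracer pieces:

    #include <linux/rcupdate.h>

    /* Illustrative only -- nothing here is kernel API beyond the RCU-sched primitives. */
    struct handler {
            void (*func)(void);
    };

    static struct handler *active_handler;

    static void reader(void)                /* runs non-preemptible */
    {
            struct handler *h;

            rcu_read_lock_sched();
            h = rcu_dereference_sched(active_handler);
            if (h)
                    h->func();
            rcu_read_unlock_sched();
    }

    static void switch_handler(struct handler *new)
    {
            struct handler *old = active_handler;

            rcu_assign_pointer(active_handler, new);  /* publish replacement first */
            synchronize_sched();                      /* wait out all current readers */
            free_handler(old);                        /* hypothetical teardown, now safe */
    }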
@@ -3399,7 +3777,8 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
- long cpu_file = (long) inode->i_private;
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
struct trace_iterator *iter;
int ret = 0;
@@ -3424,7 +3803,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
ret = -ENOMEM;
goto fail;
}
- *iter->trace = *current_trace;
+ *iter->trace = *tr->current_trace;
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM;
@@ -3441,8 +3820,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_clocks[trace_clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
- iter->cpu_file = cpu_file;
- iter->tr = &global_trace;
+ iter->cpu_file = tc->cpu;
+ iter->tr = tc->tr;
+ iter->trace_buffer = &tc->tr->trace_buffer;
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -3481,24 +3861,28 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
}
static unsigned int
-tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
- struct trace_iterator *iter = filp->private_data;
+ /* Iterators are static, they should be filled or empty */
+ if (trace_buffer_iter(iter, iter->cpu_file))
+ return POLLIN | POLLRDNORM;
- if (trace_flags & TRACE_ITER_BLOCK) {
+ if (trace_flags & TRACE_ITER_BLOCK)
/*
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
- } else {
- if (!trace_empty(iter))
- return POLLIN | POLLRDNORM;
- poll_wait(filp, &trace_wait, poll_table);
- if (!trace_empty(iter))
- return POLLIN | POLLRDNORM;
+ else
+ return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
+ filp, poll_table);
+}
- return 0;
- }
+static unsigned int
+tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+{
+ struct trace_iterator *iter = filp->private_data;
+
+ return trace_poll(iter, filp, poll_table);
}
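With ring_buffer_poll_wait() wired up as above, a reader no longer has to busy-read trace_pipe; it can sleep in poll(2) until data arrives. A small user-space sketch (the mount point is an assumption):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Assumed mount point; tracefs/debugfs may live elsewhere. */
    #define TRACE_PIPE "/sys/kernel/debug/tracing/trace_pipe"

    int main(void)
    {
            char buf[4096];
            struct pollfd pfd;
            ssize_t n;

            pfd.fd = open(TRACE_PIPE, O_RDONLY | O_NONBLOCK);
            if (pfd.fd < 0) {
                    perror("open");
                    return 1;
            }
            pfd.events = POLLIN;

            /* Block in poll() instead of spinning on read(). */
            while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                    n = read(pfd.fd, buf, sizeof(buf));
                    if (n <= 0)
                            break;
                    fwrite(buf, 1, n, stdout);
            }
            close(pfd.fd);
            return 0;
    }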
/*
@@ -3564,6 +3948,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
+ struct trace_array *tr = iter->tr;
ssize_t sret;
/* return any leftover data */
@@ -3575,8 +3960,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
- if (unlikely(iter->trace->name != current_trace->name))
- *iter->trace = *current_trace;
+ if (unlikely(iter->trace->name != tr->current_trace->name))
+ *iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
/*
@@ -3732,6 +4117,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
+ struct trace_array *tr = iter->tr;
ssize_t ret;
size_t rem;
unsigned int i;
@@ -3741,8 +4127,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
- if (unlikely(iter->trace->name != current_trace->name))
- *iter->trace = *current_trace;
+ if (unlikely(iter->trace->name != tr->current_trace->name))
+ *iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
mutex_lock(&iter->mutex);
@@ -3804,43 +4190,19 @@ out_err:
goto out;
}
-struct ftrace_entries_info {
- struct trace_array *tr;
- int cpu;
-};
-
-static int tracing_entries_open(struct inode *inode, struct file *filp)
-{
- struct ftrace_entries_info *info;
-
- if (tracing_disabled)
- return -ENODEV;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->tr = &global_trace;
- info->cpu = (unsigned long)inode->i_private;
-
- filp->private_data = info;
-
- return 0;
-}
-
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct ftrace_entries_info *info = filp->private_data;
- struct trace_array *tr = info->tr;
+ struct trace_cpu *tc = filp->private_data;
+ struct trace_array *tr = tc->tr;
char buf[64];
int r = 0;
ssize_t ret;
mutex_lock(&trace_types_lock);
- if (info->cpu == RING_BUFFER_ALL_CPUS) {
+ if (tc->cpu == RING_BUFFER_ALL_CPUS) {
int cpu, buf_size_same;
unsigned long size;
@@ -3850,8 +4212,8 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
for_each_tracing_cpu(cpu) {
/* fill in the size from first enabled cpu */
if (size == 0)
- size = tr->data[cpu]->entries;
- if (size != tr->data[cpu]->entries) {
+ size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
+ if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
buf_size_same = 0;
break;
}
@@ -3867,7 +4229,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
} else
r = sprintf(buf, "X\n");
} else
- r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
+ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
mutex_unlock(&trace_types_lock);
@@ -3879,7 +4241,7 @@ static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct ftrace_entries_info *info = filp->private_data;
+ struct trace_cpu *tc = filp->private_data;
unsigned long val;
int ret;
@@ -3894,7 +4256,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
/* value is in KB */
val <<= 10;
- ret = tracing_resize_ring_buffer(val, info->cpu);
+ ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
if (ret < 0)
return ret;
@@ -3903,16 +4265,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
return cnt;
}
-static int
-tracing_entries_release(struct inode *inode, struct file *filp)
-{
- struct ftrace_entries_info *info = filp->private_data;
-
- kfree(info);
-
- return 0;
-}
-
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -3924,7 +4276,7 @@ tracing_total_entries_read(struct file *filp, char __user *ubuf,
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
- size += tr->data[cpu]->entries >> 10;
+ size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
@@ -3954,11 +4306,13 @@ tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
+ struct trace_array *tr = inode->i_private;
+
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
tracing_off();
/* resize the ring buffer to 0 */
- tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
+ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
return 0;
}
@@ -4027,7 +4381,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* possible \n added */
- buffer = global_trace.buffer;
+ buffer = global_trace.trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, preempt_count());
if (!event) {
@@ -4069,13 +4423,14 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
static int tracing_clock_show(struct seq_file *m, void *v)
{
+ struct trace_array *tr = m->private;
int i;
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
seq_printf(m,
"%s%s%s%s", i ? " " : "",
- i == trace_clock_id ? "[" : "", trace_clocks[i].name,
- i == trace_clock_id ? "]" : "");
+ i == tr->clock_id ? "[" : "", trace_clocks[i].name,
+ i == tr->clock_id ? "]" : "");
seq_putc(m, '\n');
return 0;
@@ -4084,6 +4439,8 @@ static int tracing_clock_show(struct seq_file *m, void *v)
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
+ struct seq_file *m = filp->private_data;
+ struct trace_array *tr = m->private;
char buf[64];
const char *clockstr;
int i;
@@ -4105,20 +4462,23 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
if (i == ARRAY_SIZE(trace_clocks))
return -EINVAL;
- trace_clock_id = i;
-
mutex_lock(&trace_types_lock);
- ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
- if (max_tr.buffer)
- ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+ tr->clock_id = i;
+
+ ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
/*
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&global_trace);
- tracing_reset_online_cpus(&max_tr);
+ tracing_reset_online_cpus(&global_trace.trace_buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+ tracing_reset_online_cpus(&global_trace.max_buffer);
+#endif
mutex_unlock(&trace_types_lock);
@@ -4131,20 +4491,45 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
- return single_open(file, tracing_clock_show, NULL);
+
+ return single_open(file, tracing_clock_show, inode->i_private);
}
+struct ftrace_buffer_info {
+ struct trace_iterator iter;
+ void *spare;
+ unsigned int read;
+};
+
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
+ struct trace_cpu *tc = inode->i_private;
struct trace_iterator *iter;
+ struct seq_file *m;
int ret = 0;
if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file, true);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
+ } else {
+ /* Writes still need the seq_file to hold the private data */
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter) {
+ kfree(m);
+ return -ENOMEM;
+ }
+ iter->tr = tc->tr;
+ iter->trace_buffer = &tc->tr->max_buffer;
+ iter->cpu_file = tc->cpu;
+ m->private = iter;
+ file->private_data = m;
}
+
return ret;
}
@@ -4152,6 +4537,9 @@ static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
+ struct seq_file *m = filp->private_data;
+ struct trace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
unsigned long val;
int ret;
@@ -4165,40 +4553,48 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
mutex_lock(&trace_types_lock);
- if (current_trace->use_max_tr) {
+ if (tr->current_trace->use_max_tr) {
ret = -EBUSY;
goto out;
}
switch (val) {
case 0:
- if (current_trace->allocated_snapshot) {
- /* free spare buffer */
- ring_buffer_resize(max_tr.buffer, 1,
- RING_BUFFER_ALL_CPUS);
- set_buffer_entries(&max_tr, 1);
- tracing_reset_online_cpus(&max_tr);
- current_trace->allocated_snapshot = false;
+ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+ ret = -EINVAL;
+ break;
}
+ if (tr->allocated_snapshot)
+ free_snapshot(tr);
break;
case 1:
- if (!current_trace->allocated_snapshot) {
- /* allocate spare buffer */
- ret = resize_buffer_duplicate_size(&max_tr,
- &global_trace, RING_BUFFER_ALL_CPUS);
+/* Only allow per-cpu swap if the ring buffer supports it */
+#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
+ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
+ ret = -EINVAL;
+ break;
+ }
+#endif
+ if (!tr->allocated_snapshot) {
+ ret = alloc_snapshot(tr);
if (ret < 0)
break;
- current_trace->allocated_snapshot = true;
}
-
local_irq_disable();
/* Now, we're going to swap */
- update_max_tr(&global_trace, current, smp_processor_id());
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ update_max_tr(tr, current, smp_processor_id());
+ else
+ update_max_tr_single(tr, current, iter->cpu_file);
local_irq_enable();
break;
default:
- if (current_trace->allocated_snapshot)
- tracing_reset_online_cpus(&max_tr);
+ if (tr->allocated_snapshot) {
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ tracing_reset_online_cpus(&tr->max_buffer);
+ else
+ tracing_reset(&tr->max_buffer, iter->cpu_file);
+ }
break;
}
@@ -4210,6 +4606,51 @@ out:
mutex_unlock(&trace_types_lock);
return ret;
}
+
+static int tracing_snapshot_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+
+ if (file->f_mode & FMODE_READ)
+ return tracing_release(inode, file);
+
+ /* If write only, the seq_file is just a stub */
+ if (m)
+ kfree(m->private);
+ kfree(m);
+
+ return 0;
+}
+
+static int tracing_buffers_open(struct inode *inode, struct file *filp);
+static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos);
+static int tracing_buffers_release(struct inode *inode, struct file *file);
+static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+
+static int snapshot_raw_open(struct inode *inode, struct file *filp)
+{
+ struct ftrace_buffer_info *info;
+ int ret;
+
+ ret = tracing_buffers_open(inode, filp);
+ if (ret < 0)
+ return ret;
+
+ info = filp->private_data;
+
+ if (info->iter.trace->use_max_tr) {
+ tracing_buffers_release(inode, filp);
+ return -EBUSY;
+ }
+
+ info->iter.snapshot = true;
+ info->iter.trace_buffer = &info->iter.tr->max_buffer;
+
+ return ret;
+}
+
#endif /* CONFIG_TRACER_SNAPSHOT */
@@ -4237,10 +4678,9 @@ static const struct file_operations tracing_pipe_fops = {
};
static const struct file_operations tracing_entries_fops = {
- .open = tracing_entries_open,
+ .open = tracing_open_generic,
.read = tracing_entries_read,
.write = tracing_entries_write,
- .release = tracing_entries_release,
.llseek = generic_file_llseek,
};
@@ -4275,20 +4715,23 @@ static const struct file_operations snapshot_fops = {
.read = seq_read,
.write = tracing_snapshot_write,
.llseek = tracing_seek,
- .release = tracing_release,
+ .release = tracing_snapshot_release,
};
-#endif /* CONFIG_TRACER_SNAPSHOT */
-struct ftrace_buffer_info {
- struct trace_array *tr;
- void *spare;
- int cpu;
- unsigned int read;
+static const struct file_operations snapshot_raw_fops = {
+ .open = snapshot_raw_open,
+ .read = tracing_buffers_read,
+ .release = tracing_buffers_release,
+ .splice_read = tracing_buffers_splice_read,
+ .llseek = no_llseek,
};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
- int cpu = (int)(long)inode->i_private;
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
struct ftrace_buffer_info *info;
if (tracing_disabled)
@@ -4298,72 +4741,131 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
if (!info)
return -ENOMEM;
- info->tr = &global_trace;
- info->cpu = cpu;
- info->spare = NULL;
+ mutex_lock(&trace_types_lock);
+
+ tr->ref++;
+
+ info->iter.tr = tr;
+ info->iter.cpu_file = tc->cpu;
+ info->iter.trace = tr->current_trace;
+ info->iter.trace_buffer = &tr->trace_buffer;
+ info->spare = NULL;
/* Force reading ring buffer for first read */
- info->read = (unsigned int)-1;
+ info->read = (unsigned int)-1;
filp->private_data = info;
+ mutex_unlock(&trace_types_lock);
+
return nonseekable_open(inode, filp);
}
+static unsigned int
+tracing_buffers_poll(struct file *filp, poll_table *poll_table)
+{
+ struct ftrace_buffer_info *info = filp->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ return trace_poll(iter, filp, poll_table);
+}
+
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ftrace_buffer_info *info = filp->private_data;
+ struct trace_iterator *iter = &info->iter;
ssize_t ret;
- size_t size;
+ ssize_t size;
if (!count)
return 0;
+ mutex_lock(&trace_types_lock);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
+ size = -EBUSY;
+ goto out_unlock;
+ }
+#endif
+
if (!info->spare)
- info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
+ info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+ iter->cpu_file);
+ size = -ENOMEM;
if (!info->spare)
- return -ENOMEM;
+ goto out_unlock;
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
- trace_access_lock(info->cpu);
- ret = ring_buffer_read_page(info->tr->buffer,
+ again:
+ trace_access_lock(iter->cpu_file);
+ ret = ring_buffer_read_page(iter->trace_buffer->buffer,
&info->spare,
count,
- info->cpu, 0);
- trace_access_unlock(info->cpu);
- if (ret < 0)
- return 0;
+ iter->cpu_file, 0);
+ trace_access_unlock(iter->cpu_file);
- info->read = 0;
+ if (ret < 0) {
+ if (trace_empty(iter)) {
+ if ((filp->f_flags & O_NONBLOCK)) {
+ size = -EAGAIN;
+ goto out_unlock;
+ }
+ mutex_unlock(&trace_types_lock);
+ iter->trace->wait_pipe(iter);
+ mutex_lock(&trace_types_lock);
+ if (signal_pending(current)) {
+ size = -EINTR;
+ goto out_unlock;
+ }
+ goto again;
+ }
+ size = 0;
+ goto out_unlock;
+ }
-read:
+ info->read = 0;
+ read:
size = PAGE_SIZE - info->read;
if (size > count)
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
- if (ret == size)
- return -EFAULT;
+ if (ret == size) {
+ size = -EFAULT;
+ goto out_unlock;
+ }
size -= ret;
*ppos += size;
info->read += size;
+ out_unlock:
+ mutex_unlock(&trace_types_lock);
+
return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ mutex_lock(&trace_types_lock);
+
+ WARN_ON(!iter->tr->ref);
+ iter->tr->ref--;
if (info->spare)
- ring_buffer_free_read_page(info->tr->buffer, info->spare);
+ ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
kfree(info);
+ mutex_unlock(&trace_types_lock);
+
return 0;
}
@@ -4428,6 +4930,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
unsigned int flags)
{
struct ftrace_buffer_info *info = file->private_data;
+ struct trace_iterator *iter = &info->iter;
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct page *pages_def[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
@@ -4440,10 +4943,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
};
struct buffer_ref *ref;
int entries, size, i;
- size_t ret;
+ ssize_t ret;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
+ mutex_lock(&trace_types_lock);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
+ ret = -EBUSY;
+ goto out;
+ }
+#endif
+
+ if (splice_grow_spd(pipe, &spd)) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (*ppos & (PAGE_SIZE - 1)) {
ret = -EINVAL;
@@ -4458,8 +4972,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
len &= PAGE_MASK;
}
- trace_access_lock(info->cpu);
- entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
+ again:
+ trace_access_lock(iter->cpu_file);
+ entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
struct page *page;
@@ -4470,15 +4985,15 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
break;
ref->ref = 1;
- ref->buffer = info->tr->buffer;
- ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
+ ref->buffer = iter->trace_buffer->buffer;
+ ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (!ref->page) {
kfree(ref);
break;
}
r = ring_buffer_read_page(ref->buffer, &ref->page,
- len, info->cpu, 1);
+ len, iter->cpu_file, 1);
if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
@@ -4502,31 +5017,40 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
spd.nr_pages++;
*ppos += PAGE_SIZE;
- entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
+ entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
}
- trace_access_unlock(info->cpu);
+ trace_access_unlock(iter->cpu_file);
spd.nr_pages = i;
/* did we read anything? */
if (!spd.nr_pages) {
- if (flags & SPLICE_F_NONBLOCK)
+ if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
ret = -EAGAIN;
- else
- ret = 0;
- /* TODO: block */
- goto out;
+ goto out;
+ }
+ mutex_unlock(&trace_types_lock);
+ iter->trace->wait_pipe(iter);
+ mutex_lock(&trace_types_lock);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ goto again;
}
ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
out:
+ mutex_unlock(&trace_types_lock);
+
return ret;
}
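tracing_buffers_splice_read() above is what makes the per-cpu trace_pipe_raw files splice-able, so whole ring-buffer pages can be moved to a file or socket without a user-space copy. A rough consumer sketch (the path and the fixed 4096-byte transfer size are assumptions; a real reader would query the actual subbuffer size):

    #define _GNU_SOURCE             /* for splice() */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Assumed layout: raw buffer for cpu 0 under a debugfs mount. */
    #define RAW "/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw"

    int main(void)
    {
            int raw = open(RAW, O_RDONLY | O_NONBLOCK);
            int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            int p[2];
            ssize_t n;

            if (raw < 0 || out < 0 || pipe(p) < 0) {
                    perror("setup");
                    return 1;
            }

            /* splice() needs a pipe in the middle: raw -> pipe -> file. */
            while ((n = splice(raw, NULL, p[1], NULL, 4096, 0)) > 0) {
                    if (splice(p[0], NULL, out, NULL, n, 0) != n)
                            break;
            }
            return 0;
    }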
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
+ .poll = tracing_buffers_poll,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
.llseek = no_llseek,
@@ -4536,12 +5060,14 @@ static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
- unsigned long cpu = (unsigned long)filp->private_data;
- struct trace_array *tr = &global_trace;
+ struct trace_cpu *tc = filp->private_data;
+ struct trace_array *tr = tc->tr;
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
struct trace_seq *s;
unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
+ int cpu = tc->cpu;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -4549,41 +5075,41 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
trace_seq_init(s);
- cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "entries: %ld\n", cnt);
- cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "overrun: %ld\n", cnt);
- cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
- cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
if (trace_clocks[trace_clock_id].in_ns) {
/* local or global for trace_clock */
- t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
t, usec_rem);
- t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+ t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
} else {
/* counter or tsc mode for trace_clock */
trace_seq_printf(s, "oldest event ts: %llu\n",
- ring_buffer_oldest_event_ts(tr->buffer, cpu));
+ ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
trace_seq_printf(s, "now ts: %llu\n",
- ring_buffer_time_stamp(tr->buffer, cpu));
+ ring_buffer_time_stamp(trace_buf->buffer, cpu));
}
- cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "dropped events: %ld\n", cnt);
- cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+ cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "read events: %ld\n", cnt);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
@@ -4635,60 +5161,161 @@ static const struct file_operations tracing_dyn_info_fops = {
.read = tracing_read_dyn_info,
.llseek = generic_file_llseek,
};
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
-static struct dentry *d_tracer;
+#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
+static void
+ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ tracing_snapshot();
+}
-struct dentry *tracing_init_dentry(void)
+static void
+ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ unsigned long *count = (long *)data;
+
+ if (!*count)
+ return;
+
+ if (*count != -1)
+ (*count)--;
+
+ tracing_snapshot();
+}
+
+static int
+ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ long count = (long)data;
+
+ seq_printf(m, "%ps:", (void *)ip);
+
+ seq_printf(m, "snapshot");
+
+ if (count == -1)
+ seq_printf(m, ":unlimited\n");
+ else
+ seq_printf(m, ":count=%ld\n", count);
+
+ return 0;
+}
+
+static struct ftrace_probe_ops snapshot_probe_ops = {
+ .func = ftrace_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static struct ftrace_probe_ops snapshot_count_probe_ops = {
+ .func = ftrace_count_snapshot,
+ .print = ftrace_snapshot_print,
+};
+
+static int
+ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
{
- static int once;
+ struct ftrace_probe_ops *ops;
+ void *count = (void *)-1;
+ char *number;
+ int ret;
- if (d_tracer)
- return d_tracer;
+ /* hash funcs only work with set_ftrace_filter */
+ if (!enable)
+ return -EINVAL;
+
+ ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
+
+ if (glob[0] == '!') {
+ unregister_ftrace_function_probe_func(glob+1, ops);
+ return 0;
+ }
+
+ if (!param)
+ goto out_reg;
+
+ number = strsep(&param, ":");
+
+ if (!strlen(number))
+ goto out_reg;
+
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = kstrtoul(number, 0, (unsigned long *)&count);
+ if (ret)
+ return ret;
+
+ out_reg:
+ ret = register_ftrace_function_probe(glob, ops, count);
+
+ if (ret >= 0)
+ alloc_snapshot(&global_trace);
+
+ return ret < 0 ? ret : 0;
+}
+
+static struct ftrace_func_command ftrace_snapshot_cmd = {
+ .name = "snapshot",
+ .func = ftrace_trace_snapshot_callback,
+};
+
+static int register_snapshot_cmd(void)
+{
+ return register_ftrace_command(&ftrace_snapshot_cmd);
+}
+#else
+static inline int register_snapshot_cmd(void) { return 0; }
+#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
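ftrace_trace_snapshot_callback() above takes an optional ':count' parameter: a missing or empty count means "unlimited" and is stored as -1, otherwise the number is parsed with strsep()/kstrtoul(). A user-space analog of that parsing, keeping the same "missing means unlimited" convention:

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* No count (or an empty one) means unlimited, encoded as -1. */
    static long parse_count(char *param)
    {
            char *number;

            if (!param)
                    return -1;

            number = strsep(&param, ":");
            if (!number || !strlen(number))
                    return -1;

            return strtol(number, NULL, 0);
    }

    int main(void)
    {
            char five[] = "5", empty[] = "";

            /* Prints: 5 -1 -1 */
            printf("%ld %ld %ld\n",
                   parse_count(five), parse_count(empty), parse_count(NULL));
            return 0;
    }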
+
+struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
+{
+ if (tr->dir)
+ return tr->dir;
if (!debugfs_initialized())
return NULL;
- d_tracer = debugfs_create_dir("tracing", NULL);
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
+ tr->dir = debugfs_create_dir("tracing", NULL);
- if (!d_tracer && !once) {
- once = 1;
- pr_warning("Could not create debugfs directory 'tracing'\n");
- return NULL;
- }
+ if (!tr->dir)
+ pr_warn_once("Could not create debugfs directory 'tracing'\n");
- return d_tracer;
+ return tr->dir;
}
-static struct dentry *d_percpu;
+struct dentry *tracing_init_dentry(void)
+{
+ return tracing_init_dentry_tr(&global_trace);
+}
-static struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
- static int once;
struct dentry *d_tracer;
- if (d_percpu)
- return d_percpu;
-
- d_tracer = tracing_init_dentry();
+ if (tr->percpu_dir)
+ return tr->percpu_dir;
+ d_tracer = tracing_init_dentry_tr(tr);
if (!d_tracer)
return NULL;
- d_percpu = debugfs_create_dir("per_cpu", d_tracer);
+ tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
- if (!d_percpu && !once) {
- once = 1;
- pr_warning("Could not create debugfs directory 'per_cpu'\n");
- return NULL;
- }
+ WARN_ONCE(!tr->percpu_dir,
+ "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
- return d_percpu;
+ return tr->percpu_dir;
}
-static void tracing_init_debugfs_percpu(long cpu)
+static void
+tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
- struct dentry *d_percpu = tracing_dentry_percpu();
+ struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -4704,20 +5331,28 @@ static void tracing_init_debugfs_percpu(long cpu)
/* per cpu trace_pipe */
trace_create_file("trace_pipe", 0444, d_cpu,
- (void *) cpu, &tracing_pipe_fops);
+ (void *)&data->trace_cpu, &tracing_pipe_fops);
/* per cpu trace */
trace_create_file("trace", 0644, d_cpu,
- (void *) cpu, &tracing_fops);
+ (void *)&data->trace_cpu, &tracing_fops);
trace_create_file("trace_pipe_raw", 0444, d_cpu,
- (void *) cpu, &tracing_buffers_fops);
+ (void *)&data->trace_cpu, &tracing_buffers_fops);
trace_create_file("stats", 0444, d_cpu,
- (void *) cpu, &tracing_stats_fops);
+ (void *)&data->trace_cpu, &tracing_stats_fops);
trace_create_file("buffer_size_kb", 0444, d_cpu,
- (void *) cpu, &tracing_entries_fops);
+ (void *)&data->trace_cpu, &tracing_entries_fops);
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_file("snapshot", 0644, d_cpu,
+ (void *)&data->trace_cpu, &snapshot_fops);
+
+ trace_create_file("snapshot_raw", 0444, d_cpu,
+ (void *)&data->trace_cpu, &snapshot_raw_fops);
+#endif
}
#ifdef CONFIG_FTRACE_SELFTEST
@@ -4728,6 +5363,7 @@ static void tracing_init_debugfs_percpu(long cpu)
struct trace_option_dentry {
struct tracer_opt *opt;
struct tracer_flags *flags;
+ struct trace_array *tr;
struct dentry *entry;
};
@@ -4763,7 +5399,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
- ret = __set_tracer_option(current_trace, topt->flags,
+ ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
topt->opt, !val);
mutex_unlock(&trace_types_lock);
if (ret)
@@ -4802,6 +5438,7 @@ static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
+ struct trace_array *tr = &global_trace;
long index = (long)filp->private_data;
unsigned long val;
int ret;
@@ -4814,7 +5451,7 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
return -EINVAL;
mutex_lock(&trace_types_lock);
- ret = set_tracer_flag(1 << index, val);
+ ret = set_tracer_flag(tr, 1 << index, val);
mutex_unlock(&trace_types_lock);
if (ret < 0)
@@ -4848,40 +5485,41 @@ struct dentry *trace_create_file(const char *name,
}
-static struct dentry *trace_options_init_dentry(void)
+static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
struct dentry *d_tracer;
- static struct dentry *t_options;
- if (t_options)
- return t_options;
+ if (tr->options)
+ return tr->options;
- d_tracer = tracing_init_dentry();
+ d_tracer = tracing_init_dentry_tr(tr);
if (!d_tracer)
return NULL;
- t_options = debugfs_create_dir("options", d_tracer);
- if (!t_options) {
+ tr->options = debugfs_create_dir("options", d_tracer);
+ if (!tr->options) {
pr_warning("Could not create debugfs directory 'options'\n");
return NULL;
}
- return t_options;
+ return tr->options;
}
static void
-create_trace_option_file(struct trace_option_dentry *topt,
+create_trace_option_file(struct trace_array *tr,
+ struct trace_option_dentry *topt,
struct tracer_flags *flags,
struct tracer_opt *opt)
{
struct dentry *t_options;
- t_options = trace_options_init_dentry();
+ t_options = trace_options_init_dentry(tr);
if (!t_options)
return;
topt->flags = flags;
topt->opt = opt;
+ topt->tr = tr;
topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
&trace_options_fops);
@@ -4889,7 +5527,7 @@ create_trace_option_file(struct trace_option_dentry *topt,
}
static struct trace_option_dentry *
-create_trace_option_files(struct tracer *tracer)
+create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
struct trace_option_dentry *topts;
struct tracer_flags *flags;
@@ -4914,7 +5552,7 @@ create_trace_option_files(struct tracer *tracer)
return NULL;
for (cnt = 0; opts[cnt].name; cnt++)
- create_trace_option_file(&topts[cnt], flags,
+ create_trace_option_file(tr, &topts[cnt], flags,
&opts[cnt]);
return topts;
@@ -4937,11 +5575,12 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
}
static struct dentry *
-create_trace_option_core_file(const char *option, long index)
+create_trace_option_core_file(struct trace_array *tr,
+ const char *option, long index)
{
struct dentry *t_options;
- t_options = trace_options_init_dentry();
+ t_options = trace_options_init_dentry(tr);
if (!t_options)
return NULL;
@@ -4949,17 +5588,17 @@ create_trace_option_core_file(const char *option, long index)
&trace_options_core_fops);
}
-static __init void create_trace_options_dir(void)
+static __init void create_trace_options_dir(struct trace_array *tr)
{
struct dentry *t_options;
int i;
- t_options = trace_options_init_dentry();
+ t_options = trace_options_init_dentry(tr);
if (!t_options)
return;
for (i = 0; trace_options[i]; i++)
- create_trace_option_core_file(trace_options[i], i);
+ create_trace_option_core_file(tr, trace_options[i], i);
}
static ssize_t
@@ -4967,7 +5606,7 @@ rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
char buf[64];
int r;
@@ -4986,7 +5625,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
unsigned long val;
int ret;
@@ -4998,12 +5637,12 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
mutex_lock(&trace_types_lock);
if (val) {
ring_buffer_record_on(buffer);
- if (current_trace->start)
- current_trace->start(tr);
+ if (tr->current_trace->start)
+ tr->current_trace->start(tr);
} else {
ring_buffer_record_off(buffer);
- if (current_trace->stop)
- current_trace->stop(tr);
+ if (tr->current_trace->stop)
+ tr->current_trace->stop(tr);
}
mutex_unlock(&trace_types_lock);
}
@@ -5020,23 +5659,310 @@ static const struct file_operations rb_simple_fops = {
.llseek = default_llseek,
};
+struct dentry *trace_instance_dir;
+
+static void
+init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
+
+static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
+{
+ int cpu;
+
+ for_each_tracing_cpu(cpu) {
+ memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
+ per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
+ per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
+ }
+}
+
+static int
+allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
+{
+ enum ring_buffer_flags rb_flags;
+
+ rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
+
+ buf->buffer = ring_buffer_alloc(size, rb_flags);
+ if (!buf->buffer)
+ return -ENOMEM;
+
+ buf->data = alloc_percpu(struct trace_array_cpu);
+ if (!buf->data) {
+ ring_buffer_free(buf->buffer);
+ return -ENOMEM;
+ }
+
+ init_trace_buffers(tr, buf);
+
+ /* Allocate the first page for all buffers */
+ set_buffer_entries(buf,
+ ring_buffer_size(buf->buffer, 0));
+
+ return 0;
+}
+
+static int allocate_trace_buffers(struct trace_array *tr, int size)
+{
+ int ret;
+
+ ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ret = allocate_trace_buffer(tr, &tr->max_buffer,
+ allocate_snapshot ? size : 1);
+ if (WARN_ON(ret)) {
+ ring_buffer_free(tr->trace_buffer.buffer);
+ free_percpu(tr->trace_buffer.data);
+ return -ENOMEM;
+ }
+ tr->allocated_snapshot = allocate_snapshot;
+
+ /*
+ * Only the top level trace array gets its snapshot allocated
+ * from the kernel command line.
+ */
+ allocate_snapshot = false;
+#endif
+ return 0;
+}
+
+static int new_instance_create(const char *name)
+{
+ struct trace_array *tr;
+ int ret;
+
+ mutex_lock(&trace_types_lock);
+
+ ret = -EEXIST;
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr->name && strcmp(tr->name, name) == 0)
+ goto out_unlock;
+ }
+
+ ret = -ENOMEM;
+ tr = kzalloc(sizeof(*tr), GFP_KERNEL);
+ if (!tr)
+ goto out_unlock;
+
+ tr->name = kstrdup(name, GFP_KERNEL);
+ if (!tr->name)
+ goto out_free_tr;
+
+ raw_spin_lock_init(&tr->start_lock);
+
+ tr->current_trace = &nop_trace;
+
+ INIT_LIST_HEAD(&tr->systems);
+ INIT_LIST_HEAD(&tr->events);
+
+ if (allocate_trace_buffers(tr, trace_buf_size) < 0)
+ goto out_free_tr;
+
+ /* Holder for file callbacks */
+ tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
+ tr->trace_cpu.tr = tr;
+
+ tr->dir = debugfs_create_dir(name, trace_instance_dir);
+ if (!tr->dir)
+ goto out_free_tr;
+
+ ret = event_trace_add_tracer(tr->dir, tr);
+ if (ret)
+ goto out_free_tr;
+
+ init_tracer_debugfs(tr, tr->dir);
+
+ list_add(&tr->list, &ftrace_trace_arrays);
+
+ mutex_unlock(&trace_types_lock);
+
+ return 0;
+
+ out_free_tr:
+ if (tr->trace_buffer.buffer)
+ ring_buffer_free(tr->trace_buffer.buffer);
+ kfree(tr->name);
+ kfree(tr);
+
+ out_unlock:
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+
+}
+
+static int instance_delete(const char *name)
+{
+ struct trace_array *tr;
+ int found = 0;
+ int ret;
+
+ mutex_lock(&trace_types_lock);
+
+ ret = -ENODEV;
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr->name && strcmp(tr->name, name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ goto out_unlock;
+
+ ret = -EBUSY;
+ if (tr->ref)
+ goto out_unlock;
+
+ list_del(&tr->list);
+
+ event_trace_del_tracer(tr);
+ debugfs_remove_recursive(tr->dir);
+ free_percpu(tr->trace_buffer.data);
+ ring_buffer_free(tr->trace_buffer.buffer);
+
+ kfree(tr->name);
+ kfree(tr);
+
+ ret = 0;
+
+ out_unlock:
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+}
+
+static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
+{
+ struct dentry *parent;
+ int ret;
+
+ /* Paranoid: Make sure the parent is the "instances" directory */
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ if (WARN_ON_ONCE(parent != trace_instance_dir))
+ return -ENOENT;
+
+ /*
+ * The inode mutex is locked, but debugfs_create_dir() will also
+ * take the mutex. As the instances directory can not be destroyed
+ * or changed in any other way, it is safe to unlock it, and
+ * let the dentry try. If two users try to make the same dir at
+ * the same time, then the new_instance_create() will determine the
+ * winner.
+ */
+ mutex_unlock(&inode->i_mutex);
+
+ ret = new_instance_create(dentry->d_iname);
+
+ mutex_lock(&inode->i_mutex);
+
+ return ret;
+}
+
+static int instance_rmdir(struct inode *inode, struct dentry *dentry)
+{
+ struct dentry *parent;
+ int ret;
+
+ /* Paranoid: Make sure the parent is the "instances" directory */
+ parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
+ if (WARN_ON_ONCE(parent != trace_instance_dir))
+ return -ENOENT;
+
+ /* The caller did a dget() on dentry */
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ /*
+ * The inode mutex is locked, but debugfs_remove_recursive() will
+ * also take the mutex. As the instances directory can not be
+ * destroyed or changed in any other way, it is safe to unlock it,
+ * and let the dentry try. If two users try to remove the same dir
+ * at the same time, then instance_delete() will determine the
+ * winner.
+ */
+ mutex_unlock(&inode->i_mutex);
+
+ ret = instance_delete(dentry->d_iname);
+
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+ mutex_lock(&dentry->d_inode->i_mutex);
+
+ return ret;
+}
+
+static const struct inode_operations instance_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = instance_mkdir,
+ .rmdir = instance_rmdir,
+};
+
+static __init void create_trace_instances(struct dentry *d_tracer)
+{
+ trace_instance_dir = debugfs_create_dir("instances", d_tracer);
+ if (WARN_ON(!trace_instance_dir))
+ return;
+
+ /* Hijack the dir inode operations, to allow mkdir */
+ trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
+}
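Once the "instances" directory has its mkdir/rmdir hooks, a separate trace buffer is created and torn down with ordinary directory operations. A minimal user-space sketch (mount point assumed, and the instance name is arbitrary):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Assumed mount point; tracefs/debugfs may live elsewhere. */
    #define INST "/sys/kernel/debug/tracing/instances/foo"

    int main(void)
    {
            /* Creates a new trace_array with its own buffers and files. */
            if (mkdir(INST, 0755) && errno != EEXIST) {
                    perror("mkdir");
                    return 1;
            }

            /* ... configure and use INST/trace, INST/tracing_on, ... */

            /* Fails with EBUSY while the instance still has readers. */
            if (rmdir(INST))
                    perror("rmdir");
            return 0;
    }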
+
+static void
+init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
+{
+ int cpu;
+
+ trace_create_file("trace_options", 0644, d_tracer,
+ tr, &tracing_iter_fops);
+
+ trace_create_file("trace", 0644, d_tracer,
+ (void *)&tr->trace_cpu, &tracing_fops);
+
+ trace_create_file("trace_pipe", 0444, d_tracer,
+ (void *)&tr->trace_cpu, &tracing_pipe_fops);
+
+ trace_create_file("buffer_size_kb", 0644, d_tracer,
+ (void *)&tr->trace_cpu, &tracing_entries_fops);
+
+ trace_create_file("buffer_total_size_kb", 0444, d_tracer,
+ tr, &tracing_total_entries_fops);
+
+ trace_create_file("free_buffer", 0644, d_tracer,
+ tr, &tracing_free_buffer_fops);
+
+ trace_create_file("trace_marker", 0220, d_tracer,
+ tr, &tracing_mark_fops);
+
+ trace_create_file("trace_clock", 0644, d_tracer, tr,
+ &trace_clock_fops);
+
+ trace_create_file("tracing_on", 0644, d_tracer,
+ tr, &rb_simple_fops);
+
+#ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_file("snapshot", 0644, d_tracer,
+ (void *)&tr->trace_cpu, &snapshot_fops);
+#endif
+
+ for_each_tracing_cpu(cpu)
+ tracing_init_debugfs_percpu(tr, cpu);
+
+}
+
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
- int cpu;
trace_access_lock_init();
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
- trace_create_file("trace_options", 0644, d_tracer,
- NULL, &tracing_iter_fops);
+ init_tracer_debugfs(&global_trace, d_tracer);
trace_create_file("tracing_cpumask", 0644, d_tracer,
- NULL, &tracing_cpumask_fops);
-
- trace_create_file("trace", 0644, d_tracer,
- (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
+ &global_trace, &tracing_cpumask_fops);
trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops);
@@ -5055,44 +5981,17 @@ static __init int tracer_init_debugfs(void)
trace_create_file("README", 0444, d_tracer,
NULL, &tracing_readme_fops);
- trace_create_file("trace_pipe", 0444, d_tracer,
- (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
-
- trace_create_file("buffer_size_kb", 0644, d_tracer,
- (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
-
- trace_create_file("buffer_total_size_kb", 0444, d_tracer,
- &global_trace, &tracing_total_entries_fops);
-
- trace_create_file("free_buffer", 0644, d_tracer,
- &global_trace, &tracing_free_buffer_fops);
-
- trace_create_file("trace_marker", 0220, d_tracer,
- NULL, &tracing_mark_fops);
-
trace_create_file("saved_cmdlines", 0444, d_tracer,
NULL, &tracing_saved_cmdlines_fops);
- trace_create_file("trace_clock", 0644, d_tracer, NULL,
- &trace_clock_fops);
-
- trace_create_file("tracing_on", 0644, d_tracer,
- &global_trace, &rb_simple_fops);
-
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
-#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", 0644, d_tracer,
- (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
-#endif
+ create_trace_instances(d_tracer);
- create_trace_options_dir();
-
- for_each_tracing_cpu(cpu)
- tracing_init_debugfs_percpu(cpu);
+ create_trace_options_dir(&global_trace);
return 0;
}
@@ -5148,8 +6047,8 @@ void
trace_printk_seq(struct trace_seq *s)
{
/* Probably should print a warning here. */
- if (s->len >= 1000)
- s->len = 1000;
+ if (s->len >= TRACE_MAX_PRINT)
+ s->len = TRACE_MAX_PRINT;
/* should be zero ended, but we are paranoid. */
s->buffer[s->len] = 0;
@@ -5162,46 +6061,43 @@ trace_printk_seq(struct trace_seq *s)
void trace_init_global_iter(struct trace_iterator *iter)
{
iter->tr = &global_trace;
- iter->trace = current_trace;
- iter->cpu_file = TRACE_PIPE_ALL_CPU;
+ iter->trace = iter->tr->current_trace;
+ iter->cpu_file = RING_BUFFER_ALL_CPUS;
+ iter->trace_buffer = &global_trace.trace_buffer;
}
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ static atomic_t dump_running;
unsigned int old_userobj;
- static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
- /* only one dump */
- local_irq_save(flags);
- arch_spin_lock(&ftrace_dump_lock);
- if (dump_ran)
- goto out;
-
- dump_ran = 1;
+ /* Only allow one dump user at a time. */
+ if (atomic_inc_return(&dump_running) != 1) {
+ atomic_dec(&dump_running);
+ return;
+ }
+ /*
+ * Always turn off tracing when we dump.
+ * We don't need to show trace output of what happens
+ * between multiple crashes.
+ *
+ * If the user does a sysrq-z, then they can re-enable
+ * tracing with echo 1 > tracing_on.
+ */
tracing_off();
- /* Did function tracer already get disabled? */
- if (ftrace_is_dead()) {
- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
- printk("# MAY BE MISSING FUNCTION EVENTS\n");
- }
-
- if (disable_tracing)
- ftrace_kill();
+ local_irq_save(flags);
/* Simulate the iterator */
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
- atomic_inc(&iter.tr->data[cpu]->disabled);
+ atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
}
old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -5211,7 +6107,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
switch (oops_dump_mode) {
case DUMP_ALL:
- iter.cpu_file = TRACE_PIPE_ALL_CPU;
+ iter.cpu_file = RING_BUFFER_ALL_CPUS;
break;
case DUMP_ORIG:
iter.cpu_file = raw_smp_processor_id();
@@ -5220,11 +6116,17 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
goto out_enable;
default:
printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
- iter.cpu_file = TRACE_PIPE_ALL_CPU;
+ iter.cpu_file = RING_BUFFER_ALL_CPUS;
}
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Did function tracer already get disabled? */
+ if (ftrace_is_dead()) {
+ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+ printk("# MAY BE MISSING FUNCTION EVENTS\n");
+ }
+
/*
* We need to stop all tracing on all CPUS to read the
* next buffer. This is a bit expensive, but is
@@ -5264,33 +6166,19 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");
out_enable:
- /* Re-enable tracing if requested */
- if (!disable_tracing) {
- trace_flags |= old_userobj;
+ trace_flags |= old_userobj;
- for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
- }
- tracing_on();
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
-
- out:
- arch_spin_unlock(&ftrace_dump_lock);
+ atomic_dec(&dump_running);
local_irq_restore(flags);
}
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
- __ftrace_dump(true, oops_dump_mode);
-}
EXPORT_SYMBOL_GPL(ftrace_dump);
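The dump_running counter above replaces the old one-shot spinlock: atomic_inc_return() both claims and tests the slot, and a loser must drop its own increment before bailing out so that a later oops can still dump. The same guard in isolation, with hypothetical names:

    #include <linux/atomic.h>

    static atomic_t busy;   /* hypothetical, implicitly zero */

    static void do_exclusive_work(void)
    {
            if (atomic_inc_return(&busy) != 1) {
                    /* Lost the race: undo our increment and give up. */
                    atomic_dec(&busy);
                    return;
            }

            /* ... work that must not run concurrently or recursively ... */

            atomic_dec(&busy);      /* let the next caller in */
    }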
__init static int tracer_alloc_buffers(void)
{
int ring_buf_size;
- enum ring_buffer_flags rb_flags;
- int i;
int ret = -ENOMEM;
@@ -5311,49 +6199,27 @@ __init static int tracer_alloc_buffers(void)
else
ring_buf_size = 1;
- rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
-
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
+ raw_spin_lock_init(&global_trace.start_lock);
+
/* TODO: make the number of buffers hot pluggable with CPUS */
- global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
- if (!global_trace.buffer) {
+ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
goto out_free_cpumask;
}
+
if (global_trace.buffer_disabled)
tracing_off();
-
-#ifdef CONFIG_TRACER_MAX_TRACE
- max_tr.buffer = ring_buffer_alloc(1, rb_flags);
- if (!max_tr.buffer) {
- printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
- WARN_ON(1);
- ring_buffer_free(global_trace.buffer);
- goto out_free_cpumask;
- }
-#endif
-
- /* Allocate the first page for all buffers */
- for_each_tracing_cpu(i) {
- global_trace.data[i] = &per_cpu(global_trace_cpu, i);
- max_tr.data[i] = &per_cpu(max_tr_data, i);
- }
-
- set_buffer_entries(&global_trace,
- ring_buffer_size(global_trace.buffer, 0));
-#ifdef CONFIG_TRACER_MAX_TRACE
- set_buffer_entries(&max_tr, 1);
-#endif
-
trace_init_cmdlines();
- init_irq_work(&trace_work_wakeup, trace_wake_up);
register_tracer(&nop_trace);
+ global_trace.current_trace = &nop_trace;
+
/* All seems OK, enable tracing */
tracing_disabled = 0;
@@ -5362,16 +6228,32 @@ __init static int tracer_alloc_buffers(void)
register_die_notifier(&trace_die_notifier);
+ global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
+
+ /* Holder for file callbacks */
+ global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
+ global_trace.trace_cpu.tr = &global_trace;
+
+ INIT_LIST_HEAD(&global_trace.systems);
+ INIT_LIST_HEAD(&global_trace.events);
+ list_add(&global_trace.list, &ftrace_trace_arrays);
+
while (trace_boot_options) {
char *option;
option = strsep(&trace_boot_options, ",");
- trace_set_options(option);
+ trace_set_options(&global_trace, option);
}
+ register_snapshot_cmd();
+
return 0;
out_free_cpumask:
+ free_percpu(global_trace.trace_buffer.data);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ free_percpu(global_trace.max_buffer.data);
+#endif
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2081971367e..9e014582e76 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -13,6 +13,11 @@
#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>
+#ifdef CONFIG_FTRACE_SYSCALLS
+#include <asm/unistd.h> /* For NR_SYSCALLS */
+#include <asm/syscall.h> /* some archs define it here */
+#endif
+
enum trace_type {
__TRACE_FIRST_TYPE = 0,
@@ -29,6 +34,7 @@ enum trace_type {
TRACE_GRAPH_ENT,
TRACE_USER_STACK,
TRACE_BLK,
+ TRACE_BPUTS,
__TRACE_LAST_TYPE,
};
@@ -127,12 +133,21 @@ enum trace_flag_type {
#define TRACE_BUF_SIZE 1024
+struct trace_array;
+
+struct trace_cpu {
+ struct trace_array *tr;
+ struct dentry *dir;
+ int cpu;
+};
+
/*
* The CPU trace array - it consists of thousands of trace entries
* plus some other descriptor data: (for example which task started
* the trace, etc.)
*/
struct trace_array_cpu {
+ struct trace_cpu trace_cpu;
atomic_t disabled;
void *buffer_page; /* ring buffer spare */
@@ -151,20 +166,83 @@ struct trace_array_cpu {
char comm[TASK_COMM_LEN];
};
+struct tracer;
+
+struct trace_buffer {
+ struct trace_array *tr;
+ struct ring_buffer *buffer;
+ struct trace_array_cpu __percpu *data;
+ cycle_t time_start;
+ int cpu;
+};
+
/*
* The trace array - an array of per-CPU trace arrays. This is the
* highest level data structure that individual tracers deal with.
* They have on/off state as well:
*/
struct trace_array {
- struct ring_buffer *buffer;
- int cpu;
+ struct list_head list;
+ char *name;
+ struct trace_buffer trace_buffer;
+#ifdef CONFIG_TRACER_MAX_TRACE
+ /*
+ * The max_buffer is used to snapshot the trace when a maximum
+ * latency is reached, or when the user initiates a snapshot.
+ * Some tracers will use this to store a maximum trace while
+ * it continues examining live traces.
+ *
+ * The buffers for the max_buffer are set up the same as the trace_buffer
+ * When a snapshot is taken, the buffer of the max_buffer is swapped
+ * with the buffer of the trace_buffer and the buffers are reset for
+ * the trace_buffer so the tracing can continue.
+ */
+ struct trace_buffer max_buffer;
+ bool allocated_snapshot;
+#endif
int buffer_disabled;
- cycle_t time_start;
+ struct trace_cpu trace_cpu; /* place holder */
+#ifdef CONFIG_FTRACE_SYSCALLS
+ int sys_refcount_enter;
+ int sys_refcount_exit;
+ DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
+ DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+#endif
+ int stop_count;
+ int clock_id;
+ struct tracer *current_trace;
+ unsigned int flags;
+ raw_spinlock_t start_lock;
+ struct dentry *dir;
+ struct dentry *options;
+ struct dentry *percpu_dir;
+ struct dentry *event_dir;
+ struct list_head systems;
+ struct list_head events;
struct task_struct *waiter;
- struct trace_array_cpu *data[NR_CPUS];
+ int ref;
};
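
The max_buffer comment above describes the snapshot mechanism in prose. As a hedged sketch of what the swap amounts to (the real update path in trace.c also handles per-CPU data and timestamps; the helper name here is invented):

static void demo_snapshot_swap(struct trace_array *tr)
{
	struct ring_buffer *buf = tr->trace_buffer.buffer;

	/* freeze the live data in max_buffer, keep tracing into the old one */
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;
}
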
+enum {
+ TRACE_ARRAY_FL_GLOBAL = (1 << 0)
+};
+
+extern struct list_head ftrace_trace_arrays;
+
+/*
+ * The global tracer (top) should be the first trace array added,
+ * but we check the flag anyway.
+ */
+static inline struct trace_array *top_trace_array(void)
+{
+ struct trace_array *tr;
+
+ tr = list_entry(ftrace_trace_arrays.prev,
+ typeof(*tr), list);
+ WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
+ return tr;
+}
+
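
top_trace_array() reads the list's .prev entry because list_add() inserts at the head: the array added first (global_trace, in tracer_alloc_buffers()) ends up at the tail once later instances are added. A small sketch with a hypothetical demo type:

#include <linux/list.h>

struct demo_array {
	struct list_head list;
	const char *name;
};

static LIST_HEAD(demo_arrays);

static void demo_order(void)
{
	static struct demo_array global = { .name = "global" };
	static struct demo_array inst = { .name = "instance1" };
	struct demo_array *first_added;

	list_add(&global.list, &demo_arrays);	/* added first, like global_trace */
	list_add(&inst.list, &demo_arrays);	/* newer entries go to the front */

	/* list_add() prepends, so the oldest entry sits at demo_arrays.prev */
	first_added = list_entry(demo_arrays.prev, struct demo_array, list);
	(void)first_added;			/* first_added == &global here */
}
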
#define FTRACE_CMP_TYPE(var, type) \
__builtin_types_compatible_p(typeof(var), type *)
@@ -200,6 +278,7 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
+ IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
@@ -289,9 +368,10 @@ struct tracer {
struct tracer *next;
struct tracer_flags *flags;
bool print_max;
- bool use_max_tr;
- bool allocated_snapshot;
bool enabled;
+#ifdef CONFIG_TRACER_MAX_TRACE
+ bool use_max_tr;
+#endif
};
@@ -427,8 +507,6 @@ static __always_inline void trace_clear_recursion(int bit)
current->trace_recursion = val;
}
-#define TRACE_PIPE_ALL_CPU -1
-
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
@@ -439,10 +517,10 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void tracing_reset(struct trace_array *tr, int cpu);
-void tracing_reset_online_cpus(struct trace_array *tr);
+void tracing_reset(struct trace_buffer *buf, int cpu);
+void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
-void tracing_reset_current_online_cpus(void);
+void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
umode_t mode,
@@ -450,6 +528,7 @@ struct dentry *trace_create_file(const char *name,
void *data,
const struct file_operations *fops);
+struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void);
struct ring_buffer_event;
@@ -583,7 +662,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
-extern int ring_buffer_expanded;
+extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled);
@@ -619,6 +698,8 @@ trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...);
+int trace_array_printk_buf(struct ring_buffer *buffer,
+ unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);
@@ -786,6 +867,7 @@ enum trace_iterator_flags {
TRACE_ITER_STOP_ON_FREE = 0x400000,
TRACE_ITER_IRQ_INFO = 0x800000,
TRACE_ITER_MARKERS = 0x1000000,
+ TRACE_ITER_FUNCTION = 0x2000000,
};
/*
@@ -832,8 +914,8 @@ enum {
struct ftrace_event_field {
struct list_head link;
- char *name;
- char *type;
+ const char *name;
+ const char *type;
int filter_type;
int offset;
int size;
@@ -851,12 +933,19 @@ struct event_filter {
struct event_subsystem {
struct list_head list;
const char *name;
- struct dentry *entry;
struct event_filter *filter;
- int nr_events;
int ref_count;
};
+struct ftrace_subsystem_dir {
+ struct list_head list;
+ struct event_subsystem *subsystem;
+ struct trace_array *tr;
+ struct dentry *entry;
+ int ref_count;
+ int nr_events;
+};
+
#define FILTER_PRED_INVALID ((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15)
@@ -906,22 +995,20 @@ struct filter_pred {
unsigned short right;
};
-extern struct list_head ftrace_common_fields;
-
extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call,
struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
char *filter_string);
-extern int apply_subsystem_event_filter(struct event_subsystem *system,
+extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s);
extern int filter_assign_type(const char *type);
-struct list_head *
-trace_get_fields(struct ftrace_event_call *event_call);
+struct ftrace_event_field *
+trace_find_event_field(struct ftrace_event_call *call, char *name);
static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
@@ -938,6 +1025,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
}
extern void trace_event_enable_cmd_record(bool enable);
+extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+extern int event_trace_del_tracer(struct trace_array *tr);
extern struct mutex event_mutex;
extern struct list_head ftrace_events;
@@ -948,7 +1037,18 @@ extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
-int set_tracer_flag(unsigned int mask, int enabled);
+int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
+
+/*
+ * Normal trace_printk() and friends allocate special buffers
+ * to do the manipulation, as well as save the print formats
+ * into sections to display. But the trace infrastructure wants
+ * to use these without the added overhead at the price of being
+ * a bit slower (used mainly for warnings, where we don't care
+ * about performance). The internal_trace_puts() is for such
+ * a purpose.
+ */
+#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
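
A hedged usage sketch: the macro above is meant for constant strings emitted from inside the tracing core itself, where the trace_printk() format bookkeeping is unwanted (the function name below is illustrative only):

static void demo_warn_in_tracer(void)
{
	/* drops the literal straight into the ring buffer, tagged with _THIS_IP_ */
	internal_trace_puts("*** demo: snapshot buffer not allocated ***\n");
}
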
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 95e96842ed2..d594da0dc03 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -32,6 +32,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
struct ftrace_event_call *call = &event_branch;
struct trace_array *tr = branch_tracer;
+ struct trace_array_cpu *data;
struct ring_buffer_event *event;
struct trace_branch *entry;
struct ring_buffer *buffer;
@@ -51,11 +52,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+ if (atomic_inc_return(&data->disabled) != 1)
goto out;
pc = preempt_count();
- buffer = tr->buffer;
+ buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
sizeof(*entry), flags, pc);
if (!event)
@@ -80,7 +82,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
__buffer_unlock_commit(buffer, event);
out:
- atomic_dec(&tr->data[cpu]->disabled);
+ atomic_dec(&data->disabled);
local_irq_restore(flags);
}
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index aa8f5f48dae..26dc348332b 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -57,6 +57,16 @@ u64 notrace trace_clock(void)
return local_clock();
}
+/*
+ * trace_clock_jiffies(): Simply use jiffies as a clock counter.
+ */
+u64 notrace trace_clock_jiffies(void)
+{
+ u64 jiffy = jiffies - INITIAL_JIFFIES;
+
+ /* Return nsecs */
+ return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+}
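
A worked example of the conversion, assuming HZ == 250 (one jiffy == 4 ms; the numbers are illustrative, not taken from the patch):

/*
 *   jiffy                = 3 ticks since boot
 *   jiffies_to_usecs(3)  = 12000 us
 *   * 1000ULL            = 12000000 ns
 *
 * The clock only advances in whole-jiffy steps, but is reported in
 * nanoseconds, giving a very cheap, globally consistent timestamp.
 */
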
/*
* trace_clock_global(): special globally coherent trace clock
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4108e1250ca..e2d027ac66a 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -223,8 +223,8 @@ FTRACE_ENTRY(bprint, bprint_entry,
__dynamic_array( u32, buf )
),
- F_printk("%08lx fmt:%p",
- __entry->ip, __entry->fmt),
+ F_printk("%pf: %s",
+ (void *)__entry->ip, __entry->fmt),
FILTER_OTHER
);
@@ -238,8 +238,23 @@ FTRACE_ENTRY(print, print_entry,
__dynamic_array( char, buf )
),
- F_printk("%08lx %s",
- __entry->ip, __entry->buf),
+ F_printk("%pf: %s",
+ (void *)__entry->ip, __entry->buf),
+
+ FILTER_OTHER
+);
+
+FTRACE_ENTRY(bputs, bputs_entry,
+
+ TRACE_BPUTS,
+
+ F_STRUCT(
+ __field( unsigned long, ip )
+ __field( const char *, str )
+ ),
+
+ F_printk("%pf: %s",
+ (void *)__entry->ip, __entry->str),
FILTER_OTHER
);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 57e9b284250..53582e982e5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -34,9 +34,27 @@ char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);
LIST_HEAD(ftrace_events);
-LIST_HEAD(ftrace_common_fields);
+static LIST_HEAD(ftrace_common_fields);
-struct list_head *
+#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
+
+static struct kmem_cache *field_cachep;
+static struct kmem_cache *file_cachep;
+
+/* Double loops, do not use break, only goto's work */
+#define do_for_each_event_file(tr, file) \
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
+ list_for_each_entry(file, &tr->events, list)
+
+#define do_for_each_event_file_safe(tr, file) \
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
+ struct ftrace_event_file *___n; \
+ list_for_each_entry_safe(file, ___n, &tr->events, list)
+
+#define while_for_each_event_file() \
+ }
+
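
A usage sketch of the helpers above (the counting function is hypothetical): the macros expand to two nested list_for_each_entry() loops, which is why a bare break only leaves the inner per-array loop and goto must be used to leave both.

/* caller holds event_mutex */
static int demo_count_enabled_files(void)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;
	int count = 0;

	do_for_each_event_file(tr, file) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			count++;
	} while_for_each_event_file();

	return count;
}
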
+static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
if (!event_call->class->get_fields)
@@ -44,23 +62,45 @@ trace_get_fields(struct ftrace_event_call *event_call)
return event_call->class->get_fields(event_call);
}
+static struct ftrace_event_field *
+__find_event_field(struct list_head *head, char *name)
+{
+ struct ftrace_event_field *field;
+
+ list_for_each_entry(field, head, link) {
+ if (!strcmp(field->name, name))
+ return field;
+ }
+
+ return NULL;
+}
+
+struct ftrace_event_field *
+trace_find_event_field(struct ftrace_event_call *call, char *name)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head;
+
+ field = __find_event_field(&ftrace_common_fields, name);
+ if (field)
+ return field;
+
+ head = trace_get_fields(call);
+ return __find_event_field(head, name);
+}
+
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type)
{
struct ftrace_event_field *field;
- field = kzalloc(sizeof(*field), GFP_KERNEL);
+ field = kmem_cache_alloc(field_cachep, GFP_TRACE);
if (!field)
goto err;
- field->name = kstrdup(name, GFP_KERNEL);
- if (!field->name)
- goto err;
-
- field->type = kstrdup(type, GFP_KERNEL);
- if (!field->type)
- goto err;
+ field->name = name;
+ field->type = type;
if (filter_type == FILTER_OTHER)
field->filter_type = filter_assign_type(type);
@@ -76,9 +116,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
return 0;
err:
- if (field)
- kfree(field->name);
- kfree(field);
+ kmem_cache_free(field_cachep, field);
return -ENOMEM;
}
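
The switch above from kzalloc()+kstrdup() to a dedicated slab cache works because the name/type strings passed to trace_define_field() are string literals generated by the TRACE_EVENT() macros, so storing the pointers is enough. A hedged sketch of the same pattern with a hypothetical struct:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_field {
	struct list_head link;
	const char *name;
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	/* KMEM_CACHE() sizes and names the cache from the struct itself */
	demo_cachep = KMEM_CACHE(demo_field, SLAB_PANIC);
	return 0;
}

static struct demo_field *demo_field_alloc(const char *name)
{
	/* GFP_KERNEL | __GFP_ZERO hands back zeroed objects, like kzalloc() did */
	struct demo_field *f = kmem_cache_alloc(demo_cachep, GFP_KERNEL | __GFP_ZERO);

	if (f)
		f->name = name;		/* string literal: no kstrdup() required */
	return f;
}
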
@@ -120,7 +158,7 @@ static int trace_define_common_fields(void)
return ret;
}
-void trace_destroy_fields(struct ftrace_event_call *call)
+static void trace_destroy_fields(struct ftrace_event_call *call)
{
struct ftrace_event_field *field, *next;
struct list_head *head;
@@ -128,9 +166,7 @@ void trace_destroy_fields(struct ftrace_event_call *call)
head = trace_get_fields(call);
list_for_each_entry_safe(field, next, head, link) {
list_del(&field->link);
- kfree(field->type);
- kfree(field->name);
- kfree(field);
+ kmem_cache_free(field_cachep, field);
}
}
@@ -149,15 +185,17 @@ EXPORT_SYMBOL_GPL(trace_event_raw_init);
int ftrace_event_reg(struct ftrace_event_call *call,
enum trace_reg type, void *data)
{
+ struct ftrace_event_file *file = data;
+
switch (type) {
case TRACE_REG_REGISTER:
return tracepoint_probe_register(call->name,
call->class->probe,
- call);
+ file);
case TRACE_REG_UNREGISTER:
tracepoint_probe_unregister(call->name,
call->class->probe,
- call);
+ file);
return 0;
#ifdef CONFIG_PERF_EVENTS
@@ -183,54 +221,100 @@ EXPORT_SYMBOL_GPL(ftrace_event_reg);
void trace_event_enable_cmd_record(bool enable)
{
- struct ftrace_event_call *call;
+ struct ftrace_event_file *file;
+ struct trace_array *tr;
mutex_lock(&event_mutex);
- list_for_each_entry(call, &ftrace_events, list) {
- if (!(call->flags & TRACE_EVENT_FL_ENABLED))
+ do_for_each_event_file(tr, file) {
+
+ if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
continue;
if (enable) {
tracing_start_cmdline_record();
- call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+ set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
} else {
tracing_stop_cmdline_record();
- call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+ clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
- }
+ } while_for_each_event_file();
mutex_unlock(&event_mutex);
}
-static int ftrace_event_enable_disable(struct ftrace_event_call *call,
- int enable)
+static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
+ int enable, int soft_disable)
{
+ struct ftrace_event_call *call = file->event_call;
int ret = 0;
+ int disable;
switch (enable) {
case 0:
- if (call->flags & TRACE_EVENT_FL_ENABLED) {
- call->flags &= ~TRACE_EVENT_FL_ENABLED;
- if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
+ /*
+ * When soft_disable is set and enable is cleared, we want
+ * to clear the SOFT_DISABLED flag but leave the event in the
+ * state that it was. That is, if the event was enabled and
+ * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
+ * is set we do not want the event to be enabled before we
+ * clear the bit.
+ *
+ * When soft_disable is not set but the SOFT_MODE flag is,
+ * we do nothing. Do not disable the tracepoint, otherwise
+		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
+ */
+ if (soft_disable) {
+ disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
+ clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+ } else
+ disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
+
+ if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
+ clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
+ if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
tracing_stop_cmdline_record();
- call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
+ clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
- call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
+ call->class->reg(call, TRACE_REG_UNREGISTER, file);
}
+ /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT */
+ if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
break;
case 1:
- if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
+ /*
+ * When soft_disable is set and enable is set, we want to
+ * register the tracepoint for the event, but leave the event
+ * as is. That means, if the event was already enabled, we do
+ * nothing (but set SOFT_MODE). If the event is disabled, we
+ * set SOFT_DISABLED before enabling the event tracepoint, so
+ * it still seems to be disabled.
+ */
+ if (!soft_disable)
+ clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+ else
+ set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+
+ if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
+
+ /* Keep the event disabled, when going to SOFT_MODE. */
+ if (soft_disable)
+ set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
+
if (trace_flags & TRACE_ITER_RECORD_CMD) {
tracing_start_cmdline_record();
- call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
+ set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
}
- ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
+ ret = call->class->reg(call, TRACE_REG_REGISTER, file);
if (ret) {
tracing_stop_cmdline_record();
pr_info("event trace: Could not enable event "
"%s\n", call->name);
break;
}
- call->flags |= TRACE_EVENT_FL_ENABLED;
+ set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
+
+ /* WAS_ENABLED gets set but never cleared. */
+ call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
}
break;
}
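
A hedged summary of the flag combinations handled above, expressed as a helper that is not part of the patch: ENABLED means the tracepoint is registered, SOFT_MODE means something else (such as a function probe) is managing the event indirectly, and SOFT_DISABLED keeps output suppressed until the event is "soft enabled".

static bool demo_event_is_recording(struct ftrace_event_file *file)
{
	/* registered and not currently suppressed by a soft disable */
	return (file->flags & FTRACE_EVENT_FL_ENABLED) &&
	       !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED);
}
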
@@ -238,13 +322,19 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
return ret;
}
-static void ftrace_clear_events(void)
+static int ftrace_event_enable_disable(struct ftrace_event_file *file,
+ int enable)
{
- struct ftrace_event_call *call;
+ return __ftrace_event_enable_disable(file, enable, 0);
+}
+
+static void ftrace_clear_events(struct trace_array *tr)
+{
+ struct ftrace_event_file *file;
mutex_lock(&event_mutex);
- list_for_each_entry(call, &ftrace_events, list) {
- ftrace_event_enable_disable(call, 0);
+ list_for_each_entry(file, &tr->events, list) {
+ ftrace_event_enable_disable(file, 0);
}
mutex_unlock(&event_mutex);
}
@@ -257,11 +347,12 @@ static void __put_system(struct event_subsystem *system)
if (--system->ref_count)
return;
+ list_del(&system->list);
+
if (filter) {
kfree(filter->filter_string);
kfree(filter);
}
- kfree(system->name);
kfree(system);
}
@@ -271,24 +362,45 @@ static void __get_system(struct event_subsystem *system)
system->ref_count++;
}
-static void put_system(struct event_subsystem *system)
+static void __get_system_dir(struct ftrace_subsystem_dir *dir)
+{
+ WARN_ON_ONCE(dir->ref_count == 0);
+ dir->ref_count++;
+ __get_system(dir->subsystem);
+}
+
+static void __put_system_dir(struct ftrace_subsystem_dir *dir)
+{
+ WARN_ON_ONCE(dir->ref_count == 0);
+ /* If the subsystem is about to be freed, the dir must be too */
+ WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
+
+ __put_system(dir->subsystem);
+ if (!--dir->ref_count)
+ kfree(dir);
+}
+
+static void put_system(struct ftrace_subsystem_dir *dir)
{
mutex_lock(&event_mutex);
- __put_system(system);
+ __put_system_dir(dir);
mutex_unlock(&event_mutex);
}
/*
* __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
*/
-static int __ftrace_set_clr_event(const char *match, const char *sub,
- const char *event, int set)
+static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+ const char *sub, const char *event, int set)
{
+ struct ftrace_event_file *file;
struct ftrace_event_call *call;
int ret = -EINVAL;
mutex_lock(&event_mutex);
- list_for_each_entry(call, &ftrace_events, list) {
+ list_for_each_entry(file, &tr->events, list) {
+
+ call = file->event_call;
if (!call->name || !call->class || !call->class->reg)
continue;
@@ -307,7 +419,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
if (event && strcmp(event, call->name) != 0)
continue;
- ftrace_event_enable_disable(call, set);
+ ftrace_event_enable_disable(file, set);
ret = 0;
}
@@ -316,7 +428,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
return ret;
}
-static int ftrace_set_clr_event(char *buf, int set)
+static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
char *event = NULL, *sub = NULL, *match;
@@ -344,7 +456,7 @@ static int ftrace_set_clr_event(char *buf, int set)
event = NULL;
}
- return __ftrace_set_clr_event(match, sub, event, set);
+ return __ftrace_set_clr_event(tr, match, sub, event, set);
}
/**
@@ -361,7 +473,9 @@ static int ftrace_set_clr_event(char *buf, int set)
*/
int trace_set_clr_event(const char *system, const char *event, int set)
{
- return __ftrace_set_clr_event(NULL, system, event, set);
+ struct trace_array *tr = top_trace_array();
+
+ return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
@@ -373,6 +487,8 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_parser parser;
+ struct seq_file *m = file->private_data;
+ struct trace_array *tr = m->private;
ssize_t read, ret;
if (!cnt)
@@ -395,7 +511,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
parser.buffer[parser.idx] = 0;
- ret = ftrace_set_clr_event(parser.buffer + !set, set);
+ ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
if (ret)
goto out_put;
}
@@ -411,17 +527,20 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = v;
+ struct ftrace_event_file *file = v;
+ struct ftrace_event_call *call;
+ struct trace_array *tr = m->private;
(*pos)++;
- list_for_each_entry_continue(call, &ftrace_events, list) {
+ list_for_each_entry_continue(file, &tr->events, list) {
+ call = file->event_call;
/*
* The ftrace subsystem is for showing formats only.
* They can not be enabled or disabled via the event files.
*/
if (call->class && call->class->reg)
- return call;
+ return file;
}
return NULL;
@@ -429,30 +548,32 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_call *call;
+ struct ftrace_event_file *file;
+ struct trace_array *tr = m->private;
loff_t l;
mutex_lock(&event_mutex);
- call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+ file = list_entry(&tr->events, struct ftrace_event_file, list);
for (l = 0; l <= *pos; ) {
- call = t_next(m, call, &l);
- if (!call)
+ file = t_next(m, file, &l);
+ if (!file)
break;
}
- return call;
+ return file;
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = v;
+ struct ftrace_event_file *file = v;
+ struct trace_array *tr = m->private;
(*pos)++;
- list_for_each_entry_continue(call, &ftrace_events, list) {
- if (call->flags & TRACE_EVENT_FL_ENABLED)
- return call;
+ list_for_each_entry_continue(file, &tr->events, list) {
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ return file;
}
return NULL;
@@ -460,23 +581,25 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
- struct ftrace_event_call *call;
+ struct ftrace_event_file *file;
+ struct trace_array *tr = m->private;
loff_t l;
mutex_lock(&event_mutex);
- call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+ file = list_entry(&tr->events, struct ftrace_event_file, list);
for (l = 0; l <= *pos; ) {
- call = s_next(m, call, &l);
- if (!call)
+ file = s_next(m, file, &l);
+ if (!file)
break;
}
- return call;
+ return file;
}
static int t_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = v;
+ struct ftrace_event_file *file = v;
+ struct ftrace_event_call *call = file->event_call;
if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
seq_printf(m, "%s:", call->class->system);
@@ -494,25 +617,31 @@ static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_file *file = filp->private_data;
char *buf;
- if (call->flags & TRACE_EVENT_FL_ENABLED)
- buf = "1\n";
- else
+ if (file->flags & FTRACE_EVENT_FL_ENABLED) {
+ if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ buf = "0*\n";
+ else
+ buf = "1\n";
+ } else
buf = "0\n";
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_file *file = filp->private_data;
unsigned long val;
int ret;
+ if (!file)
+ return -EINVAL;
+
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -525,7 +654,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
case 0:
case 1:
mutex_lock(&event_mutex);
- ret = ftrace_event_enable_disable(call, val);
+ ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -543,14 +672,18 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
const char set_to_char[4] = { '?', '0', '1', 'X' };
- struct event_subsystem *system = filp->private_data;
+ struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct event_subsystem *system = dir->subsystem;
struct ftrace_event_call *call;
+ struct ftrace_event_file *file;
+ struct trace_array *tr = dir->tr;
char buf[2];
int set = 0;
int ret;
mutex_lock(&event_mutex);
- list_for_each_entry(call, &ftrace_events, list) {
+ list_for_each_entry(file, &tr->events, list) {
+ call = file->event_call;
if (!call->name || !call->class || !call->class->reg)
continue;
@@ -562,7 +695,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
* or if all events or cleared, or if we have
* a mixture.
*/
- set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
+ set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
/*
* If we have a mixture, no need to look further.
@@ -584,7 +717,8 @@ static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct event_subsystem *system = filp->private_data;
+ struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct event_subsystem *system = dir->subsystem;
const char *name = NULL;
unsigned long val;
ssize_t ret;
@@ -607,7 +741,7 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (system)
name = system->name;
- ret = __ftrace_set_clr_event(NULL, name, NULL, val);
+ ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
if (ret)
goto out;
@@ -845,43 +979,75 @@ static LIST_HEAD(event_subsystems);
static int subsystem_open(struct inode *inode, struct file *filp)
{
struct event_subsystem *system = NULL;
+ struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
+ struct trace_array *tr;
int ret;
- if (!inode->i_private)
- goto skip_search;
-
/* Make sure the system still exists */
mutex_lock(&event_mutex);
- list_for_each_entry(system, &event_subsystems, list) {
- if (system == inode->i_private) {
- /* Don't open systems with no events */
- if (!system->nr_events) {
- system = NULL;
- break;
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ list_for_each_entry(dir, &tr->systems, list) {
+ if (dir == inode->i_private) {
+ /* Don't open systems with no events */
+ if (dir->nr_events) {
+ __get_system_dir(dir);
+ system = dir->subsystem;
+ }
+ goto exit_loop;
}
- __get_system(system);
- break;
}
}
+ exit_loop:
mutex_unlock(&event_mutex);
- if (system != inode->i_private)
+ if (!system)
return -ENODEV;
- skip_search:
+ /* Some versions of gcc think dir can be uninitialized here */
+ WARN_ON(!dir);
+
ret = tracing_open_generic(inode, filp);
- if (ret < 0 && system)
- put_system(system);
+ if (ret < 0)
+ put_system(dir);
+
+ return ret;
+}
+
+static int system_tr_open(struct inode *inode, struct file *filp)
+{
+ struct ftrace_subsystem_dir *dir;
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
+ /* Make a temporary dir that has no system but points to tr */
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ dir->tr = tr;
+
+ ret = tracing_open_generic(inode, filp);
+ if (ret < 0)
+ kfree(dir);
+
+ filp->private_data = dir;
return ret;
}
static int subsystem_release(struct inode *inode, struct file *file)
{
- struct event_subsystem *system = inode->i_private;
+ struct ftrace_subsystem_dir *dir = file->private_data;
- if (system)
- put_system(system);
+ /*
+ * If dir->subsystem is NULL, then this is a temporary
+ * descriptor that was made for a trace_array to enable
+ * all subsystems.
+ */
+ if (dir->subsystem)
+ put_system(dir);
+ else
+ kfree(dir);
return 0;
}
@@ -890,7 +1056,8 @@ static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct event_subsystem *system = filp->private_data;
+ struct ftrace_subsystem_dir *dir = filp->private_data;
+ struct event_subsystem *system = dir->subsystem;
struct trace_seq *s;
int r;
@@ -915,7 +1082,7 @@ static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct event_subsystem *system = filp->private_data;
+ struct ftrace_subsystem_dir *dir = filp->private_data;
char *buf;
int err;
@@ -932,7 +1099,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
buf[cnt] = '\0';
- err = apply_subsystem_event_filter(system, buf);
+ err = apply_subsystem_event_filter(dir, buf);
free_page((unsigned long) buf);
if (err < 0)
return err;
@@ -1041,30 +1208,35 @@ static const struct file_operations ftrace_system_enable_fops = {
.release = subsystem_release,
};
+static const struct file_operations ftrace_tr_enable_fops = {
+ .open = system_tr_open,
+ .read = system_enable_read,
+ .write = system_enable_write,
+ .llseek = default_llseek,
+ .release = subsystem_release,
+};
+
static const struct file_operations ftrace_show_header_fops = {
.open = tracing_open_generic,
.read = show_header,
.llseek = default_llseek,
};
-static struct dentry *event_trace_events_dir(void)
+static int
+ftrace_event_open(struct inode *inode, struct file *file,
+ const struct seq_operations *seq_ops)
{
- static struct dentry *d_tracer;
- static struct dentry *d_events;
-
- if (d_events)
- return d_events;
-
- d_tracer = tracing_init_dentry();
- if (!d_tracer)
- return NULL;
+ struct seq_file *m;
+ int ret;
- d_events = debugfs_create_dir("events", d_tracer);
- if (!d_events)
- pr_warning("Could not create debugfs "
- "'events' directory\n");
+ ret = seq_open(file, seq_ops);
+ if (ret < 0)
+ return ret;
+ m = file->private_data;
+ /* copy tr over to seq ops */
+ m->private = inode->i_private;
- return d_events;
+ return ret;
}
static int
@@ -1072,117 +1244,165 @@ ftrace_event_avail_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_event_seq_ops;
- return seq_open(file, seq_ops);
+ return ftrace_event_open(inode, file, seq_ops);
}
static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+ struct trace_array *tr = inode->i_private;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
- ftrace_clear_events();
+ ftrace_clear_events(tr);
- return seq_open(file, seq_ops);
+ return ftrace_event_open(inode, file, seq_ops);
+}
+
+static struct event_subsystem *
+create_new_subsystem(const char *name)
+{
+ struct event_subsystem *system;
+
+ /* need to create new entry */
+ system = kmalloc(sizeof(*system), GFP_KERNEL);
+ if (!system)
+ return NULL;
+
+ system->ref_count = 1;
+ system->name = name;
+
+ system->filter = NULL;
+
+ system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
+ if (!system->filter)
+ goto out_free;
+
+ list_add(&system->list, &event_subsystems);
+
+ return system;
+
+ out_free:
+ kfree(system);
+ return NULL;
}
static struct dentry *
-event_subsystem_dir(const char *name, struct dentry *d_events)
+event_subsystem_dir(struct trace_array *tr, const char *name,
+ struct ftrace_event_file *file, struct dentry *parent)
{
+ struct ftrace_subsystem_dir *dir;
struct event_subsystem *system;
struct dentry *entry;
/* First see if we did not already create this dir */
- list_for_each_entry(system, &event_subsystems, list) {
+ list_for_each_entry(dir, &tr->systems, list) {
+ system = dir->subsystem;
if (strcmp(system->name, name) == 0) {
- system->nr_events++;
- return system->entry;
+ dir->nr_events++;
+ file->system = dir;
+ return dir->entry;
}
}
- /* need to create new entry */
- system = kmalloc(sizeof(*system), GFP_KERNEL);
- if (!system) {
- pr_warning("No memory to create event subsystem %s\n",
- name);
- return d_events;
+ /* Now see if the system itself exists. */
+ list_for_each_entry(system, &event_subsystems, list) {
+ if (strcmp(system->name, name) == 0)
+ break;
}
+ /* Reset system variable when not found */
+ if (&system->list == &event_subsystems)
+ system = NULL;
- system->entry = debugfs_create_dir(name, d_events);
- if (!system->entry) {
- pr_warning("Could not create event subsystem %s\n",
- name);
- kfree(system);
- return d_events;
- }
+ dir = kmalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ goto out_fail;
- system->nr_events = 1;
- system->ref_count = 1;
- system->name = kstrdup(name, GFP_KERNEL);
- if (!system->name) {
- debugfs_remove(system->entry);
- kfree(system);
- return d_events;
+ if (!system) {
+ system = create_new_subsystem(name);
+ if (!system)
+ goto out_free;
+ } else
+ __get_system(system);
+
+ dir->entry = debugfs_create_dir(name, parent);
+ if (!dir->entry) {
+ pr_warning("Failed to create system directory %s\n", name);
+ __put_system(system);
+ goto out_free;
}
- list_add(&system->list, &event_subsystems);
-
- system->filter = NULL;
-
- system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
- if (!system->filter) {
- pr_warning("Could not allocate filter for subsystem "
- "'%s'\n", name);
- return system->entry;
- }
+ dir->tr = tr;
+ dir->ref_count = 1;
+ dir->nr_events = 1;
+ dir->subsystem = system;
+ file->system = dir;
- entry = debugfs_create_file("filter", 0644, system->entry, system,
+ entry = debugfs_create_file("filter", 0644, dir->entry, dir,
&ftrace_subsystem_filter_fops);
if (!entry) {
kfree(system->filter);
system->filter = NULL;
- pr_warning("Could not create debugfs "
- "'%s/filter' entry\n", name);
+ pr_warning("Could not create debugfs '%s/filter' entry\n", name);
}
- trace_create_file("enable", 0644, system->entry, system,
+ trace_create_file("enable", 0644, dir->entry, dir,
&ftrace_system_enable_fops);
- return system->entry;
+ list_add(&dir->list, &tr->systems);
+
+ return dir->entry;
+
+ out_free:
+ kfree(dir);
+ out_fail:
+ /* Only print this message if failed on memory allocation */
+ if (!dir || !system)
+ pr_warning("No memory to create event subsystem %s\n",
+ name);
+ return NULL;
}
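
The split performed here can be pictured as follows (a sketch, not code from the patch): the named event_subsystem stays global and shared, while each trace_array gets its own ftrace_subsystem_dir that pins it.

/*
 *   event_subsystems (global list)        per-instance tr->systems lists
 *   ------------------------------        ------------------------------
 *   event_subsystem "sched"  <----------  ftrace_subsystem_dir (tr = global_trace)
 *     name, filter, ref_count <---------  ftrace_subsystem_dir (tr = "instance1")
 *
 * Every directory takes a reference on the shared subsystem; the final
 * __put_system() frees the filter and the subsystem itself.
 */
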
static int
-event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
+event_create_dir(struct dentry *parent,
+ struct ftrace_event_file *file,
const struct file_operations *id,
const struct file_operations *enable,
const struct file_operations *filter,
const struct file_operations *format)
{
+ struct ftrace_event_call *call = file->event_call;
+ struct trace_array *tr = file->tr;
struct list_head *head;
+ struct dentry *d_events;
int ret;
/*
* If the trace point header did not define TRACE_SYSTEM
* then the system would be called "TRACE_SYSTEM".
*/
- if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
- d_events = event_subsystem_dir(call->class->system, d_events);
-
- call->dir = debugfs_create_dir(call->name, d_events);
- if (!call->dir) {
- pr_warning("Could not create debugfs "
- "'%s' directory\n", call->name);
+ if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
+ d_events = event_subsystem_dir(tr, call->class->system, file, parent);
+ if (!d_events)
+ return -ENOMEM;
+ } else
+ d_events = parent;
+
+ file->dir = debugfs_create_dir(call->name, d_events);
+ if (!file->dir) {
+ pr_warning("Could not create debugfs '%s' directory\n",
+ call->name);
return -1;
}
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
- trace_create_file("enable", 0644, call->dir, call,
+ trace_create_file("enable", 0644, file->dir, file,
enable);
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, call->dir, call,
+ trace_create_file("id", 0444, file->dir, call,
id);
#endif
@@ -1196,23 +1416,76 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
if (ret < 0) {
pr_warning("Could not initialize trace point"
" events/%s\n", call->name);
- return ret;
+ return -1;
}
}
- trace_create_file("filter", 0644, call->dir, call,
+ trace_create_file("filter", 0644, file->dir, call,
filter);
- trace_create_file("format", 0444, call->dir, call,
+ trace_create_file("format", 0444, file->dir, call,
format);
return 0;
}
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
+{
+ if (!dir)
+ return;
+
+ if (!--dir->nr_events) {
+ debugfs_remove_recursive(dir->entry);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
+}
+
+static void remove_event_from_tracers(struct ftrace_event_call *call)
+{
+ struct ftrace_event_file *file;
+ struct trace_array *tr;
+
+ do_for_each_event_file_safe(tr, file) {
+
+ if (file->event_call != call)
+ continue;
+
+ list_del(&file->list);
+ debugfs_remove_recursive(file->dir);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
+
+ /*
+ * The do_for_each_event_file_safe() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+}
+
static void event_remove(struct ftrace_event_call *call)
{
- ftrace_event_enable_disable(call, 0);
+ struct trace_array *tr;
+ struct ftrace_event_file *file;
+
+ do_for_each_event_file(tr, file) {
+ if (file->event_call != call)
+ continue;
+ ftrace_event_enable_disable(file, 0);
+ /*
+ * The do_for_each_event_file() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+
if (call->event.funcs)
__unregister_ftrace_event(&call->event);
+ remove_event_from_tracers(call);
list_del(&call->list);
}
@@ -1234,82 +1507,99 @@ static int event_init(struct ftrace_event_call *call)
}
static int
-__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
- const struct file_operations *id,
- const struct file_operations *enable,
- const struct file_operations *filter,
- const struct file_operations *format)
+__register_event(struct ftrace_event_call *call, struct module *mod)
{
- struct dentry *d_events;
int ret;
ret = event_init(call);
if (ret < 0)
return ret;
- d_events = event_trace_events_dir();
- if (!d_events)
- return -ENOENT;
-
- ret = event_create_dir(call, d_events, id, enable, filter, format);
- if (!ret)
- list_add(&call->list, &ftrace_events);
+ list_add(&call->list, &ftrace_events);
call->mod = mod;
- return ret;
+ return 0;
+}
+
+/* Add an event to a trace directory */
+static int
+__trace_add_new_event(struct ftrace_event_call *call,
+ struct trace_array *tr,
+ const struct file_operations *id,
+ const struct file_operations *enable,
+ const struct file_operations *filter,
+ const struct file_operations *format)
+{
+ struct ftrace_event_file *file;
+
+ file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+ if (!file)
+ return -ENOMEM;
+
+ file->event_call = call;
+ file->tr = tr;
+ list_add(&file->list, &tr->events);
+
+ return event_create_dir(tr->event_dir, file, id, enable, filter, format);
}
+/*
+ * Just create a descriptor for early init. A descriptor is required
+ * for enabling events at boot. We want to enable events before
+ * the filesystem is initialized.
+ */
+static __init int
+__trace_early_add_new_event(struct ftrace_event_call *call,
+ struct trace_array *tr)
+{
+ struct ftrace_event_file *file;
+
+ file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+ if (!file)
+ return -ENOMEM;
+
+ file->event_call = call;
+ file->tr = tr;
+ list_add(&file->list, &tr->events);
+
+ return 0;
+}
+
+struct ftrace_module_file_ops;
+static void __add_event_to_tracers(struct ftrace_event_call *call,
+ struct ftrace_module_file_ops *file_ops);
+
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
int ret;
mutex_lock(&event_mutex);
- ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
- &ftrace_enable_fops,
- &ftrace_event_filter_fops,
- &ftrace_event_format_fops);
- mutex_unlock(&event_mutex);
- return ret;
-}
-static void remove_subsystem_dir(const char *name)
-{
- struct event_subsystem *system;
-
- if (strcmp(name, TRACE_SYSTEM) == 0)
- return;
+ ret = __register_event(call, NULL);
+ if (ret >= 0)
+ __add_event_to_tracers(call, NULL);
- list_for_each_entry(system, &event_subsystems, list) {
- if (strcmp(system->name, name) == 0) {
- if (!--system->nr_events) {
- debugfs_remove_recursive(system->entry);
- list_del(&system->list);
- __put_system(system);
- }
- break;
- }
- }
+ mutex_unlock(&event_mutex);
+ return ret;
}
/*
- * Must be called under locking both of event_mutex and trace_event_mutex.
+ * Must be called under locking both of event_mutex and trace_event_sem.
*/
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
event_remove(call);
trace_destroy_fields(call);
destroy_preds(call);
- debugfs_remove_recursive(call->dir);
- remove_subsystem_dir(call->class->system);
}
/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
mutex_lock(&event_mutex);
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__trace_remove_event_call(call);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
}
@@ -1336,6 +1626,26 @@ struct ftrace_module_file_ops {
};
static struct ftrace_module_file_ops *
+find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
+{
+ /*
+ * As event_calls are added in groups by module,
+ * when we find one file_ops, we don't need to search for
+ * each call in that module, as the rest should be the
+ * same. Only search for a new one if the last one did
+ * not match.
+ */
+ if (file_ops && mod == file_ops->mod)
+ return file_ops;
+
+ list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
+ if (file_ops->mod == mod)
+ return file_ops;
+ }
+ return NULL;
+}
+
+static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
struct ftrace_module_file_ops *file_ops;
@@ -1386,9 +1696,8 @@ static void trace_module_add_events(struct module *mod)
return;
for_each_event(call, start, end) {
- __trace_add_event_call(*call, mod,
- &file_ops->id, &file_ops->enable,
- &file_ops->filter, &file_ops->format);
+ __register_event(*call, mod);
+ __add_event_to_tracers(*call, file_ops);
}
}
@@ -1396,12 +1705,13 @@ static void trace_module_remove_events(struct module *mod)
{
struct ftrace_module_file_ops *file_ops;
struct ftrace_event_call *call, *p;
- bool found = false;
+ bool clear_trace = false;
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
list_for_each_entry_safe(call, p, &ftrace_events, list) {
if (call->mod == mod) {
- found = true;
+ if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
+ clear_trace = true;
__trace_remove_event_call(call);
}
}
@@ -1415,14 +1725,18 @@ static void trace_module_remove_events(struct module *mod)
list_del(&file_ops->list);
kfree(file_ops);
}
+ up_write(&trace_event_sem);
/*
* It is safest to reset the ring buffer if the module being unloaded
- * registered any events.
+ * registered any events that were used. The only worry is if
+ * a new module gets loaded, and takes on the same id as the events
+ * of this module. When printing out the buffer, traced events left
+ * over from this module may be passed to the new module events and
+ * unexpected results may occur.
*/
- if (found)
- tracing_reset_current_online_cpus();
- up_write(&trace_event_mutex);
+ if (clear_trace)
+ tracing_reset_all_online_cpus();
}
static int trace_module_notify(struct notifier_block *self,
@@ -1443,14 +1757,433 @@ static int trace_module_notify(struct notifier_block *self,
return 0;
}
+
+static int
+__trace_add_new_mod_event(struct ftrace_event_call *call,
+ struct trace_array *tr,
+ struct ftrace_module_file_ops *file_ops)
+{
+ return __trace_add_new_event(call, tr,
+ &file_ops->id, &file_ops->enable,
+ &file_ops->filter, &file_ops->format);
+}
+
#else
-static int trace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
+static inline struct ftrace_module_file_ops *
+find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
+{
+ return NULL;
+}
+static inline int trace_module_notify(struct notifier_block *self,
+ unsigned long val, void *data)
{
return 0;
}
+static inline int
+__trace_add_new_mod_event(struct ftrace_event_call *call,
+ struct trace_array *tr,
+ struct ftrace_module_file_ops *file_ops)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_MODULES */
+/* Create a new event directory structure for a trace directory. */
+static void
+__trace_add_event_dirs(struct trace_array *tr)
+{
+ struct ftrace_module_file_ops *file_ops = NULL;
+ struct ftrace_event_call *call;
+ int ret;
+
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call->mod) {
+ /*
+ * Directories for events by modules need to
+ * keep module ref counts when opened (as we don't
+ * want the module to disappear when reading one
+ * of these files). The file_ops keep account of
+ * the module ref count.
+ */
+ file_ops = find_ftrace_file_ops(file_ops, call->mod);
+ if (!file_ops)
+ continue; /* Warn? */
+ ret = __trace_add_new_mod_event(call, tr, file_ops);
+ if (ret < 0)
+ pr_warning("Could not create directory for event %s\n",
+ call->name);
+ continue;
+ }
+ ret = __trace_add_new_event(call, tr,
+ &ftrace_event_id_fops,
+ &ftrace_enable_fops,
+ &ftrace_event_filter_fops,
+ &ftrace_event_format_fops);
+ if (ret < 0)
+ pr_warning("Could not create directory for event %s\n",
+ call->name);
+ }
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Avoid typos */
+#define ENABLE_EVENT_STR "enable_event"
+#define DISABLE_EVENT_STR "disable_event"
+
+struct event_probe_data {
+ struct ftrace_event_file *file;
+ unsigned long count;
+ int ref;
+ bool enable;
+};
+
+static struct ftrace_event_file *
+find_event_file(struct trace_array *tr, const char *system, const char *event)
+{
+ struct ftrace_event_file *file;
+ struct ftrace_event_call *call;
+
+ list_for_each_entry(file, &tr->events, list) {
+
+ call = file->event_call;
+
+ if (!call->name || !call->class || !call->class->reg)
+ continue;
+
+ if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+ continue;
+
+ if (strcmp(event, call->name) == 0 &&
+ strcmp(system, call->class->system) == 0)
+ return file;
+ }
+ return NULL;
+}
+
+static void
+event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
+{
+ struct event_probe_data **pdata = (struct event_probe_data **)_data;
+ struct event_probe_data *data = *pdata;
+
+ if (!data)
+ return;
+
+ if (data->enable)
+ clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+ else
+ set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
+}
+
+static void
+event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
+{
+ struct event_probe_data **pdata = (struct event_probe_data **)_data;
+ struct event_probe_data *data = *pdata;
+
+ if (!data)
+ return;
+
+ if (!data->count)
+ return;
+
+ /* Skip if the event is in a state we want to switch to */
+ if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ return;
+
+ if (data->count != -1)
+ (data->count)--;
+
+ event_enable_probe(ip, parent_ip, _data);
+}
+
+static int
+event_enable_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *_data)
+{
+ struct event_probe_data *data = _data;
+
+ seq_printf(m, "%ps:", (void *)ip);
+
+ seq_printf(m, "%s:%s:%s",
+ data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
+ data->file->event_call->class->system,
+ data->file->event_call->name);
+
+ if (data->count == -1)
+ seq_printf(m, ":unlimited\n");
+ else
+ seq_printf(m, ":count=%ld\n", data->count);
+
+ return 0;
+}
+
+static int
+event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
+ void **_data)
+{
+ struct event_probe_data **pdata = (struct event_probe_data **)_data;
+ struct event_probe_data *data = *pdata;
+
+ data->ref++;
+ return 0;
+}
+
+static void
+event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
+ void **_data)
+{
+ struct event_probe_data **pdata = (struct event_probe_data **)_data;
+ struct event_probe_data *data = *pdata;
+
+ if (WARN_ON_ONCE(data->ref <= 0))
+ return;
+
+ data->ref--;
+ if (!data->ref) {
+ /* Remove the SOFT_MODE flag */
+ __ftrace_event_enable_disable(data->file, 0, 1);
+ module_put(data->file->event_call->mod);
+ kfree(data);
+ }
+ *pdata = NULL;
+}
+
+static struct ftrace_probe_ops event_enable_probe_ops = {
+ .func = event_enable_probe,
+ .print = event_enable_print,
+ .init = event_enable_init,
+ .free = event_enable_free,
+};
+
+static struct ftrace_probe_ops event_enable_count_probe_ops = {
+ .func = event_enable_count_probe,
+ .print = event_enable_print,
+ .init = event_enable_init,
+ .free = event_enable_free,
+};
+
+static struct ftrace_probe_ops event_disable_probe_ops = {
+ .func = event_enable_probe,
+ .print = event_enable_print,
+ .init = event_enable_init,
+ .free = event_enable_free,
+};
+
+static struct ftrace_probe_ops event_disable_count_probe_ops = {
+ .func = event_enable_count_probe,
+ .print = event_enable_print,
+ .init = event_enable_init,
+ .free = event_enable_free,
+};
+
+static int
+event_enable_func(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enabled)
+{
+ struct trace_array *tr = top_trace_array();
+ struct ftrace_event_file *file;
+ struct ftrace_probe_ops *ops;
+ struct event_probe_data *data;
+ const char *system;
+ const char *event;
+ char *number;
+ bool enable;
+ int ret;
+
+ /* hash funcs only work with set_ftrace_filter */
+ if (!enabled)
+ return -EINVAL;
+
+ if (!param)
+ return -EINVAL;
+
+ system = strsep(&param, ":");
+ if (!param)
+ return -EINVAL;
+
+ event = strsep(&param, ":");
+
+ mutex_lock(&event_mutex);
+
+ ret = -EINVAL;
+ file = find_event_file(tr, system, event);
+ if (!file)
+ goto out;
+
+ enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
+
+ if (enable)
+ ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
+ else
+ ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
+
+ if (glob[0] == '!') {
+ unregister_ftrace_function_probe_func(glob+1, ops);
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ data->enable = enable;
+ data->count = -1;
+ data->file = file;
+
+ if (!param)
+ goto out_reg;
+
+ number = strsep(&param, ":");
+
+ ret = -EINVAL;
+ if (!strlen(number))
+ goto out_free;
+
+ /*
+ * We use the callback data field (which is a pointer)
+ * as our counter.
+ */
+ ret = kstrtoul(number, 0, &data->count);
+ if (ret)
+ goto out_free;
+
+ out_reg:
+ /* Don't let event modules unload while probe registered */
+ ret = try_module_get(file->event_call->mod);
+ if (!ret)
+ goto out_free;
+
+ ret = __ftrace_event_enable_disable(file, 1, 1);
+ if (ret < 0)
+ goto out_put;
+ ret = register_ftrace_function_probe(glob, ops, data);
+ if (!ret)
+ goto out_disable;
+ out:
+ mutex_unlock(&event_mutex);
+ return ret;
+
+ out_disable:
+ __ftrace_event_enable_disable(file, 0, 1);
+ out_put:
+ module_put(file->event_call->mod);
+ out_free:
+ kfree(data);
+ goto out;
+}
+
+static struct ftrace_func_command event_enable_cmd = {
+ .name = ENABLE_EVENT_STR,
+ .func = event_enable_func,
+};
+
+static struct ftrace_func_command event_disable_cmd = {
+ .name = DISABLE_EVENT_STR,
+ .func = event_enable_func,
+};
+
+static __init int register_event_cmds(void)
+{
+ int ret;
+
+ ret = register_ftrace_command(&event_enable_cmd);
+ if (WARN_ON(ret < 0))
+ return ret;
+ ret = register_ftrace_command(&event_disable_cmd);
+ if (WARN_ON(ret < 0))
+ unregister_ftrace_command(&event_enable_cmd);
+ return ret;
+}
+#else
+static inline int register_event_cmds(void) { return 0; }
+#endif /* CONFIG_DYNAMIC_FTRACE */
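
For context, a usage sketch of the two commands registered above. The syntax is what event_enable_func() parses; it is shown as a comment because it is written through set_ftrace_filter rather than compiled, and the function/event names are only examples:

/*
 *   # enable sched:sched_switch the first two times kmalloc() is traced:
 *   echo 'kmalloc:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 *   # remove that probe again:
 *   echo '!kmalloc:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * A leading '!' takes the unregister path, a trailing ':<count>' is parsed
 * into data->count, and omitting the count selects the ":unlimited" ops.
 */
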
+
+/*
+ * The top level array has already had its ftrace_event_file
+ * descriptors created in order to allow for early events to
+ * be recorded. This function is called after the debugfs has been
+ * initialized, and we now have to create the files associated
+ * with the events.
+ */
+static __init void
+__trace_early_add_event_dirs(struct trace_array *tr)
+{
+ struct ftrace_event_file *file;
+ int ret;
+
+
+ list_for_each_entry(file, &tr->events, list) {
+ ret = event_create_dir(tr->event_dir, file,
+ &ftrace_event_id_fops,
+ &ftrace_enable_fops,
+ &ftrace_event_filter_fops,
+ &ftrace_event_format_fops);
+ if (ret < 0)
+ pr_warning("Could not create directory for event %s\n",
+ file->event_call->name);
+ }
+}
+
+/*
+ * For early boot up, the top trace array needs to have
+ * a list of events that can be enabled. This must be done before
+ * the filesystem is set up in order to allow events to be traced
+ * early.
+ */
+static __init void
+__trace_early_add_events(struct trace_array *tr)
+{
+ struct ftrace_event_call *call;
+ int ret;
+
+ list_for_each_entry(call, &ftrace_events, list) {
+ /* Early boot up should not have any modules loaded */
+ if (WARN_ON_ONCE(call->mod))
+ continue;
+
+ ret = __trace_early_add_new_event(call, tr);
+ if (ret < 0)
+ pr_warning("Could not create early event %s\n",
+ call->name);
+ }
+}
+
+/* Remove the event directory structure for a trace directory. */
+static void
+__trace_remove_event_dirs(struct trace_array *tr)
+{
+ struct ftrace_event_file *file, *next;
+
+ list_for_each_entry_safe(file, next, &tr->events, list) {
+ list_del(&file->list);
+ debugfs_remove_recursive(file->dir);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
+ }
+}
+
+static void
+__add_event_to_tracers(struct ftrace_event_call *call,
+ struct ftrace_module_file_ops *file_ops)
+{
+ struct trace_array *tr;
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (file_ops)
+ __trace_add_new_mod_event(call, tr, file_ops);
+ else
+ __trace_add_new_event(call, tr,
+ &ftrace_event_id_fops,
+ &ftrace_enable_fops,
+ &ftrace_event_filter_fops,
+ &ftrace_event_format_fops);
+ }
+}
+
static struct notifier_block trace_module_nb = {
.notifier_call = trace_module_notify,
.priority = 0,
@@ -1464,15 +2197,135 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
static __init int setup_trace_event(char *str)
{
strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
- ring_buffer_expanded = 1;
- tracing_selftest_disabled = 1;
+ ring_buffer_expanded = true;
+ tracing_selftest_disabled = true;
return 1;
}
__setup("trace_event=", setup_trace_event);
+/* Expects to have event_mutex held when called */
+static int
+create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
+{
+ struct dentry *d_events;
+ struct dentry *entry;
+
+ entry = debugfs_create_file("set_event", 0644, parent,
+ tr, &ftrace_set_event_fops);
+ if (!entry) {
+ pr_warning("Could not create debugfs 'set_event' entry\n");
+ return -ENOMEM;
+ }
+
+ d_events = debugfs_create_dir("events", parent);
+ if (!d_events) {
+ pr_warning("Could not create debugfs 'events' directory\n");
+ return -ENOMEM;
+ }
+
+ /* ring buffer internal formats */
+ trace_create_file("header_page", 0444, d_events,
+ ring_buffer_print_page_header,
+ &ftrace_show_header_fops);
+
+ trace_create_file("header_event", 0444, d_events,
+ ring_buffer_print_entry_header,
+ &ftrace_show_header_fops);
+
+ trace_create_file("enable", 0644, d_events,
+ tr, &ftrace_tr_enable_fops);
+
+ tr->event_dir = d_events;
+
+ return 0;
+}
+
+/**
+ * event_trace_add_tracer - add an instance of a trace_array to events
+ * @parent: The parent dentry to place the files/directories for events in
+ * @tr: The trace array associated with these events
+ *
+ * When a new instance is created, it needs to set up its events
+ * directory, as well as other files associated with events. It also
+ * creates the event hierarchy in the @parent/events directory.
+ *
+ * Returns 0 on success.
+ */
+int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
+{
+ int ret;
+
+ mutex_lock(&event_mutex);
+
+ ret = create_event_toplevel_files(parent, tr);
+ if (ret)
+ goto out_unlock;
+
+ down_write(&trace_event_sem);
+ __trace_add_event_dirs(tr);
+ up_write(&trace_event_sem);
+
+ out_unlock:
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
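
A hedged sketch of the caller side (the real instance-creation code lives in trace.c and does considerably more setup and error handling; the names below are illustrative and rely on the declarations added by this patch):

static int demo_new_instance(const char *name, struct dentry *instance_dir)
{
	struct trace_array *tr;

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;

	tr->name = kstrdup(name, GFP_KERNEL);	/* error handling trimmed */
	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	list_add(&tr->list, &ftrace_trace_arrays);

	/* creates <instance_dir>/set_event plus the events/ hierarchy */
	return event_trace_add_tracer(instance_dir, tr);
}
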
+
+/*
+ * The top trace array already had its file descriptors created.
+ * Now the files themselves need to be created.
+ */
+static __init int
+early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
+{
+ int ret;
+
+ mutex_lock(&event_mutex);
+
+ ret = create_event_toplevel_files(parent, tr);
+ if (ret)
+ goto out_unlock;
+
+ down_write(&trace_event_sem);
+ __trace_early_add_event_dirs(tr);
+ up_write(&trace_event_sem);
+
+ out_unlock:
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+int event_trace_del_tracer(struct trace_array *tr)
+{
+ /* Disable any running events */
+ __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
+
+ mutex_lock(&event_mutex);
+
+ down_write(&trace_event_sem);
+ __trace_remove_event_dirs(tr);
+ debugfs_remove_recursive(tr->event_dir);
+ up_write(&trace_event_sem);
+
+ tr->event_dir = NULL;
+
+ mutex_unlock(&event_mutex);
+
+ return 0;
+}
+
+static __init int event_trace_memsetup(void)
+{
+ field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
+ file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
+ return 0;
+}
+
static __init int event_trace_enable(void)
{
+ struct trace_array *tr = top_trace_array();
struct ftrace_event_call **iter, *call;
char *buf = bootup_event_buf;
char *token;
@@ -1486,6 +2339,14 @@ static __init int event_trace_enable(void)
list_add(&call->list, &ftrace_events);
}
+ /*
+ * We need the top trace array to have a working set of trace
+ * points at early init, before the debug files and directories
+ * are created. Create the file entries now, and attach them
+ * to the actual file dentries later.
+ */
+ __trace_early_add_events(tr);
+
while (true) {
token = strsep(&buf, ",");
@@ -1494,73 +2355,43 @@ static __init int event_trace_enable(void)
if (!*token)
continue;
- ret = ftrace_set_clr_event(token, 1);
+ ret = ftrace_set_clr_event(tr, token, 1);
if (ret)
pr_warn("Failed to enable trace event: %s\n", token);
}
trace_printk_start_comm();
+ register_event_cmds();
+
return 0;
}
static __init int event_trace_init(void)
{
- struct ftrace_event_call *call;
+ struct trace_array *tr;
struct dentry *d_tracer;
struct dentry *entry;
- struct dentry *d_events;
int ret;
+ tr = top_trace_array();
+
d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
- NULL, &ftrace_avail_fops);
+ tr, &ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");
- entry = debugfs_create_file("set_event", 0644, d_tracer,
- NULL, &ftrace_set_event_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'set_event' entry\n");
-
- d_events = event_trace_events_dir();
- if (!d_events)
- return 0;
-
- /* ring buffer internal formats */
- trace_create_file("header_page", 0444, d_events,
- ring_buffer_print_page_header,
- &ftrace_show_header_fops);
-
- trace_create_file("header_event", 0444, d_events,
- ring_buffer_print_entry_header,
- &ftrace_show_header_fops);
-
- trace_create_file("enable", 0644, d_events,
- NULL, &ftrace_system_enable_fops);
-
if (trace_define_common_fields())
pr_warning("tracing: Failed to allocate common fields");
- /*
- * Early initialization already enabled ftrace event.
- * Now it's only necessary to create the event directory.
- */
- list_for_each_entry(call, &ftrace_events, list) {
-
- ret = event_create_dir(call, d_events,
- &ftrace_event_id_fops,
- &ftrace_enable_fops,
- &ftrace_event_filter_fops,
- &ftrace_event_format_fops);
- if (ret < 0)
- event_remove(call);
- }
+ ret = early_event_add_tracer(d_tracer, tr);
+ if (ret)
+ return ret;
ret = register_module_notifier(&trace_module_nb);
if (ret)
@@ -1568,6 +2399,7 @@ static __init int event_trace_init(void)
return 0;
}
+early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);
@@ -1627,13 +2459,20 @@ static __init void event_test_stuff(void)
*/
static __init void event_trace_self_tests(void)
{
+ struct ftrace_subsystem_dir *dir;
+ struct ftrace_event_file *file;
struct ftrace_event_call *call;
struct event_subsystem *system;
+ struct trace_array *tr;
int ret;
+ tr = top_trace_array();
+
pr_info("Running tests on trace events:\n");
- list_for_each_entry(call, &ftrace_events, list) {
+ list_for_each_entry(file, &tr->events, list) {
+
+ call = file->event_call;
/* Only test those that have a probe */
if (!call->class || !call->class->probe)
@@ -1657,15 +2496,15 @@ static __init void event_trace_self_tests(void)
* If an event is already enabled, someone is using
* it and the self test should not be on.
*/
- if (call->flags & TRACE_EVENT_FL_ENABLED) {
+ if (file->flags & FTRACE_EVENT_FL_ENABLED) {
pr_warning("Enabled event during self test!\n");
WARN_ON_ONCE(1);
continue;
}
- ftrace_event_enable_disable(call, 1);
+ ftrace_event_enable_disable(file, 1);
event_test_stuff();
- ftrace_event_enable_disable(call, 0);
+ ftrace_event_enable_disable(file, 0);
pr_cont("OK\n");
}
@@ -1674,7 +2513,9 @@ static __init void event_trace_self_tests(void)
pr_info("Running tests on trace event systems:\n");
- list_for_each_entry(system, &event_subsystems, list) {
+ list_for_each_entry(dir, &tr->systems, list) {
+
+ system = dir->subsystem;
/* the ftrace system is special, skip it */
if (strcmp(system->name, "ftrace") == 0)
@@ -1682,7 +2523,7 @@ static __init void event_trace_self_tests(void)
pr_info("Testing event system %s: ", system->name);
- ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
+ ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
if (WARN_ON_ONCE(ret)) {
pr_warning("error enabling system %s\n",
system->name);
@@ -1691,7 +2532,7 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
- ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
+ ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
if (WARN_ON_ONCE(ret)) {
pr_warning("error disabling system %s\n",
system->name);
@@ -1706,7 +2547,7 @@ static __init void event_trace_self_tests(void)
pr_info("Running tests on all trace events:\n");
pr_info("Testing all events: ");
- ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
+ ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
if (WARN_ON_ONCE(ret)) {
pr_warning("error enabling all events\n");
return;
@@ -1715,7 +2556,7 @@ static __init void event_trace_self_tests(void)
event_test_stuff();
/* reset sysname */
- ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
+ ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
if (WARN_ON_ONCE(ret)) {
pr_warning("error disabling all events\n");
return;
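
The hunks above split event setup into two phases: event file records for the top-level trace array are created at early init (__trace_early_add_events()), and their debugfs directories are attached later once tracing_init_dentry() succeeds (early_event_add_tracer()). As a rough illustration of that ordering only, here is a small standalone userspace C sketch -- not kernel code, and all names in it are made up for the example:

/* Illustrative only: a userspace model of the two-phase setup above.
 * Per-instance event "files" are allocated at early init, and their
 * directory entries are attached later when the filesystem becomes
 * available.  Names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct event_call { const char *name; };          /* global definition  */
struct event_file { struct event_call *call;      /* per-instance state */
                    int has_dir; };

#define NR_CALLS 3
static struct event_call calls[NR_CALLS] = {
	{ "sched_switch" }, { "sched_wakeup" }, { "sys_enter" },
};

static struct event_file *files[NR_CALLS];

/* Phase 1: early init -- only allocate the per-instance file records. */
static void early_add_events(void)
{
	for (int i = 0; i < NR_CALLS; i++) {
		files[i] = calloc(1, sizeof(*files[i]));
		files[i]->call = &calls[i];
	}
}

/* Phase 2: the filesystem is up -- attach a "directory" to each record. */
static void add_event_dirs(void)
{
	for (int i = 0; i < NR_CALLS; i++) {
		files[i]->has_dir = 1;
		printf("created events/%s/\n", files[i]->call->name);
	}
}

int main(void)
{
	early_add_events();	/* models __trace_early_add_events()     */
	add_event_dirs();	/* models __trace_early_add_event_dirs() */
	return 0;
}
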
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e5b0ca8b8d4..a6361178de5 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -658,33 +658,6 @@ void print_subsystem_event_filter(struct event_subsystem *system,
mutex_unlock(&event_mutex);
}
-static struct ftrace_event_field *
-__find_event_field(struct list_head *head, char *name)
-{
- struct ftrace_event_field *field;
-
- list_for_each_entry(field, head, link) {
- if (!strcmp(field->name, name))
- return field;
- }
-
- return NULL;
-}
-
-static struct ftrace_event_field *
-find_event_field(struct ftrace_event_call *call, char *name)
-{
- struct ftrace_event_field *field;
- struct list_head *head;
-
- field = __find_event_field(&ftrace_common_fields, name);
- if (field)
- return field;
-
- head = trace_get_fields(call);
- return __find_event_field(head, name);
-}
-
static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
{
stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
@@ -1337,7 +1310,7 @@ static struct filter_pred *create_pred(struct filter_parse_state *ps,
return NULL;
}
- field = find_event_field(call, operand1);
+ field = trace_find_event_field(call, operand1);
if (!field) {
parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
return NULL;
@@ -1907,16 +1880,17 @@ out_unlock:
return err;
}
-int apply_subsystem_event_filter(struct event_subsystem *system,
+int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
char *filter_string)
{
+ struct event_subsystem *system = dir->subsystem;
struct event_filter *filter;
int err = 0;
mutex_lock(&event_mutex);
/* Make sure the system still has events */
- if (!system->nr_events) {
+ if (!dir->nr_events) {
err = -ENODEV;
goto out_unlock;
}
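
The removed find_event_field()/__find_event_field() pair (now replaced by the shared trace_find_event_field() helper) looked a field name up first in the common fields and then in the event's own field list. A minimal userspace sketch of that lookup order, using plain arrays instead of the kernel's list_head lists (all names hypothetical):

/* Illustrative only: model of the field lookup the filter code performs --
 * search the common fields first, then the event's own fields. */
#include <stdio.h>
#include <string.h>

struct field { const char *name; int offset; };

static const struct field common_fields[] = {
	{ "pid", 0 }, { "flags", 4 }, { NULL, 0 }
};
static const struct field event_fields[] = {
	{ "prev_comm", 8 }, { "next_pid", 24 }, { NULL, 0 }
};

static const struct field *find_field(const struct field *list, const char *name)
{
	for (; list->name; list++)
		if (!strcmp(list->name, name))
			return list;
	return NULL;
}

static const struct field *find_event_field(const char *name)
{
	const struct field *f = find_field(common_fields, name);

	return f ? f : find_field(event_fields, name);
}

int main(void)
{
	const struct field *f = find_event_field("next_pid");

	printf("%s -> offset %d\n", f->name, f->offset);
	return 0;
}
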
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index e039906b037..d21a7467008 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -129,7 +129,7 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
-int \
+static int __init \
ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
{ \
struct struct_name field; \
@@ -168,7 +168,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
regfn) \
\
-struct ftrace_event_class event_class_ftrace_##call = { \
+struct ftrace_event_class __refdata event_class_ftrace_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 60115252332..c4d6d719198 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void);
static int function_trace_init(struct trace_array *tr)
{
func_trace = tr;
- tr->cpu = get_cpu();
+ tr->trace_buffer.cpu = get_cpu();
put_cpu();
tracing_start_cmdline_record();
@@ -44,7 +44,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr)
{
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
}
/* Our option */
@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
goto out;
cpu = smp_processor_id();
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (!atomic_read(&data->disabled)) {
local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc);
@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
*/
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
@@ -214,66 +214,89 @@ static struct tracer function_trace __read_mostly =
};
#ifdef CONFIG_DYNAMIC_FTRACE
-static void
-ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+static int update_count(void **data)
{
- long *count = (long *)data;
-
- if (tracing_is_on())
- return;
+ unsigned long *count = (long *)data;
if (!*count)
- return;
+ return 0;
if (*count != -1)
(*count)--;
- tracing_on();
+ return 1;
}
static void
-ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
- long *count = (long *)data;
+ if (tracing_is_on())
+ return;
+
+ if (update_count(data))
+ tracing_on();
+}
+static void
+ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
+{
if (!tracing_is_on())
return;
- if (!*count)
+ if (update_count(data))
+ tracing_off();
+}
+
+static void
+ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ if (tracing_is_on())
return;
- if (*count != -1)
- (*count)--;
+ tracing_on();
+}
+
+static void
+ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ if (!tracing_is_on())
+ return;
tracing_off();
}
-static int
-ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
- struct ftrace_probe_ops *ops, void *data);
+/*
+ * Skip 4:
+ * ftrace_stacktrace()
+ * function_trace_probe_call()
+ * ftrace_ops_list_func()
+ * ftrace_call()
+ */
+#define STACK_SKIP 4
-static struct ftrace_probe_ops traceon_probe_ops = {
- .func = ftrace_traceon,
- .print = ftrace_trace_onoff_print,
-};
+static void
+ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ trace_dump_stack(STACK_SKIP);
+}
-static struct ftrace_probe_ops traceoff_probe_ops = {
- .func = ftrace_traceoff,
- .print = ftrace_trace_onoff_print,
-};
+static void
+ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
+{
+ if (!tracing_is_on())
+ return;
+
+ if (update_count(data))
+ trace_dump_stack(STACK_SKIP);
+}
static int
-ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
- struct ftrace_probe_ops *ops, void *data)
+ftrace_probe_print(const char *name, struct seq_file *m,
+ unsigned long ip, void *data)
{
long count = (long)data;
- seq_printf(m, "%ps:", (void *)ip);
-
- if (ops == &traceon_probe_ops)
- seq_printf(m, "traceon");
- else
- seq_printf(m, "traceoff");
+ seq_printf(m, "%ps:%s", (void *)ip, name);
if (count == -1)
seq_printf(m, ":unlimited\n");
@@ -284,26 +307,61 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
}
static int
-ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
+ftrace_traceon_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
{
- struct ftrace_probe_ops *ops;
-
- /* we register both traceon and traceoff to this callback */
- if (strcmp(cmd, "traceon") == 0)
- ops = &traceon_probe_ops;
- else
- ops = &traceoff_probe_ops;
+ return ftrace_probe_print("traceon", m, ip, data);
+}
- unregister_ftrace_function_probe_func(glob, ops);
+static int
+ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ return ftrace_probe_print("traceoff", m, ip, data);
+}
- return 0;
+static int
+ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
+ struct ftrace_probe_ops *ops, void *data)
+{
+ return ftrace_probe_print("stacktrace", m, ip, data);
}
+static struct ftrace_probe_ops traceon_count_probe_ops = {
+ .func = ftrace_traceon_count,
+ .print = ftrace_traceon_print,
+};
+
+static struct ftrace_probe_ops traceoff_count_probe_ops = {
+ .func = ftrace_traceoff_count,
+ .print = ftrace_traceoff_print,
+};
+
+static struct ftrace_probe_ops stacktrace_count_probe_ops = {
+ .func = ftrace_stacktrace_count,
+ .print = ftrace_stacktrace_print,
+};
+
+static struct ftrace_probe_ops traceon_probe_ops = {
+ .func = ftrace_traceon,
+ .print = ftrace_traceon_print,
+};
+
+static struct ftrace_probe_ops traceoff_probe_ops = {
+ .func = ftrace_traceoff,
+ .print = ftrace_traceoff_print,
+};
+
+static struct ftrace_probe_ops stacktrace_probe_ops = {
+ .func = ftrace_stacktrace,
+ .print = ftrace_stacktrace_print,
+};
+
static int
-ftrace_trace_onoff_callback(struct ftrace_hash *hash,
- char *glob, char *cmd, char *param, int enable)
+ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
+ struct ftrace_hash *hash, char *glob,
+ char *cmd, char *param, int enable)
{
- struct ftrace_probe_ops *ops;
void *count = (void *)-1;
char *number;
int ret;
@@ -312,14 +370,10 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
if (!enable)
return -EINVAL;
- if (glob[0] == '!')
- return ftrace_trace_onoff_unreg(glob+1, cmd, param);
-
- /* we register both traceon and traceoff to this callback */
- if (strcmp(cmd, "traceon") == 0)
- ops = &traceon_probe_ops;
- else
- ops = &traceoff_probe_ops;
+ if (glob[0] == '!') {
+ unregister_ftrace_function_probe_func(glob+1, ops);
+ return 0;
+ }
if (!param)
goto out_reg;
@@ -343,6 +397,34 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
return ret < 0 ? ret : 0;
}
+static int
+ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
+{
+ struct ftrace_probe_ops *ops;
+
+ /* we register both traceon and traceoff to this callback */
+ if (strcmp(cmd, "traceon") == 0)
+ ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
+ else
+ ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
+
+ return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+ param, enable);
+}
+
+static int
+ftrace_stacktrace_callback(struct ftrace_hash *hash,
+ char *glob, char *cmd, char *param, int enable)
+{
+ struct ftrace_probe_ops *ops;
+
+ ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
+
+ return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+ param, enable);
+}
+
static struct ftrace_func_command ftrace_traceon_cmd = {
.name = "traceon",
.func = ftrace_trace_onoff_callback,
@@ -353,6 +435,11 @@ static struct ftrace_func_command ftrace_traceoff_cmd = {
.func = ftrace_trace_onoff_callback,
};
+static struct ftrace_func_command ftrace_stacktrace_cmd = {
+ .name = "stacktrace",
+ .func = ftrace_stacktrace_callback,
+};
+
static int __init init_func_cmd_traceon(void)
{
int ret;
@@ -364,6 +451,12 @@ static int __init init_func_cmd_traceon(void)
ret = register_ftrace_command(&ftrace_traceon_cmd);
if (ret)
unregister_ftrace_command(&ftrace_traceoff_cmd);
+
+ ret = register_ftrace_command(&ftrace_stacktrace_cmd);
+ if (ret) {
+ unregister_ftrace_command(&ftrace_traceoff_cmd);
+ unregister_ftrace_command(&ftrace_traceon_cmd);
+ }
return ret;
}
#else
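
The new update_count() helper above gives traceon/traceoff/stacktrace their counted variants: a count of -1 never decrements (unlimited), any other count is decremented per hit and the action stops at zero. A standalone userspace sketch of just that counting rule (illustrative only, not the kernel code):

/* Illustrative only: a userspace model of the counted-probe helper.
 * A count of -1 means "unlimited"; any other value is decremented each
 * time the probe fires and the action stops once it reaches zero. */
#include <stdio.h>

static int update_count(long *count)
{
	if (!*count)
		return 0;		/* budget exhausted: do nothing   */
	if (*count != -1)
		(*count)--;		/* -1 is the "unlimited" sentinel */
	return 1;
}

int main(void)
{
	long limited = 3, unlimited = -1;

	for (int i = 0; i < 5; i++)
		printf("hit %d: limited=%d unlimited=%d\n", i,
		       update_count(&limited), update_count(&unlimited));
	/* "limited" fires for the first three hits only. */
	return 0;
}
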
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 39ada66389c..8388bc99f2e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
struct ftrace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ent_entry *entry;
if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
{
struct ftrace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ftrace_graph_ret_entry *entry;
if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
* We need to consume the current entry to see
* the next one.
*/
- ring_buffer_consume(iter->tr->buffer, iter->cpu,
+ ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
NULL, NULL);
- event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+ event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
NULL, NULL);
}
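
These hunks switch the per-CPU tracer state from the old tr->data[cpu] array to per_cpu_ptr(tr->trace_buffer.data, cpu); the surrounding pattern -- bump the CPU's disabled counter, record only when this is the outermost entry, then drop it -- is unchanged. A rough userspace model of that recursion guard (a plain array instead of real per-CPU data, names invented for the example):

/* Illustrative only: userspace sketch of the per-CPU "disabled" counter
 * pattern.  Recursive entry into the tracer on a CPU is suppressed while
 * that CPU's counter is non-zero. */
#include <stdio.h>

#define NR_CPUS 4

struct trace_cpu_data { int disabled; long entries; };

static struct trace_cpu_data cpu_data[NR_CPUS];

static void trace_on_cpu(int cpu)
{
	struct trace_cpu_data *data = &cpu_data[cpu];

	if (data->disabled++ == 0)	/* outermost, non-recursive entry only */
		data->entries++;
	data->disabled--;
}

int main(void)
{
	trace_on_cpu(1);
	trace_on_cpu(1);
	printf("cpu1 recorded %ld entries\n", cpu_data[1].entries);
	return 0;
}
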
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 443b25b43b4..b19d065a28c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -33,6 +33,7 @@ enum {
static int trace_type __read_mostly;
static int save_flags;
+static bool function_enabled;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -121,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags))
return 0;
- *data = tr->data[cpu];
+ *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1))
@@ -175,7 +176,7 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
per_cpu(tracing_cpu, cpu) = 0;
tracing_max_latency = 0;
- tracing_reset_online_cpus(irqsoff_trace);
+ tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
return start_irqsoff_tracer(irqsoff_trace, set);
}
@@ -380,7 +381,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
if (per_cpu(tracing_cpu, cpu))
return;
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled))
return;
@@ -418,7 +419,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
if (!tracer_enabled)
return;
- data = tr->data[cpu];
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled))
@@ -528,15 +529,60 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
}
#endif /* CONFIG_PREEMPT_TRACER */
-static int start_irqsoff_tracer(struct trace_array *tr, int graph)
+static int register_irqsoff_function(int graph, int set)
{
- int ret = 0;
+ int ret;
- if (!graph)
- ret = register_ftrace_function(&trace_ops);
- else
+ /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+ if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+ return 0;
+
+ if (graph)
ret = register_ftrace_graph(&irqsoff_graph_return,
&irqsoff_graph_entry);
+ else
+ ret = register_ftrace_function(&trace_ops);
+
+ if (!ret)
+ function_enabled = true;
+
+ return ret;
+}
+
+static void unregister_irqsoff_function(int graph)
+{
+ if (!function_enabled)
+ return;
+
+ if (graph)
+ unregister_ftrace_graph();
+ else
+ unregister_ftrace_function(&trace_ops);
+
+ function_enabled = false;
+}
+
+static void irqsoff_function_set(int set)
+{
+ if (set)
+ register_irqsoff_function(is_graph(), 1);
+ else
+ unregister_irqsoff_function(is_graph());
+}
+
+static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
+{
+ if (mask & TRACE_ITER_FUNCTION)
+ irqsoff_function_set(set);
+
+ return trace_keep_overwrite(tracer, mask, set);
+}
+
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
+{
+ int ret;
+
+ ret = register_irqsoff_function(graph, 0);
if (!ret && tracing_is_enabled())
tracer_enabled = 1;
@@ -550,10 +596,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
tracer_enabled = 0;
- if (!graph)
- unregister_ftrace_function(&trace_ops);
- else
- unregister_ftrace_graph();
+ unregister_irqsoff_function(graph);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -561,14 +604,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
save_flags = trace_flags;
/* non overwrite screws up the latency tracers */
- set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
- set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0;
irqsoff_trace = tr;
/* make sure that the tracer is visible */
smp_wmb();
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
if (start_irqsoff_tracer(tr, is_graph()))
printk(KERN_ERR "failed to start irqsoff tracer\n");
@@ -581,8 +624,8 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
stop_irqsoff_tracer(tr, is_graph());
- set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
- set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void irqsoff_tracer_start(struct trace_array *tr)
@@ -615,7 +658,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
- .flag_changed = trace_keep_overwrite,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff,
#endif
@@ -649,7 +692,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
- .flag_changed = trace_keep_overwrite,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff,
#endif
@@ -685,7 +728,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
.set_flag = irqsoff_set_flag,
- .flag_changed = trace_keep_overwrite,
+ .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff,
#endif
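
The irqsoff changes gate function-tracer registration on the FUNCTION trace option: register_irqsoff_function() registers the probe only when the flag is set (or about to be set), and the function_enabled bool keeps register/unregister balanced when the option is toggled at runtime. A userspace sketch of that state machine, with invented flag names, purely for illustration:

/* Illustrative only: model of the function_enabled gating added above. */
#include <stdio.h>
#include <stdbool.h>

#define ITER_FUNCTION 0x1

static unsigned trace_flags = ITER_FUNCTION;
static bool function_enabled;

static int register_function(int set)
{
	/* 'set' is non-zero if ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & ITER_FUNCTION)))
		return 0;
	function_enabled = true;
	printf("function probe registered\n");
	return 0;
}

static void unregister_function(void)
{
	if (!function_enabled)
		return;
	function_enabled = false;
	printf("function probe unregistered\n");
}

static void flag_changed(unsigned mask, int set)
{
	if (mask & ITER_FUNCTION) {
		if (set)
			register_function(1);
		else
			unregister_function();
	}
}

int main(void)
{
	register_function(0);		/* tracer start: flag already set */
	flag_changed(ITER_FUNCTION, 0);	/* echo 0 > options/function      */
	flag_changed(ITER_FUNCTION, 1);	/* echo 1 > options/function      */
	return 0;
}
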
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 3c5c5dfea0b..bd90e1b0608 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -26,7 +26,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
- atomic_inc(&iter.tr->data[cpu]->disabled);
+ atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
old_userobj = trace_flags;
@@ -43,17 +43,17 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter.pos = -1;
- if (cpu_file == TRACE_PIPE_ALL_CPU) {
+ if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
- ring_buffer_read_prepare(iter.tr->buffer, cpu);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
}
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
- ring_buffer_read_prepare(iter.tr->buffer, cpu_file);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
}
@@ -83,7 +83,7 @@ out:
trace_flags = old_userobj;
for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
for_each_tracing_cpu(cpu)
@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv)
!cpu_online(cpu_file))
return KDB_BADINT;
} else {
- cpu_file = TRACE_PIPE_ALL_CPU;
+ cpu_file = RING_BUFFER_ALL_CPUS;
}
kdb_trap_printk++;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fd3c8aae55e..a5e8f4878bf 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -31,7 +31,7 @@ static void mmio_reset_data(struct trace_array *tr)
overrun_detected = false;
prev_overruns = 0;
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
}
static int mmio_trace_init(struct trace_array *tr)
@@ -128,7 +128,7 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
unsigned long cnt = atomic_xchg(&dropped_count, 0);
- unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+ unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
if (over > prev_overruns)
cnt += over - prev_overruns;
@@ -309,7 +309,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct mmiotrace_rw *rw)
{
struct ftrace_event_call *call = &event_mmiotrace_rw;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry;
int pc = preempt_count();
@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
struct trace_array *tr = mmio_trace_array;
- struct trace_array_cpu *data = tr->data[smp_processor_id()];
+ struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
__trace_mmiotrace_rw(tr, data, rw);
}
@@ -339,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct mmiotrace_map *map)
{
struct ftrace_event_call *call = &event_mmiotrace_map;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry;
int pc = preempt_count();
@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
struct trace_array_cpu *data;
preempt_disable();
- data = tr->data[smp_processor_id()];
+ data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
__trace_mmiotrace_map(tr, data, map);
preempt_enable();
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 697e88d1390..bb922d9ee51 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -14,7 +14,7 @@
/* must be a power of 2 */
#define EVENT_HASHSIZE 128
-DECLARE_RWSEM(trace_event_mutex);
+DECLARE_RWSEM(trace_event_sem);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
@@ -37,6 +37,22 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
return ret;
}
+enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct bputs_entry *field;
+ int ret;
+
+ trace_assign_type(field, entry);
+
+ ret = trace_seq_puts(s, field->str);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+}
+
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
@@ -397,6 +413,32 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
+int ftrace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *trace_event)
+{
+ struct ftrace_event_call *event;
+ struct trace_seq *s = &iter->seq;
+ struct trace_seq *p = &iter->tmp_seq;
+ struct trace_entry *entry;
+ int ret;
+
+ event = container_of(trace_event, struct ftrace_event_call, event);
+ entry = iter->ent;
+
+ if (entry->type != event->event.type) {
+ WARN_ON_ONCE(1);
+ return TRACE_TYPE_UNHANDLED;
+ }
+
+ trace_seq_init(p);
+ ret = trace_seq_printf(s, "%s: ", event->name);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return 0;
+}
+EXPORT_SYMBOL(ftrace_raw_output_prep);
+
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
@@ -617,7 +659,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
- unsigned long long abs_ts = iter->ts - iter->tr->time_start;
+ unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq;
@@ -783,12 +825,12 @@ static int trace_search_list(struct list_head **list)
void trace_event_read_lock(void)
{
- down_read(&trace_event_mutex);
+ down_read(&trace_event_sem);
}
void trace_event_read_unlock(void)
{
- up_read(&trace_event_mutex);
+ up_read(&trace_event_sem);
}
/**
@@ -811,7 +853,7 @@ int register_ftrace_event(struct trace_event *event)
unsigned key;
int ret = 0;
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
if (WARN_ON(!event))
goto out;
@@ -866,14 +908,14 @@ int register_ftrace_event(struct trace_event *event)
ret = event->type;
out:
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
/*
- * Used by module code with the trace_event_mutex held for write.
+ * Used by module code with the trace_event_sem held for write.
*/
int __unregister_ftrace_event(struct trace_event *event)
{
@@ -888,9 +930,9 @@ int __unregister_ftrace_event(struct trace_event *event)
*/
int unregister_ftrace_event(struct trace_event *event)
{
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__unregister_ftrace_event(event);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
return 0;
}
@@ -1217,6 +1259,64 @@ static struct trace_event trace_user_stack_event = {
.funcs = &trace_user_stack_funcs,
};
+/* TRACE_BPUTS */
+static enum print_line_t
+trace_bputs_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct bputs_entry *field;
+
+ trace_assign_type(field, entry);
+
+ if (!seq_print_ip_sym(s, field->ip, flags))
+ goto partial;
+
+ if (!trace_seq_puts(s, ": "))
+ goto partial;
+
+ if (!trace_seq_puts(s, field->str))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+
+static enum print_line_t
+trace_bputs_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct bputs_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ if (!trace_seq_printf(s, ": %lx : ", field->ip))
+ goto partial;
+
+ if (!trace_seq_puts(s, field->str))
+ goto partial;
+
+ return TRACE_TYPE_HANDLED;
+
+ partial:
+ return TRACE_TYPE_PARTIAL_LINE;
+}
+
+static struct trace_event_functions trace_bputs_funcs = {
+ .trace = trace_bputs_print,
+ .raw = trace_bputs_raw,
+};
+
+static struct trace_event trace_bputs_event = {
+ .type = TRACE_BPUTS,
+ .funcs = &trace_bputs_funcs,
+};
+
/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
@@ -1329,6 +1429,7 @@ static struct trace_event *events[] __initdata = {
&trace_wake_event,
&trace_stack_event,
&trace_user_stack_event,
+ &trace_bputs_event,
&trace_bprint_event,
&trace_print_event,
NULL
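
trace_bputs_print() above builds its line with trace_seq helpers that report failure when the per-event line buffer fills, and the caller maps that to TRACE_TYPE_PARTIAL_LINE. A small userspace model of that append-or-bail pattern (a fixed char buffer standing in for trace_seq; not the kernel API):

/* Illustrative only: each append reports failure when the fixed-size line
 * buffer is full, and the caller turns that into a "partial line" result
 * instead of overflowing. */
#include <stdio.h>
#include <string.h>

struct seq { char buf[64]; size_t len; };

/* Returns 1 on success, 0 if the string did not fit (like trace_seq_puts). */
static int seq_puts(struct seq *s, const char *str)
{
	size_t n = strlen(str);

	if (s->len + n >= sizeof(s->buf))
		return 0;
	memcpy(s->buf + s->len, str, n + 1);
	s->len += n;
	return 1;
}

static const char *print_bputs(struct seq *s, const char *ip, const char *msg)
{
	if (!seq_puts(s, ip) || !seq_puts(s, ": ") || !seq_puts(s, msg))
		return "TRACE_TYPE_PARTIAL_LINE";
	return "TRACE_TYPE_HANDLED";
}

int main(void)
{
	struct seq s = { .len = 0 };

	puts(print_bputs(&s, "do_sys_open+0x10", "hello from trace_bputs"));
	puts(s.buf);
	return 0;
}
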
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index c038eba0492..127a9d8c835 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -5,6 +5,8 @@
#include "trace.h"
extern enum print_line_t
+trace_print_bputs_msg_only(struct trace_iterator *iter);
+extern enum print_line_t
trace_print_bprintk_msg_only(struct trace_iterator *iter);
extern enum print_line_t
trace_print_printk_msg_only(struct trace_iterator *iter);
@@ -31,7 +33,7 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
/* used by module unregistering */
extern int __unregister_ftrace_event(struct trace_event *event);
-extern struct rw_semaphore trace_event_mutex;
+extern struct rw_semaphore trace_event_sem;
#define MAX_MEMHEX_BYTES 8
#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 3374c792ccd..4e98e3b257a 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -28,7 +28,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
unsigned long flags, int pc)
{
struct ftrace_event_call *call = &event_context_switch;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
@@ -69,7 +69,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
pc = preempt_count();
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = ctx_trace->data[cpu];
+ data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
if (likely(!atomic_read(&data->disabled)))
tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
@@ -86,7 +86,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct ftrace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
- struct ring_buffer *buffer = tr->buffer;
+ struct ring_buffer *buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc);
@@ -123,7 +123,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
pc = preempt_count();
local_irq_save(flags);
cpu = raw_smp_processor_id();
- data = ctx_trace->data[cpu];
+ data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
if (likely(!atomic_read(&data->disabled)))
tracing_sched_wakeup_trace(ctx_trace, wakee, current,
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fde652c9a51..fee77e15d81 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -37,6 +37,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);
static int save_flags;
+static bool function_enabled;
#define TRACE_DISPLAY_GRAPH 1
@@ -89,7 +90,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (cpu != wakeup_current_cpu)
goto out_enable;
- *data = tr->data[cpu];
+ *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -134,15 +135,60 @@ static struct ftrace_ops trace_ops __read_mostly =
};
#endif /* CONFIG_FUNCTION_TRACER */
-static int start_func_tracer(int graph)
+static int register_wakeup_function(int graph, int set)
{
int ret;
- if (!graph)
- ret = register_ftrace_function(&trace_ops);
- else
+ /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
+ if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
+ return 0;
+
+ if (graph)
ret = register_ftrace_graph(&wakeup_graph_return,
&wakeup_graph_entry);
+ else
+ ret = register_ftrace_function(&trace_ops);
+
+ if (!ret)
+ function_enabled = true;
+
+ return ret;
+}
+
+static void unregister_wakeup_function(int graph)
+{
+ if (!function_enabled)
+ return;
+
+ if (graph)
+ unregister_ftrace_graph();
+ else
+ unregister_ftrace_function(&trace_ops);
+
+ function_enabled = false;
+}
+
+static void wakeup_function_set(int set)
+{
+ if (set)
+ register_wakeup_function(is_graph(), 1);
+ else
+ unregister_wakeup_function(is_graph());
+}
+
+static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
+{
+ if (mask & TRACE_ITER_FUNCTION)
+ wakeup_function_set(set);
+
+ return trace_keep_overwrite(tracer, mask, set);
+}
+
+static int start_func_tracer(int graph)
+{
+ int ret;
+
+ ret = register_wakeup_function(graph, 0);
if (!ret && tracing_is_enabled())
tracer_enabled = 1;
@@ -156,10 +202,7 @@ static void stop_func_tracer(int graph)
{
tracer_enabled = 0;
- if (!graph)
- unregister_ftrace_function(&trace_ops);
- else
- unregister_ftrace_graph();
+ unregister_wakeup_function(graph);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -353,7 +396,7 @@ probe_wakeup_sched_switch(void *ignore,
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
if (likely(disabled != 1))
goto out;
@@ -365,7 +408,7 @@ probe_wakeup_sched_switch(void *ignore,
goto out_unlock;
/* The task we are waiting for is waking up */
- data = wakeup_trace->data[wakeup_cpu];
+ data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +430,7 @@ out_unlock:
arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
- atomic_dec(&wakeup_trace->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
static void __wakeup_reset(struct trace_array *tr)
@@ -405,7 +448,7 @@ static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
local_irq_save(flags);
arch_spin_lock(&wakeup_lock);
@@ -435,7 +478,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
return;
pc = preempt_count();
- disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+ disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
@@ -458,7 +501,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
local_save_flags(flags);
- data = wakeup_trace->data[wakeup_cpu];
+ data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
@@ -472,7 +515,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
out_locked:
arch_spin_unlock(&wakeup_lock);
out:
- atomic_dec(&wakeup_trace->data[cpu]->disabled);
+ atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
static void start_wakeup_tracer(struct trace_array *tr)
@@ -543,8 +586,8 @@ static int __wakeup_tracer_init(struct trace_array *tr)
save_flags = trace_flags;
/* non overwrite screws up the latency tracers */
- set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
- set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0;
wakeup_trace = tr;
@@ -573,8 +616,8 @@ static void wakeup_tracer_reset(struct trace_array *tr)
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
- set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
- set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
+ set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}
static void wakeup_tracer_start(struct trace_array *tr)
@@ -600,7 +643,7 @@ static struct tracer wakeup_tracer __read_mostly =
.print_line = wakeup_print_line,
.flags = &tracer_flags,
.set_flag = wakeup_set_flag,
- .flag_changed = trace_keep_overwrite,
+ .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
@@ -622,7 +665,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.print_line = wakeup_print_line,
.flags = &tracer_flags,
.set_flag = wakeup_set_flag,
- .flag_changed = trace_keep_overwrite,
+ .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup,
#endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 51c819c12c2..55e2cf66967 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -21,13 +21,13 @@ static inline int trace_valid_entry(struct trace_entry *entry)
return 0;
}
-static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
+static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
struct ring_buffer_event *event;
struct trace_entry *entry;
unsigned int loops = 0;
- while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
+ while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
entry = ring_buffer_event_data(event);
/*
@@ -58,7 +58,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
* Test the trace buffer to see if all the elements
* are still sane.
*/
-static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
+static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
unsigned long flags, cnt = 0;
int cpu, ret = 0;
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
local_irq_save(flags);
arch_spin_lock(&ftrace_max_lock);
- cnt = ring_buffer_entries(tr->buffer);
+ cnt = ring_buffer_entries(buf->buffer);
/*
* The trace_test_buffer_cpu runs a while loop to consume all data.
@@ -78,7 +78,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
*/
tracing_off();
for_each_possible_cpu(cpu) {
- ret = trace_test_buffer_cpu(tr, cpu);
+ ret = trace_test_buffer_cpu(buf, cpu);
if (ret)
break;
}
@@ -355,7 +355,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
msleep(100);
/* we should have nothing in the buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
if (ret)
goto out;
@@ -376,7 +376,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
ftrace_enabled = 0;
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
tracing_start();
/* we should only have one item */
@@ -666,7 +666,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ftrace_enabled = 0;
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
trace->reset(tr);
tracing_start();
@@ -703,8 +703,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;
/* Wrap the real function entry probe to avoid possible hanging */
@@ -714,8 +712,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
- if (ftrace_dump_on_oops)
- __ftrace_dump(false, DUMP_ALL);
+ if (ftrace_dump_on_oops) {
+ ftrace_dump(DUMP_ALL);
+ /* ftrace_dump() disables tracing */
+ tracing_on();
+ }
return 0;
}
@@ -737,7 +738,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* Simulate the init() callback but we attach a watchdog callback
* to detect and recover from possible hangs
*/
- tracing_reset_online_cpus(tr);
+ tracing_reset_online_cpus(&tr->trace_buffer);
set_graph_array(tr);
ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry_watchdog);
@@ -760,7 +761,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
trace->reset(tr);
tracing_start();
@@ -815,9 +816,9 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(tr, NULL);
+ ret = trace_test_buffer(&tr->trace_buffer, NULL);
if (!ret)
- ret = trace_test_buffer(&max_tr, &count);
+ ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
tracing_start();
@@ -877,9 +878,9 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(tr, NULL);
+ ret = trace_test_buffer(&tr->trace_buffer, NULL);
if (!ret)
- ret = trace_test_buffer(&max_tr, &count);
+ ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
tracing_start();
@@ -943,11 +944,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(tr, NULL);
+ ret = trace_test_buffer(&tr->trace_buffer, NULL);
if (ret)
goto out;
- ret = trace_test_buffer(&max_tr, &count);
+ ret = trace_test_buffer(&tr->max_buffer, &count);
if (ret)
goto out;
@@ -973,11 +974,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(tr, NULL);
+ ret = trace_test_buffer(&tr->trace_buffer, NULL);
if (ret)
goto out;
- ret = trace_test_buffer(&max_tr, &count);
+ ret = trace_test_buffer(&tr->max_buffer, &count);
if (!ret && !count) {
printk(KERN_CONT ".. no entries found ..");
@@ -1084,10 +1085,10 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
- ret = trace_test_buffer(tr, NULL);
+ ret = trace_test_buffer(&tr->trace_buffer, NULL);
printk("ret = %d\n", ret);
if (!ret)
- ret = trace_test_buffer(&max_tr, &count);
+ ret = trace_test_buffer(&tr->max_buffer, &count);
trace->reset(tr);
@@ -1126,7 +1127,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
/* stop the tracing. */
tracing_stop();
/* check the trace buffer */
- ret = trace_test_buffer(tr, &count);
+ ret = trace_test_buffer(&tr->trace_buffer, &count);
trace->reset(tr);
tracing_start();
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 83a8b5b7bd3..b20428c5efe 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -20,13 +20,24 @@
#define STACK_TRACE_ENTRIES 500
+#ifdef CC_USING_FENTRY
+# define fentry 1
+#else
+# define fentry 0
+#endif
+
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+/*
+ * Reserve one entry for the passed in ip. This will allow
+ * us to remove most or all of the stack size overhead
+ * added by the stack tracer itself.
+ */
static struct stack_trace max_stack_trace = {
- .max_entries = STACK_TRACE_ENTRIES,
- .entries = stack_dump_trace,
+ .max_entries = STACK_TRACE_ENTRIES - 1,
+ .entries = &stack_dump_trace[1],
};
static unsigned long max_stack_size;
@@ -39,25 +50,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled;
static int last_stack_tracer_enabled;
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags;
unsigned long *p, *top, *start;
+ static int tracer_frame;
+ int frame_size = ACCESS_ONCE(tracer_frame);
int i;
- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size;
+ /* Remove the frame of the tracer */
+ this_size -= frame_size;
if (this_size <= max_stack_size)
return;
/* we do not handle interrupt stacks yet */
- if (!object_is_on_stack(&this_size))
+ if (!object_is_on_stack(stack))
return;
local_irq_save(flags);
arch_spin_lock(&max_stack_lock);
+ /* In case another CPU set the tracer_frame on us */
+ if (unlikely(!frame_size))
+ this_size -= tracer_frame;
+
/* a race could have already updated it */
if (this_size <= max_stack_size)
goto out;
@@ -70,10 +90,18 @@ static inline void check_stack(void)
save_stack_trace(&max_stack_trace);
/*
+ * Add the passed in ip from the function tracer.
+ * Searching for this on the stack will skip over
+ * most of the overhead from the stack tracer itself.
+ */
+ stack_dump_trace[0] = ip;
+ max_stack_trace.nr_entries++;
+
+ /*
* Now find where in the stack these are.
*/
i = 0;
- start = &this_size;
+ start = stack;
top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
@@ -97,6 +125,18 @@ static inline void check_stack(void)
found = 1;
/* Start the search from here */
start = p + 1;
+ /*
+ * We do not want to show the overhead
+ * of the stack tracer stack in the
+ * max stack. If we haven't figured
+ * out what that is, then figure it out
+ * now.
+ */
+ if (unlikely(!tracer_frame) && i == 1) {
+ tracer_frame = (p - stack) *
+ sizeof(unsigned long);
+ max_stack_size -= tracer_frame;
+ }
}
}
@@ -113,6 +153,7 @@ static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
+ unsigned long stack;
int cpu;
preempt_disable_notrace();
@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (per_cpu(trace_active, cpu)++ != 0)
goto out;
- check_stack();
+ /*
+ * When fentry is used, the traced function does not get
+ * its stack frame set up, and we lose the parent.
+ * The ip is pretty useless because the function tracer
+ * was called before that function set up its stack frame.
+ * In this case, we use the parent ip.
+ *
+ * By adding the return address of either the parent ip
+ * or the current ip we can disregard most of the stack usage
+ * caused by the stack tracer itself.
+ *
+ * The function tracer always reports the address of where the
+ * mcount call was, but the stack will hold the return address.
+ */
+ if (fentry)
+ ip = parent_ip;
+ else
+ ip += MCOUNT_INSN_SIZE;
+
+ check_stack(ip, &stack);
out:
per_cpu(trace_active, cpu)--;
@@ -371,6 +431,8 @@ static __init int stack_trace_init(void)
struct dentry *d_tracer;
d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
trace_create_file("stack_max_size", 0644, d_tracer,
&max_stack_size, &stack_max_size_fops);
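
check_stack() measures stack usage as THREAD_SIZE minus the stack pointer's offset within its THREAD_SIZE-aligned region, then subtracts the tracer's own frame (tracer_frame) so the stack tracer does not inflate the recorded maximum. A userspace sketch of just that arithmetic, with an assumed THREAD_SIZE of 8 KiB:

/* Illustrative only: for a stack growing down inside a THREAD_SIZE-aligned
 * region, bytes in use = THREAD_SIZE - (sp & (THREAD_SIZE - 1)); the
 * tracer's own frame is subtracted from the measurement. */
#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed; arch-dependent in the kernel */

static unsigned long stack_usage(unsigned long sp, unsigned long tracer_frame)
{
	unsigned long used = THREAD_SIZE - (sp & (THREAD_SIZE - 1));

	return used - tracer_frame;	/* ignore the tracer's own frame */
}

int main(void)
{
	unsigned long base = 0x100000;			/* THREAD_SIZE aligned   */
	unsigned long sp = base + THREAD_SIZE - 1504;	/* 1504 bytes in use     */

	printf("max stack: %lu bytes\n", stack_usage(sp, 64));	/* prints 1440 */
	return 0;
}
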
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 96cffb269e7..847f88a6194 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
struct dentry *d_tracing;
d_tracing = tracing_init_dentry();
+ if (!d_tracing)
+ return 0;
stat_dir = debugfs_create_dir("trace_stat", d_tracing);
if (!stat_dir)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 7a809e32105..8f2ac73c7a5 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -12,10 +12,6 @@
#include "trace.h"
static DEFINE_MUTEX(syscall_trace_lock);
-static int sys_refcount_enter;
-static int sys_refcount_exit;
-static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
static int syscall_enter_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
@@ -41,7 +37,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
/*
* Only compare after the "sys" prefix. Archs that use
* syscall wrappers may have syscalls symbols aliases prefixed
- * with "SyS" instead of "sys", leading to an unwanted
+ * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
* mismatch.
*/
return !strcmp(sym + 3, name + 3);
@@ -265,7 +261,7 @@ static void free_syscall_print_fmt(struct ftrace_event_call *call)
kfree(call->print_fmt);
}
-static int syscall_enter_define_fields(struct ftrace_event_call *call)
+static int __init syscall_enter_define_fields(struct ftrace_event_call *call)
{
struct syscall_trace_enter trace;
struct syscall_metadata *meta = call->data;
@@ -288,7 +284,7 @@ static int syscall_enter_define_fields(struct ftrace_event_call *call)
return ret;
}
-static int syscall_exit_define_fields(struct ftrace_event_call *call)
+static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
{
struct syscall_trace_exit trace;
int ret;
@@ -303,8 +299,9 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call)
return ret;
}
-static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
+ struct trace_array *tr = data;
struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
@@ -315,7 +312,7 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
- if (!test_bit(syscall_nr, enabled_enter_syscalls))
+ if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
@@ -324,7 +321,8 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
- event = trace_current_buffer_lock_reserve(&buffer,
+ buffer = tr->trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
sys_data->enter_event->event.type, size, 0, 0);
if (!event)
return;
@@ -338,8 +336,9 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
-static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
+ struct trace_array *tr = data;
struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data;
struct ring_buffer_event *event;
@@ -349,14 +348,15 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
return;
- if (!test_bit(syscall_nr, enabled_exit_syscalls))
+ if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
return;
sys_data = syscall_nr_to_meta(syscall_nr);
if (!sys_data)
return;
- event = trace_current_buffer_lock_reserve(&buffer,
+ buffer = tr->trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
if (!event)
return;
@@ -370,8 +370,10 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}
-static int reg_event_syscall_enter(struct ftrace_event_call *call)
+static int reg_event_syscall_enter(struct ftrace_event_file *file,
+ struct ftrace_event_call *call)
{
+ struct trace_array *tr = file->tr;
int ret = 0;
int num;
@@ -379,33 +381,37 @@ static int reg_event_syscall_enter(struct ftrace_event_call *call)
if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
- if (!sys_refcount_enter)
- ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
+ if (!tr->sys_refcount_enter)
+ ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
if (!ret) {
- set_bit(num, enabled_enter_syscalls);
- sys_refcount_enter++;
+ set_bit(num, tr->enabled_enter_syscalls);
+ tr->sys_refcount_enter++;
}
mutex_unlock(&syscall_trace_lock);
return ret;
}
-static void unreg_event_syscall_enter(struct ftrace_event_call *call)
+static void unreg_event_syscall_enter(struct ftrace_event_file *file,
+ struct ftrace_event_call *call)
{
+ struct trace_array *tr = file->tr;
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return;
mutex_lock(&syscall_trace_lock);
- sys_refcount_enter--;
- clear_bit(num, enabled_enter_syscalls);
- if (!sys_refcount_enter)
- unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
+ tr->sys_refcount_enter--;
+ clear_bit(num, tr->enabled_enter_syscalls);
+ if (!tr->sys_refcount_enter)
+ unregister_trace_sys_enter(ftrace_syscall_enter, tr);
mutex_unlock(&syscall_trace_lock);
}
-static int reg_event_syscall_exit(struct ftrace_event_call *call)
+static int reg_event_syscall_exit(struct ftrace_event_file *file,
+ struct ftrace_event_call *call)
{
+ struct trace_array *tr = file->tr;
int ret = 0;
int num;
@@ -413,28 +419,30 @@ static int reg_event_syscall_exit(struct ftrace_event_call *call)
if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return -ENOSYS;
mutex_lock(&syscall_trace_lock);
- if (!sys_refcount_exit)
- ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
+ if (!tr->sys_refcount_exit)
+ ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
if (!ret) {
- set_bit(num, enabled_exit_syscalls);
- sys_refcount_exit++;
+ set_bit(num, tr->enabled_exit_syscalls);
+ tr->sys_refcount_exit++;
}
mutex_unlock(&syscall_trace_lock);
return ret;
}
-static void unreg_event_syscall_exit(struct ftrace_event_call *call)
+static void unreg_event_syscall_exit(struct ftrace_event_file *file,
+ struct ftrace_event_call *call)
{
+ struct trace_array *tr = file->tr;
int num;
num = ((struct syscall_metadata *)call->data)->syscall_nr;
if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
return;
mutex_lock(&syscall_trace_lock);
- sys_refcount_exit--;
- clear_bit(num, enabled_exit_syscalls);
- if (!sys_refcount_exit)
- unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
+ tr->sys_refcount_exit--;
+ clear_bit(num, tr->enabled_exit_syscalls);
+ if (!tr->sys_refcount_exit)
+ unregister_trace_sys_exit(ftrace_syscall_exit, tr);
mutex_unlock(&syscall_trace_lock);
}
@@ -471,7 +479,7 @@ struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
-struct ftrace_event_class event_class_syscall_enter = {
+struct ftrace_event_class __refdata event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
@@ -479,7 +487,7 @@ struct ftrace_event_class event_class_syscall_enter = {
.raw_init = init_syscall_trace,
};
-struct ftrace_event_class event_class_syscall_exit = {
+struct ftrace_event_class __refdata event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
@@ -685,11 +693,13 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
static int syscall_enter_register(struct ftrace_event_call *event,
enum trace_reg type, void *data)
{
+ struct ftrace_event_file *file = data;
+
switch (type) {
case TRACE_REG_REGISTER:
- return reg_event_syscall_enter(event);
+ return reg_event_syscall_enter(file, event);
case TRACE_REG_UNREGISTER:
- unreg_event_syscall_enter(event);
+ unreg_event_syscall_enter(file, event);
return 0;
#ifdef CONFIG_PERF_EVENTS
@@ -711,11 +721,13 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data)
{
+ struct ftrace_event_file *file = data;
+
switch (type) {
case TRACE_REG_REGISTER:
- return reg_event_syscall_exit(event);
+ return reg_event_syscall_exit(file, event);
case TRACE_REG_UNREGISTER:
- unreg_event_syscall_exit(event);
+ unreg_event_syscall_exit(file, event);
return 0;
#ifdef CONFIG_PERF_EVENTS
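The trace_syscalls.c hunks above move the enable bitmaps and refcounts from file-scope globals into the trace_array and thread the owning instance through the tracepoint's data cookie, so each tracing instance filters syscalls independently. A minimal stand-alone C sketch of that cookie pattern (illustrative only; the struct and function names here are hypothetical, not the kernel's):

#include <stdio.h>

#define NR_EVENTS 64

/* stand-in for struct trace_array: each instance owns its enable map */
struct instance {
	unsigned char enabled[NR_EVENTS];
	int refcount;			/* how many events are enabled */
};

/* the probe gets back exactly the instance it was registered with */
static void probe(void *data, int event_id)
{
	struct instance *inst = data;

	if (!inst->enabled[event_id])
		return;
	printf("event %d hit on instance %p\n", event_id, (void *)inst);
}

static void enable_event(struct instance *inst, int event_id)
{
	if (!inst->refcount) {
		/* first enabled event on this instance: this is the point
		 * where the kernel registers the probe with the instance as
		 * the data cookie instead of NULL */
	}
	inst->enabled[event_id] = 1;
	inst->refcount++;
}

int main(void)
{
	struct instance a = { { 0 }, 0 }, b = { { 0 }, 0 };

	enable_event(&a, 3);
	probe(&a, 3);		/* fires: a enabled event 3 */
	probe(&b, 3);		/* silent: b's map is independent */
	return 0;
}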
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 0c05a459204..29f26540e9c 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -112,7 +112,8 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry,
int nr_probes = 0;
struct tracepoint_func *old, *new;
- WARN_ON(!probe);
+ if (WARN_ON(!probe))
+ return ERR_PTR(-EINVAL);
debug_print_probes(entry);
old = entry->funcs;
@@ -152,13 +153,18 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
debug_print_probes(entry);
/* (N -> M), (N > 1, M >= 0) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
- if (!probe ||
- (old[nr_probes].func == probe &&
- old[nr_probes].data == data))
- nr_del++;
+ if (probe) {
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if (old[nr_probes].func == probe &&
+ old[nr_probes].data == data)
+ nr_del++;
+ }
}
+ /*
+ * If probe is NULL, then nr_probes = nr_del = 0, and then the
+ * entire entry will be removed.
+ */
if (nr_probes - nr_del == 0) {
/* N -> 0, (N > 1) */
entry->funcs = NULL;
@@ -173,8 +179,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
if (new == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; old[i].func; i++)
- if (probe &&
- (old[i].func != probe || old[i].data != data))
+ if (old[i].func != probe || old[i].data != data)
new[j++] = old[i];
new[nr_probes - nr_del].func = NULL;
entry->refcount = nr_probes - nr_del;
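With the tracepoint.c change above, passing a NULL probe to the removal path now means "remove every probe": the counting loop is skipped, nr_probes and nr_del both stay 0, and the whole entry is dropped. A small stand-alone sketch of that filtering logic (hypothetical names, plain calloc/free instead of the kernel allocators):

#include <stdlib.h>

struct func_entry {
	void (*func)(void *);
	void *data;
};

/*
 * Return a NULL-terminated copy of @old with every entry matching
 * (@func, @data) removed. A NULL @func removes all of them: the counting
 * loop is skipped, nr_probes - nr_del is 0, and no array is rebuilt.
 * (The kernel distinguishes "entry gone" from allocation failure with
 * ERR_PTR(); this sketch returns NULL for both.)
 */
static struct func_entry *remove_probe(struct func_entry *old,
				       void (*func)(void *), void *data)
{
	int nr_probes = 0, nr_del = 0, i, j = 0;
	struct func_entry *new;

	if (func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == func &&
			    old[nr_probes].data == data)
				nr_del++;
	}
	if (nr_probes - nr_del == 0)
		return NULL;			/* the whole entry goes away */

	new = calloc(nr_probes - nr_del + 1, sizeof(*new));
	if (!new)
		return NULL;
	for (i = 0; old[i].func; i++)
		if (old[i].func != func || old[i].data != data)
			new[j++] = old[i];
	return new;
}

static void probe_a(void *data) { (void)data; }
static void probe_b(void *data) { (void)data; }

int main(void)
{
	struct func_entry old[] = {
		{ probe_a, NULL }, { probe_b, NULL }, { NULL, NULL }
	};
	struct func_entry *new = remove_probe(old, probe_a, NULL);

	/* new now holds only probe_b; remove_probe(old, NULL, NULL)
	 * would have dropped the whole array instead */
	free(new);
	return 0;
}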
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b48cd597145..154aa12af48 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,7 +41,11 @@
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
+#include <linux/jhash.h>
#include <linux/hashtable.h>
+#include <linux/rculist.h>
+#include <linux/nodemask.h>
+#include <linux/moduleparam.h>
#include "workqueue_internal.h"
@@ -58,12 +62,11 @@ enum {
* %WORKER_UNBOUND set and concurrency management disabled, and may
* be executing on any CPU. The pool behaves as an unbound one.
*
- * Note that DISASSOCIATED can be flipped only while holding
- * assoc_mutex to avoid changing binding state while
+ * Note that DISASSOCIATED should be flipped only while holding
+ * manager_mutex to avoid changing binding state while
* create_worker() is in progress.
*/
POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
- POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
POOL_FREEZING = 1 << 3, /* freeze in progress */
@@ -74,12 +77,14 @@ enum {
WORKER_PREP = 1 << 3, /* preparing to run works */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
+ WORKER_REBOUND = 1 << 8, /* worker was rebound */
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND |
- WORKER_CPU_INTENSIVE,
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
+ WORKER_UNBOUND | WORKER_REBOUND,
NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
+ UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
@@ -97,6 +102,8 @@ enum {
*/
RESCUER_NICE_LEVEL = -20,
HIGHPRI_NICE_LEVEL = -20,
+
+ WQ_NAME_LEN = 24,
};
/*
@@ -115,16 +122,26 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
- * F: wq->flush_mutex protected.
+ * MG: pool->manager_mutex and pool->lock protected. Writes require both
+ * locks. Reads can happen under either lock.
+ *
+ * PL: wq_pool_mutex protected.
+ *
+ * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ *
+ * WQ: wq->mutex protected.
*
- * W: workqueue_lock protected.
+ * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ *
+ * MD: wq_mayday_lock protected.
*/
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
spinlock_t lock; /* the pool lock */
- unsigned int cpu; /* I: the associated cpu */
+ int cpu; /* I: the associated cpu */
+ int node; /* I: the associated node ID */
int id; /* I: pool ID */
unsigned int flags; /* X: flags */
@@ -138,12 +155,18 @@ struct worker_pool {
struct timer_list idle_timer; /* L: worker idle timeout */
struct timer_list mayday_timer; /* L: SOS timer for workers */
- /* workers are chained either in busy_hash or idle_list */
+ /* a worker is either on busy_hash or idle_list, or is the manager */
DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
/* L: hash of busy workers */
- struct mutex assoc_mutex; /* protect POOL_DISASSOCIATED */
- struct ida worker_ida; /* L: for worker IDs */
+ /* see manage_workers() for details on the two manager mutexes */
+ struct mutex manager_arb; /* manager arbitration */
+ struct mutex manager_mutex; /* manager exclusion */
+ struct idr worker_idr; /* MG: worker IDs and iteration */
+
+ struct workqueue_attrs *attrs; /* I: worker attributes */
+ struct hlist_node hash_node; /* PL: unbound_pool_hash node */
+ int refcnt; /* PL: refcnt for unbound pools */
/*
* The current concurrency level. As it's likely to be accessed
@@ -151,6 +174,12 @@ struct worker_pool {
* cacheline.
*/
atomic_t nr_running ____cacheline_aligned_in_smp;
+
+ /*
+ * Destruction of pool is sched-RCU protected to allow dereferences
+ * from get_work_pool().
+ */
+ struct rcu_head rcu;
} ____cacheline_aligned_in_smp;
/*
@@ -164,75 +193,107 @@ struct pool_workqueue {
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
int flush_color; /* L: flushing color */
+ int refcnt; /* L: reference count */
int nr_in_flight[WORK_NR_COLORS];
/* L: nr of in_flight works */
int nr_active; /* L: nr of active works */
int max_active; /* L: max active works */
struct list_head delayed_works; /* L: delayed works */
-};
+ struct list_head pwqs_node; /* WR: node on wq->pwqs */
+ struct list_head mayday_node; /* MD: node on wq->maydays */
+
+ /*
+ * Release of unbound pwq is punted to system_wq. See put_pwq()
+ * and pwq_unbound_release_workfn() for details. pool_workqueue
+ * itself is also sched-RCU protected so that the first pwq can be
+ * determined without grabbing wq->mutex.
+ */
+ struct work_struct unbound_release_work;
+ struct rcu_head rcu;
+} __aligned(1 << WORK_STRUCT_FLAG_BITS);
/*
* Structure used to wait for workqueue flush.
*/
struct wq_flusher {
- struct list_head list; /* F: list of flushers */
- int flush_color; /* F: flush color waiting for */
+ struct list_head list; /* WQ: list of flushers */
+ int flush_color; /* WQ: flush color waiting for */
struct completion done; /* flush completion */
};
-/*
- * All cpumasks are assumed to be always set on UP and thus can't be
- * used to determine whether there's something to be done.
- */
-#ifdef CONFIG_SMP
-typedef cpumask_var_t mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask) \
- cpumask_test_and_set_cpu((cpu), (mask))
-#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
-#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
-#define free_mayday_mask(mask) free_cpumask_var((mask))
-#else
-typedef unsigned long mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
-#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
-#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
-#define alloc_mayday_mask(maskp, gfp) true
-#define free_mayday_mask(mask) do { } while (0)
-#endif
+struct wq_device;
/*
- * The externally visible workqueue abstraction is an array of
- * per-CPU workqueues:
+ * The externally visible workqueue. It relays the issued work items to
+ * the appropriate worker_pool through its pool_workqueues.
*/
struct workqueue_struct {
- unsigned int flags; /* W: WQ_* flags */
- union {
- struct pool_workqueue __percpu *pcpu;
- struct pool_workqueue *single;
- unsigned long v;
- } pool_wq; /* I: pwq's */
- struct list_head list; /* W: list of all workqueues */
-
- struct mutex flush_mutex; /* protects wq flushing */
- int work_color; /* F: current work color */
- int flush_color; /* F: current flush color */
+ struct list_head pwqs; /* WR: all pwqs of this wq */
+ struct list_head list; /* PL: list of all workqueues */
+
+ struct mutex mutex; /* protects this wq */
+ int work_color; /* WQ: current work color */
+ int flush_color; /* WQ: current flush color */
atomic_t nr_pwqs_to_flush; /* flush in progress */
- struct wq_flusher *first_flusher; /* F: first flusher */
- struct list_head flusher_queue; /* F: flush waiters */
- struct list_head flusher_overflow; /* F: flush overflow list */
+ struct wq_flusher *first_flusher; /* WQ: first flusher */
+ struct list_head flusher_queue; /* WQ: flush waiters */
+ struct list_head flusher_overflow; /* WQ: flush overflow list */
- mayday_mask_t mayday_mask; /* cpus requesting rescue */
+ struct list_head maydays; /* MD: pwqs requesting rescue */
struct worker *rescuer; /* I: rescue worker */
- int nr_drainers; /* W: drain in progress */
- int saved_max_active; /* W: saved pwq max_active */
+ int nr_drainers; /* WQ: drain in progress */
+ int saved_max_active; /* WQ: saved pwq max_active */
+
+ struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */
+ struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */
+
+#ifdef CONFIG_SYSFS
+ struct wq_device *wq_dev; /* I: for sysfs interface */
+#endif
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
- char name[]; /* I: workqueue name */
+ char name[WQ_NAME_LEN]; /* I: workqueue name */
+
+ /* hot fields used during command issue, aligned to cacheline */
+ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
+ struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
+ struct pool_workqueue __rcu *numa_pwq_tbl[]; /* WR: unbound pwqs indexed by node */
};
+static struct kmem_cache *pwq_cache;
+
+static int wq_numa_tbl_len; /* highest possible NUMA node id + 1 */
+static cpumask_var_t *wq_numa_possible_cpumask;
+ /* possible CPUs of each node */
+
+static bool wq_disable_numa;
+module_param_named(disable_numa, wq_disable_numa, bool, 0444);
+
+static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
+
+/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
+static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+
+static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
+static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+
+static LIST_HEAD(workqueues); /* PL: list of all workqueues */
+static bool workqueue_freezing; /* PL: have wqs started freezing? */
+
+/* the per-cpu worker pools */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+ cpu_worker_pools);
+
+static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
+
+/* PL: hash of all unbound pools keyed by pool->attrs */
+static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
+
+/* I: attributes used when instantiating standard unbound pools on demand */
+static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -244,64 +305,87 @@ EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
+static int worker_thread(void *__worker);
+static void copy_workqueue_attrs(struct workqueue_attrs *to,
+ const struct workqueue_attrs *from);
+
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
-#define for_each_std_worker_pool(pool, cpu) \
- for ((pool) = &std_worker_pools(cpu)[0]; \
- (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
+#define assert_rcu_or_pool_mutex() \
+ rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+ lockdep_is_held(&wq_pool_mutex), \
+ "sched RCU or wq_pool_mutex should be held")
-#define for_each_busy_worker(worker, i, pool) \
- hash_for_each(pool->busy_hash, i, worker, hentry)
+#define assert_rcu_or_wq_mutex(wq) \
+ rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+ lockdep_is_held(&wq->mutex), \
+ "sched RCU or wq->mutex should be held")
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
- unsigned int sw)
-{
- if (cpu < nr_cpu_ids) {
- if (sw & 1) {
- cpu = cpumask_next(cpu, mask);
- if (cpu < nr_cpu_ids)
- return cpu;
- }
- if (sw & 2)
- return WORK_CPU_UNBOUND;
- }
- return WORK_CPU_END;
-}
+#ifdef CONFIG_LOCKDEP
+#define assert_manager_or_pool_lock(pool) \
+ WARN_ONCE(debug_locks && \
+ !lockdep_is_held(&(pool)->manager_mutex) && \
+ !lockdep_is_held(&(pool)->lock), \
+ "pool->manager_mutex or ->lock should be held")
+#else
+#define assert_manager_or_pool_lock(pool) do { } while (0)
+#endif
-static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
- struct workqueue_struct *wq)
-{
- return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
-}
+#define for_each_cpu_worker_pool(pool, cpu) \
+ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
+ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+ (pool)++)
-/*
- * CPU iterators
+/**
+ * for_each_pool - iterate through all worker_pools in the system
+ * @pool: iteration cursor
+ * @pi: integer used for iteration
*
- * An extra cpu number is defined using an invalid cpu number
- * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU. The following iterators are similar to for_each_*_cpu()
- * iterators but also considers the unbound CPU.
+ * This must be called either with wq_pool_mutex held or sched RCU read
+ * locked. If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
*
- * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
- * for_each_pwq_cpu() : possible CPUs for bound workqueues,
- * WORK_CPU_UNBOUND for unbound workqueues
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
*/
-#define for_each_wq_cpu(cpu) \
- for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3); \
- (cpu) < WORK_CPU_END; \
- (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
+#define for_each_pool(pool, pi) \
+ idr_for_each_entry(&worker_pool_idr, pool, pi) \
+ if (({ assert_rcu_or_pool_mutex(); false; })) { } \
+ else
-#define for_each_online_wq_cpu(cpu) \
- for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3); \
- (cpu) < WORK_CPU_END; \
- (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
+/**
+ * for_each_pool_worker - iterate through all workers of a worker_pool
+ * @worker: iteration cursor
+ * @wi: integer used for iteration
+ * @pool: worker_pool to iterate workers of
+ *
+ * This must be called with either @pool->manager_mutex or ->lock held.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
+ */
+#define for_each_pool_worker(worker, wi, pool) \
+ idr_for_each_entry(&(pool)->worker_idr, (worker), (wi)) \
+ if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+ else
-#define for_each_pwq_cpu(cpu, wq) \
- for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
- (cpu) < WORK_CPU_END; \
- (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
+/**
+ * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ *
+ * This must be called either with wq->mutex held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
+ */
+#define for_each_pwq(pwq, wq) \
+ list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
+ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
+ else
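The iteration macros above fold a lockdep check into the loop with a GNU statement expression that always evaluates to false, so the assertion runs on every pass while the caller's loop body hangs off the trailing else. A tiny stand-alone illustration of the idiom, using a plain assert() in place of the lockdep helpers (names are made up):

#include <assert.h>
#include <stdio.h>

static int locked;			/* stand-in for "the right lock is held" */

#define assert_locked()	assert(locked)

/*
 * ({ assert_locked(); 0; }) is a GNU statement expression: it runs the
 * assertion and evaluates to 0, so the if-branch is never taken and the
 * caller's loop body attaches to the trailing else, once per iteration.
 */
#define for_each_slot(i, n)						\
	for ((i) = 0; (i) < (n); (i)++)					\
		if (({ assert_locked(); 0; })) { }			\
		else

int main(void)
{
	int i;

	locked = 1;			/* "take the lock" */
	for_each_slot(i, 3)
		printf("slot %d\n", i);
	return 0;
}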
#ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -419,77 +503,35 @@ static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
-/* Serializes the accesses to the list of workqueues. */
-static DEFINE_SPINLOCK(workqueue_lock);
-static LIST_HEAD(workqueues);
-static bool workqueue_freezing; /* W: have wqs started freezing? */
-
-/*
- * The CPU and unbound standard worker pools. The unbound ones have
- * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
- */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- cpu_std_worker_pools);
-static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
-
-/* idr of all pools */
-static DEFINE_MUTEX(worker_pool_idr_mutex);
-static DEFINE_IDR(worker_pool_idr);
-
-static int worker_thread(void *__worker);
-
-static struct worker_pool *std_worker_pools(int cpu)
-{
- if (cpu != WORK_CPU_UNBOUND)
- return per_cpu(cpu_std_worker_pools, cpu);
- else
- return unbound_std_worker_pools;
-}
-
-static int std_worker_pool_pri(struct worker_pool *pool)
-{
- return pool - std_worker_pools(pool->cpu);
-}
-
/* allocate ID and assign it to @pool */
static int worker_pool_assign_id(struct worker_pool *pool)
{
int ret;
- mutex_lock(&worker_pool_idr_mutex);
+ lockdep_assert_held(&wq_pool_mutex);
+
ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
- if (ret >= 0)
+ if (ret >= 0) {
pool->id = ret;
- mutex_unlock(&worker_pool_idr_mutex);
-
- return ret < 0 ? ret : 0;
+ return 0;
+ }
+ return ret;
}
-/*
- * Lookup worker_pool by id. The idr currently is built during boot and
- * never modified. Don't worry about locking for now.
+/**
+ * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
+ * @wq: the target workqueue
+ * @node: the node ID
+ *
+ * This must be called either with wq->mutex held or sched RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
*/
-static struct worker_pool *worker_pool_by_id(int pool_id)
+static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
+ int node)
{
- return idr_find(&worker_pool_idr, pool_id);
-}
-
-static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
-{
- struct worker_pool *pools = std_worker_pools(cpu);
-
- return &pools[highpri];
-}
-
-static struct pool_workqueue *get_pwq(unsigned int cpu,
- struct workqueue_struct *wq)
-{
- if (!(wq->flags & WQ_UNBOUND)) {
- if (likely(cpu < nr_cpu_ids))
- return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
- } else if (likely(cpu == WORK_CPU_UNBOUND))
- return wq->pool_wq.single;
- return NULL;
+ assert_rcu_or_wq_mutex(wq);
+ return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}
static unsigned int work_color_to_flags(int color)
@@ -531,7 +573,7 @@ static int work_next_color(int color)
static inline void set_work_data(struct work_struct *work, unsigned long data,
unsigned long flags)
{
- BUG_ON(!work_pending(work));
+ WARN_ON_ONCE(!work_pending(work));
atomic_long_set(&work->data, data | flags | work_static(work));
}
@@ -583,13 +625,23 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Return the worker_pool @work was last associated with. %NULL if none.
+ *
+ * Pools are created and destroyed under wq_pool_mutex, and allow read
+ * access under sched-RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or with preemption disabled.
+ *
+ * All fields of the returned pool are accessible as long as the above
+ * mentioned locking is in effect. If the returned pool needs to be used
+ * beyond the critical section, the caller is responsible for ensuring the
+ * returned pool is and stays online.
*/
static struct worker_pool *get_work_pool(struct work_struct *work)
{
unsigned long data = atomic_long_read(&work->data);
- struct worker_pool *pool;
int pool_id;
+ assert_rcu_or_pool_mutex();
+
if (data & WORK_STRUCT_PWQ)
return ((struct pool_workqueue *)
(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
@@ -598,9 +650,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
if (pool_id == WORK_OFFQ_POOL_NONE)
return NULL;
- pool = worker_pool_by_id(pool_id);
- WARN_ON_ONCE(!pool);
- return pool;
+ return idr_find(&worker_pool_idr, pool_id);
}
/**
@@ -689,7 +739,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = pool->flags & POOL_MANAGING_WORKERS;
+ bool managing = mutex_is_locked(&pool->manager_arb);
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
@@ -744,7 +794,7 @@ static void wake_up_worker(struct worker_pool *pool)
* CONTEXT:
* spin_lock_irq(rq->lock)
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_waking_up(struct task_struct *task, int cpu)
{
struct worker *worker = kthread_data(task);
@@ -769,8 +819,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
* RETURNS:
* Worker task on @cpu to wake up, %NULL if none.
*/
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu)
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
{
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
struct worker_pool *pool;
@@ -786,7 +835,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
pool = worker->pool;
/* this can only happen on the local cpu */
- BUG_ON(cpu != raw_smp_processor_id());
+ if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+ return NULL;
/*
* The counterpart of the following dec_and_test, implied mb,
@@ -891,13 +941,12 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* recycled work item as currently executing and make it wait until the
* current execution finishes, introducing an unwanted dependency.
*
- * This function checks the work item address, work function and workqueue
- * to avoid false positives. Note that this isn't complete as one may
- * construct a work function which can introduce dependency onto itself
- * through a recycled work item. Well, if somebody wants to shoot oneself
- * in the foot that badly, there's only so much we can do, and if such
- * deadlock actually occurs, it should be easy to locate the culprit work
- * function.
+ * This function checks the work item address and work function to avoid
+ * false positives. Note that this isn't complete as one may construct a
+ * work function which can introduce dependency onto itself through a
+ * recycled work item. Well, if somebody wants to shoot oneself in the
+ * foot that badly, there's only so much we can do, and if such deadlock
+ * actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
* spin_lock_irq(pool->lock).
@@ -961,6 +1010,64 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
*nextp = n;
}
+/**
+ * get_pwq - get an extra reference on the specified pool_workqueue
+ * @pwq: pool_workqueue to get
+ *
+ * Obtain an extra reference on @pwq. The caller should guarantee that
+ * @pwq has positive refcnt and be holding the matching pool->lock.
+ */
+static void get_pwq(struct pool_workqueue *pwq)
+{
+ lockdep_assert_held(&pwq->pool->lock);
+ WARN_ON_ONCE(pwq->refcnt <= 0);
+ pwq->refcnt++;
+}
+
+/**
+ * put_pwq - put a pool_workqueue reference
+ * @pwq: pool_workqueue to put
+ *
+ * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
+ * destruction. The caller should be holding the matching pool->lock.
+ */
+static void put_pwq(struct pool_workqueue *pwq)
+{
+ lockdep_assert_held(&pwq->pool->lock);
+ if (likely(--pwq->refcnt))
+ return;
+ if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
+ return;
+ /*
+ * @pwq can't be released under pool->lock, bounce to
+ * pwq_unbound_release_workfn(). This never recurses on the same
+ * pool->lock as this path is taken only for unbound workqueues and
+ * the release work item is scheduled on a per-cpu workqueue. To
+ * avoid lockdep warning, unbound pool->locks are given lockdep
+ * subclass of 1 in get_unbound_pool().
+ */
+ schedule_work(&pwq->unbound_release_work);
+}
+
+/**
+ * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
+ * @pwq: pool_workqueue to put (can be %NULL)
+ *
+ * put_pwq() with locking. This function also allows %NULL @pwq.
+ */
+static void put_pwq_unlocked(struct pool_workqueue *pwq)
+{
+ if (pwq) {
+ /*
+ * As both pwqs and pools are sched-RCU protected, the
+ * following lock operations are safe.
+ */
+ spin_lock_irq(&pwq->pool->lock);
+ put_pwq(pwq);
+ spin_unlock_irq(&pwq->pool->lock);
+ }
+}
+
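put_pwq() above cannot free the pool_workqueue directly because the last reference is dropped under pool->lock, so destruction is punted to a work item. A stand-alone sketch of that shape, with the deferred work replaced by an explicit release queue drained later (illustrative only, not the kernel code):

#include <stdlib.h>

/*
 * Toy counterpart of get_pwq()/put_pwq(): the final put must not free the
 * object on the spot (in the kernel that would happen under pool->lock),
 * so it only queues the object for deferred release.
 */
struct obj {
	int refcnt;
	struct obj *next_free;
};

static struct obj *release_queue;	/* drained later, outside any lock */

static void obj_get(struct obj *o)
{
	o->refcnt++;			/* caller already owns a reference */
}

static void obj_put(struct obj *o)
{
	if (--o->refcnt)
		return;
	o->next_free = release_queue;	/* punt destruction, don't free here */
	release_queue = o;
}

static void drain_release_queue(void)	/* plays the role of the work item */
{
	while (release_queue) {
		struct obj *o = release_queue;

		release_queue = o->next_free;
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcnt = 1;			/* initial reference */
	obj_get(o);
	obj_put(o);
	obj_put(o);			/* last ref: queued, not freed yet */
	drain_release_queue();
	return 0;
}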
static void pwq_activate_delayed_work(struct work_struct *work)
{
struct pool_workqueue *pwq = get_work_pwq(work);
@@ -992,9 +1099,9 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
- /* ignore uncolored works */
+ /* uncolored work items don't participate in flushing or nr_active */
if (color == WORK_NO_COLOR)
- return;
+ goto out_put;
pwq->nr_in_flight[color]--;
@@ -1007,11 +1114,11 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
/* is flush in progress and are we at the flushing tip? */
if (likely(pwq->flush_color != color))
- return;
+ goto out_put;
/* are there still in-flight works? */
if (pwq->nr_in_flight[color])
- return;
+ goto out_put;
/* this pwq is done, clear flush_color */
pwq->flush_color = -1;
@@ -1022,6 +1129,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
*/
if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
complete(&pwq->wq->first_flusher->done);
+out_put:
+ put_pwq(pwq);
}
/**
@@ -1144,11 +1253,12 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head);
+ get_pwq(pwq);
/*
- * Ensure either worker_sched_deactivated() sees the above
- * list_add_tail() or we see zero nr_running to avoid workers
- * lying around lazily while there are works to be processed.
+ * Ensure either wq_worker_sleeping() sees the above
+ * list_add_tail() or we see zero nr_running to avoid workers lying
+ * around lazily while there are works to be processed.
*/
smp_mb();
@@ -1172,10 +1282,11 @@ static bool is_chained_work(struct workqueue_struct *wq)
return worker && worker->current_pwq->wq == wq;
}
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
struct pool_workqueue *pwq;
+ struct worker_pool *last_pool;
struct list_head *worklist;
unsigned int work_flags;
unsigned int req_cpu = cpu;
@@ -1191,48 +1302,62 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
/* if dying, only works from the same workqueue are allowed */
- if (unlikely(wq->flags & WQ_DRAINING) &&
+ if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
+retry:
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = raw_smp_processor_id();
- /* determine the pwq to use */
- if (!(wq->flags & WQ_UNBOUND)) {
- struct worker_pool *last_pool;
-
- if (cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
-
- /*
- * It's multi cpu. If @work was previously on a different
- * cpu, it might still be running there, in which case the
- * work needs to be queued on that cpu to guarantee
- * non-reentrancy.
- */
- pwq = get_pwq(cpu, wq);
- last_pool = get_work_pool(work);
+ /* pwq which will be used unless @work is executing elsewhere */
+ if (!(wq->flags & WQ_UNBOUND))
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ else
+ pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
- if (last_pool && last_pool != pwq->pool) {
- struct worker *worker;
+ /*
+ * If @work was previously on a different pool, it might still be
+ * running there, in which case the work needs to be queued on that
+ * pool to guarantee non-reentrancy.
+ */
+ last_pool = get_work_pool(work);
+ if (last_pool && last_pool != pwq->pool) {
+ struct worker *worker;
- spin_lock(&last_pool->lock);
+ spin_lock(&last_pool->lock);
- worker = find_worker_executing_work(last_pool, work);
+ worker = find_worker_executing_work(last_pool, work);
- if (worker && worker->current_pwq->wq == wq) {
- pwq = get_pwq(last_pool->cpu, wq);
- } else {
- /* meh... not running there, queue here */
- spin_unlock(&last_pool->lock);
- spin_lock(&pwq->pool->lock);
- }
+ if (worker && worker->current_pwq->wq == wq) {
+ pwq = worker->current_pwq;
} else {
+ /* meh... not running there, queue here */
+ spin_unlock(&last_pool->lock);
spin_lock(&pwq->pool->lock);
}
} else {
- pwq = get_pwq(WORK_CPU_UNBOUND, wq);
spin_lock(&pwq->pool->lock);
}
+ /*
+ * pwq is determined and locked. For unbound pools, we could have
+ * raced with pwq release and it could already be dead. If its
+ * refcnt is zero, repeat pwq selection. Note that a pwq never dies
+ * without another pwq replacing it in the numa_pwq_tbl or while
+ * work items are executing on it, so the retrying is guaranteed to
+ * make forward-progress.
+ */
+ if (unlikely(!pwq->refcnt)) {
+ if (wq->flags & WQ_UNBOUND) {
+ spin_unlock(&pwq->pool->lock);
+ cpu_relax();
+ goto retry;
+ }
+ /* oops */
+ WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
+ wq->name, cpu);
+ }
+
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -1287,22 +1412,6 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
}
EXPORT_SYMBOL_GPL(queue_work_on);
-/**
- * queue_work - queue work on a workqueue
- * @wq: workqueue to use
- * @work: work to queue
- *
- * Returns %false if @work was already on a queue, %true otherwise.
- *
- * We queue the work to the CPU on which it was submitted, but if the CPU dies
- * it can be processed by another CPU.
- */
-bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
-{
- return queue_work_on(WORK_CPU_UNBOUND, wq, work);
-}
-EXPORT_SYMBOL_GPL(queue_work);
-
void delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
@@ -1378,21 +1487,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
/**
- * queue_delayed_work - queue work on a workqueue after delay
- * @wq: workqueue to use
- * @dwork: delayable work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
- */
-bool queue_delayed_work(struct workqueue_struct *wq,
- struct delayed_work *dwork, unsigned long delay)
-{
- return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
-}
-EXPORT_SYMBOL_GPL(queue_delayed_work);
-
-/**
* mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
* @cpu: CPU number to execute work on
* @wq: workqueue to use
@@ -1431,21 +1525,6 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
/**
- * mod_delayed_work - modify delay of or queue a delayed work
- * @wq: workqueue to use
- * @dwork: work to queue
- * @delay: number of jiffies to wait before queueing
- *
- * mod_delayed_work_on() on local CPU.
- */
-bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
- unsigned long delay)
-{
- return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
-}
-EXPORT_SYMBOL_GPL(mod_delayed_work);
-
-/**
* worker_enter_idle - enter idle state
* @worker: worker which is entering idle state
*
@@ -1459,9 +1538,10 @@ static void worker_enter_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- BUG_ON(worker->flags & WORKER_IDLE);
- BUG_ON(!list_empty(&worker->entry) &&
- (worker->hentry.next || worker->hentry.pprev));
+ if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+ WARN_ON_ONCE(!list_empty(&worker->entry) &&
+ (worker->hentry.next || worker->hentry.pprev)))
+ return;
/* can't use worker_set_flags(), also called from start_worker() */
worker->flags |= WORKER_IDLE;
@@ -1498,22 +1578,25 @@ static void worker_leave_idle(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- BUG_ON(!(worker->flags & WORKER_IDLE));
+ if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+ return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
list_del_init(&worker->entry);
}
/**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
- * @worker: self
+ * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it
+ * @pool: target worker_pool
+ *
+ * Bind %current to the cpu of @pool if it is associated and lock @pool.
*
* Works which are scheduled while the cpu is online must at least be
* scheduled to a worker which is bound to the cpu so that if they are
* flushed from cpu callbacks while cpu is going down, they are
* guaranteed to execute on the cpu.
*
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
* themselves to the target cpu and may race with cpu going down or
* coming online. kthread_bind() can't be used because it may put the
* worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1534,12 +1617,9 @@ static void worker_leave_idle(struct worker *worker)
* %true if the associated pool is online (@worker is successfully
* bound), %false if offline.
*/
-static bool worker_maybe_bind_and_lock(struct worker *worker)
+static bool worker_maybe_bind_and_lock(struct worker_pool *pool)
__acquires(&pool->lock)
{
- struct worker_pool *pool = worker->pool;
- struct task_struct *task = worker->task;
-
while (true) {
/*
* The following call may fail, succeed or succeed
@@ -1548,14 +1628,13 @@ __acquires(&pool->lock)
* against POOL_DISASSOCIATED.
*/
if (!(pool->flags & POOL_DISASSOCIATED))
- set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+ set_cpus_allowed_ptr(current, pool->attrs->cpumask);
spin_lock_irq(&pool->lock);
if (pool->flags & POOL_DISASSOCIATED)
return false;
- if (task_cpu(task) == pool->cpu &&
- cpumask_equal(&current->cpus_allowed,
- get_cpu_mask(pool->cpu)))
+ if (task_cpu(current) == pool->cpu &&
+ cpumask_equal(&current->cpus_allowed, pool->attrs->cpumask))
return true;
spin_unlock_irq(&pool->lock);
@@ -1570,108 +1649,6 @@ __acquires(&pool->lock)
}
}
-/*
- * Rebind an idle @worker to its CPU. worker_thread() will test
- * list_empty(@worker->entry) before leaving idle and call this function.
- */
-static void idle_worker_rebind(struct worker *worker)
-{
- /* CPU may go down again inbetween, clear UNBOUND only on success */
- if (worker_maybe_bind_and_lock(worker))
- worker_clr_flags(worker, WORKER_UNBOUND);
-
- /* rebind complete, become available again */
- list_add(&worker->entry, &worker->pool->idle_list);
- spin_unlock_irq(&worker->pool->lock);
-}
-
-/*
- * Function for @worker->rebind.work used to rebind unbound busy workers to
- * the associated cpu which is coming back online. This is scheduled by
- * cpu up but can race with other cpu hotplug operations and may be
- * executed twice without intervening cpu down.
- */
-static void busy_worker_rebind_fn(struct work_struct *work)
-{
- struct worker *worker = container_of(work, struct worker, rebind_work);
-
- if (worker_maybe_bind_and_lock(worker))
- worker_clr_flags(worker, WORKER_UNBOUND);
-
- spin_unlock_irq(&worker->pool->lock);
-}
-
-/**
- * rebind_workers - rebind all workers of a pool to the associated CPU
- * @pool: pool of interest
- *
- * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
- * is different for idle and busy ones.
- *
- * Idle ones will be removed from the idle_list and woken up. They will
- * add themselves back after completing rebind. This ensures that the
- * idle_list doesn't contain any unbound workers when re-bound busy workers
- * try to perform local wake-ups for concurrency management.
- *
- * Busy workers can rebind after they finish their current work items.
- * Queueing the rebind work item at the head of the scheduled list is
- * enough. Note that nr_running will be properly bumped as busy workers
- * rebind.
- *
- * On return, all non-manager workers are scheduled for rebind - see
- * manage_workers() for the manager special case. Any idle worker
- * including the manager will not appear on @idle_list until rebind is
- * complete, making local wake-ups safe.
- */
-static void rebind_workers(struct worker_pool *pool)
-{
- struct worker *worker, *n;
- int i;
-
- lockdep_assert_held(&pool->assoc_mutex);
- lockdep_assert_held(&pool->lock);
-
- /* dequeue and kick idle ones */
- list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
- /*
- * idle workers should be off @pool->idle_list until rebind
- * is complete to avoid receiving premature local wake-ups.
- */
- list_del_init(&worker->entry);
-
- /*
- * worker_thread() will see the above dequeuing and call
- * idle_worker_rebind().
- */
- wake_up_process(worker->task);
- }
-
- /* rebind busy workers */
- for_each_busy_worker(worker, i, pool) {
- struct work_struct *rebind_work = &worker->rebind_work;
- struct workqueue_struct *wq;
-
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
-
- debug_work_activate(rebind_work);
-
- /*
- * wq doesn't really matter but let's keep @worker->pool
- * and @pwq->pool consistent for sanity.
- */
- if (std_worker_pool_pri(worker->pool))
- wq = system_highpri_wq;
- else
- wq = system_wq;
-
- insert_work(get_pwq(pool->cpu, wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
-}
-
static struct worker *alloc_worker(void)
{
struct worker *worker;
@@ -1680,7 +1657,6 @@ static struct worker *alloc_worker(void)
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
- INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
@@ -1703,18 +1679,25 @@ static struct worker *alloc_worker(void)
*/
static struct worker *create_worker(struct worker_pool *pool)
{
- const char *pri = std_worker_pool_pri(pool) ? "H" : "";
struct worker *worker = NULL;
int id = -1;
+ char id_buf[16];
+
+ lockdep_assert_held(&pool->manager_mutex);
+ /*
+ * ID is needed to determine kthread name. Allocate ID first
+ * without installing the pointer.
+ */
+ idr_preload(GFP_KERNEL);
spin_lock_irq(&pool->lock);
- while (ida_get_new(&pool->worker_ida, &id)) {
- spin_unlock_irq(&pool->lock);
- if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
- goto fail;
- spin_lock_irq(&pool->lock);
- }
+
+ id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
+
spin_unlock_irq(&pool->lock);
+ idr_preload_end();
+ if (id < 0)
+ goto fail;
worker = alloc_worker();
if (!worker)
@@ -1723,40 +1706,46 @@ static struct worker *create_worker(struct worker_pool *pool)
worker->pool = pool;
worker->id = id;
- if (pool->cpu != WORK_CPU_UNBOUND)
- worker->task = kthread_create_on_node(worker_thread,
- worker, cpu_to_node(pool->cpu),
- "kworker/%u:%d%s", pool->cpu, id, pri);
+ if (pool->cpu >= 0)
+ snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
+ pool->attrs->nice < 0 ? "H" : "");
else
- worker->task = kthread_create(worker_thread, worker,
- "kworker/u:%d%s", id, pri);
+ snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
+
+ worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
+ "kworker/%s", id_buf);
if (IS_ERR(worker->task))
goto fail;
- if (std_worker_pool_pri(pool))
- set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+ /*
+ * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
+ * online CPUs. It'll be re-applied when any of the CPUs come up.
+ */
+ set_user_nice(worker->task, pool->attrs->nice);
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
+ /* prevent userland from meddling with cpumask of workqueue workers */
+ worker->task->flags |= PF_NO_SETAFFINITY;
/*
- * Determine CPU binding of the new worker depending on
- * %POOL_DISASSOCIATED. The caller is responsible for ensuring the
- * flag remains stable across this function. See the comments
- * above the flag definition for details.
- *
- * As an unbound worker may later become a regular one if CPU comes
- * online, make sure every worker has %PF_THREAD_BOUND set.
+ * The caller is responsible for ensuring %POOL_DISASSOCIATED
+ * remains stable across this function. See the comments above the
+ * flag definition for details.
*/
- if (!(pool->flags & POOL_DISASSOCIATED)) {
- kthread_bind(worker->task, pool->cpu);
- } else {
- worker->task->flags |= PF_THREAD_BOUND;
+ if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;
- }
+
+ /* successful, commit the pointer to idr */
+ spin_lock_irq(&pool->lock);
+ idr_replace(&pool->worker_idr, worker, worker->id);
+ spin_unlock_irq(&pool->lock);
return worker;
+
fail:
if (id >= 0) {
spin_lock_irq(&pool->lock);
- ida_remove(&pool->worker_ida, id);
+ idr_remove(&pool->worker_idr, id);
spin_unlock_irq(&pool->lock);
}
kfree(worker);
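create_worker() above uses the idr preload idiom: reserve memory with GFP_KERNEL while nothing is locked, take the ID under the spinlock with a NULL pointer, and only publish the worker with idr_replace() once it is fully initialised. A condensed sketch of that shape (hypothetical helper around the same kernel calls; kthread setup and most error handling trimmed):

static struct worker *sketch_alloc_worker(struct worker_pool *pool)
{
	struct worker *worker;
	int id;

	idr_preload(GFP_KERNEL);			/* may sleep */
	spin_lock_irq(&pool->lock);
	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
	spin_unlock_irq(&pool->lock);
	idr_preload_end();
	if (id < 0)
		return NULL;

	worker = alloc_worker();			/* sleeps, so unlocked */
	if (!worker) {
		spin_lock_irq(&pool->lock);
		idr_remove(&pool->worker_idr, id);
		spin_unlock_irq(&pool->lock);
		return NULL;
	}
	worker->id = id;

	spin_lock_irq(&pool->lock);
	idr_replace(&pool->worker_idr, worker, id);	/* make it visible */
	spin_unlock_irq(&pool->lock);
	return worker;
}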
@@ -1781,6 +1770,30 @@ static void start_worker(struct worker *worker)
}
/**
+ * create_and_start_worker - create and start a worker for a pool
+ * @pool: the target pool
+ *
+ * Grab the managership of @pool and create and start a new worker for it.
+ */
+static int create_and_start_worker(struct worker_pool *pool)
+{
+ struct worker *worker;
+
+ mutex_lock(&pool->manager_mutex);
+
+ worker = create_worker(pool);
+ if (worker) {
+ spin_lock_irq(&pool->lock);
+ start_worker(worker);
+ spin_unlock_irq(&pool->lock);
+ }
+
+ mutex_unlock(&pool->manager_mutex);
+
+ return worker ? 0 : -ENOMEM;
+}
+
+/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
@@ -1792,11 +1805,14 @@ static void start_worker(struct worker *worker)
static void destroy_worker(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- int id = worker->id;
+
+ lockdep_assert_held(&pool->manager_mutex);
+ lockdep_assert_held(&pool->lock);
/* sanity check frenzy */
- BUG_ON(worker->current_work);
- BUG_ON(!list_empty(&worker->scheduled));
+ if (WARN_ON(worker->current_work) ||
+ WARN_ON(!list_empty(&worker->scheduled)))
+ return;
if (worker->flags & WORKER_STARTED)
pool->nr_workers--;
@@ -1806,13 +1822,14 @@ static void destroy_worker(struct worker *worker)
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
+ idr_remove(&pool->worker_idr, worker->id);
+
spin_unlock_irq(&pool->lock);
kthread_stop(worker->task);
kfree(worker);
spin_lock_irq(&pool->lock);
- ida_remove(&pool->worker_ida, id);
}
static void idle_worker_timeout(unsigned long __pool)
@@ -1841,23 +1858,21 @@ static void idle_worker_timeout(unsigned long __pool)
spin_unlock_irq(&pool->lock);
}
-static bool send_mayday(struct work_struct *work)
+static void send_mayday(struct work_struct *work)
{
struct pool_workqueue *pwq = get_work_pwq(work);
struct workqueue_struct *wq = pwq->wq;
- unsigned int cpu;
- if (!(wq->flags & WQ_RESCUER))
- return false;
+ lockdep_assert_held(&wq_mayday_lock);
+
+ if (!wq->rescuer)
+ return;
/* mayday mayday mayday */
- cpu = pwq->pool->cpu;
- /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
- if (cpu == WORK_CPU_UNBOUND)
- cpu = 0;
- if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
+ if (list_empty(&pwq->mayday_node)) {
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
wake_up_process(wq->rescuer->task);
- return true;
+ }
}
static void pool_mayday_timeout(unsigned long __pool)
@@ -1865,7 +1880,8 @@ static void pool_mayday_timeout(unsigned long __pool)
struct worker_pool *pool = (void *)__pool;
struct work_struct *work;
- spin_lock_irq(&pool->lock);
+ spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */
+ spin_lock(&pool->lock);
if (need_to_create_worker(pool)) {
/*
@@ -1878,7 +1894,8 @@ static void pool_mayday_timeout(unsigned long __pool)
send_mayday(work);
}
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
+ spin_unlock_irq(&wq_mayday_lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
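send_mayday() above relies on list_empty() of the pwq's own mayday_node as an "already queued?" test, which works because the node is set up with INIT_LIST_HEAD() and taken off the list with list_del_init(). A minimal sketch of the idiom with hypothetical request/pending names (kernel list API):

struct request {
	struct list_head node;
};

static LIST_HEAD(pending);

static void init_request(struct request *req)
{
	INIT_LIST_HEAD(&req->node);		/* starts out off-list */
}

static void queue_request(struct request *req)
{
	if (list_empty(&req->node))		/* not queued yet */
		list_add_tail(&req->node, &pending);
}

static void complete_request(struct request *req)
{
	list_del_init(&req->node);		/* node points at itself again */
}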
@@ -1893,8 +1910,8 @@ static void pool_mayday_timeout(unsigned long __pool)
* sent to all rescuers with works scheduled on @pool to resolve
* possible allocation deadlock.
*
- * On return, need_to_create_worker() is guaranteed to be false and
- * may_start_working() true.
+ * On return, need_to_create_worker() is guaranteed to be %false and
+ * may_start_working() %true.
*
* LOCKING:
* spin_lock_irq(pool->lock) which may be released and regrabbed
@@ -1902,7 +1919,7 @@ static void pool_mayday_timeout(unsigned long __pool)
* manager.
*
* RETURNS:
- * false if no action was taken and pool->lock stayed locked, true
+ * %false if no action was taken and pool->lock stayed locked, %true
* otherwise.
*/
static bool maybe_create_worker(struct worker_pool *pool)
@@ -1925,7 +1942,8 @@ restart:
del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&pool->lock);
start_worker(worker);
- BUG_ON(need_to_create_worker(pool));
+ if (WARN_ON_ONCE(need_to_create_worker(pool)))
+ goto restart;
return true;
}
@@ -1958,7 +1976,7 @@ restart:
* multiple times. Called only from manager.
*
* RETURNS:
- * false if no action was taken and pool->lock stayed locked, true
+ * %false if no action was taken and pool->lock stayed locked, %true
* otherwise.
*/
static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2009,42 +2027,37 @@ static bool manage_workers(struct worker *worker)
struct worker_pool *pool = worker->pool;
bool ret = false;
- if (pool->flags & POOL_MANAGING_WORKERS)
+ /*
+ * Managership is governed by two mutexes - manager_arb and
+ * manager_mutex. manager_arb handles arbitration of manager role.
+ * Anyone who successfully grabs manager_arb wins the arbitration
+ * and becomes the manager. mutex_trylock() on pool->manager_arb
+ * failure while holding pool->lock reliably indicates that someone
+ * else is managing the pool and the worker which failed trylock
+ * can proceed to executing work items. This means that anyone
+ * grabbing manager_arb is responsible for actually performing
+ * manager duties. If manager_arb is grabbed and released without
+ * actual management, the pool may stall indefinitely.
+ *
+ * manager_mutex is used for exclusion of actual management
+ * operations. The holder of manager_mutex can be sure that none
+ * of management operations, including creation and destruction of
+ * workers, won't take place until the mutex is released. Because
+ * manager_mutex doesn't interfere with manager role arbitration,
+ * it is guaranteed that the pool's management, while it may be
+ * delayed, won't be disturbed by someone else grabbing
+ * manager_mutex.
+ */
+ if (!mutex_trylock(&pool->manager_arb))
return ret;
- pool->flags |= POOL_MANAGING_WORKERS;
-
/*
- * To simplify both worker management and CPU hotplug, hold off
- * management while hotplug is in progress. CPU hotplug path can't
- * grab %POOL_MANAGING_WORKERS to achieve this because that can
- * lead to idle worker depletion (all become busy thinking someone
- * else is managing) which in turn can result in deadlock under
- * extreme circumstances. Use @pool->assoc_mutex to synchronize
- * manager against CPU hotplug.
- *
- * assoc_mutex would always be free unless CPU hotplug is in
- * progress. trylock first without dropping @pool->lock.
+ * With manager arbitration won, manager_mutex would be free in
+ * most cases. trylock first without dropping @pool->lock.
*/
- if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
+ if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
spin_unlock_irq(&pool->lock);
- mutex_lock(&pool->assoc_mutex);
- /*
- * CPU hotplug could have happened while we were waiting
- * for assoc_mutex. Hotplug itself can't handle us
- * because manager isn't either on idle or busy list, and
- * @pool's state and ours could have deviated.
- *
- * As hotplug is now excluded via assoc_mutex, we can
- * simply try to bind. It will succeed or fail depending
- * on @pool's current state. Try it and adjust
- * %WORKER_UNBOUND accordingly.
- */
- if (worker_maybe_bind_and_lock(worker))
- worker->flags &= ~WORKER_UNBOUND;
- else
- worker->flags |= WORKER_UNBOUND;
-
+ mutex_lock(&pool->manager_mutex);
ret = true;
}
@@ -2057,8 +2070,8 @@ static bool manage_workers(struct worker *worker)
ret |= maybe_destroy_workers(pool);
ret |= maybe_create_worker(pool);
- pool->flags &= ~POOL_MANAGING_WORKERS;
- mutex_unlock(&pool->assoc_mutex);
+ mutex_unlock(&pool->manager_mutex);
+ mutex_unlock(&pool->manager_arb);
return ret;
}
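The comment block above describes the two-mutex scheme: manager_arb is only ever trylocked from the worker path to decide who manages, while manager_mutex serialises the actual worker creation and destruction against hotplug and attribute changes. A simplified sketch of that split, with a hypothetical struct pool and the pool->lock release/reacquire dance omitted:

static bool maybe_manage(struct pool *pool)
{
	if (!mutex_trylock(&pool->manager_arb))
		return false;			/* someone else is managing */

	mutex_lock(&pool->manager_mutex);
	/* ... maybe_destroy_workers() / maybe_create_worker() ... */
	mutex_unlock(&pool->manager_mutex);

	mutex_unlock(&pool->manager_arb);
	return true;
}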
@@ -2212,11 +2225,11 @@ static void process_scheduled_works(struct worker *worker)
* worker_thread - the worker thread function
* @__worker: self
*
- * The worker thread function. There are NR_CPU_WORKER_POOLS dynamic pools
- * of these per each cpu. These workers process all works regardless of
- * their specific target workqueue. The only exception is works which
- * belong to workqueues with a rescuer which will be explained in
- * rescuer_thread().
+ * The worker thread function. All workers belong to a worker_pool -
+ * either a per-cpu one or dynamic unbound one. These workers process all
+ * work items regardless of their specific target workqueue. The only
+ * exception is work items which belong to workqueues with a rescuer which
+ * will be explained in rescuer_thread().
*/
static int worker_thread(void *__worker)
{
@@ -2228,19 +2241,12 @@ static int worker_thread(void *__worker)
woke_up:
spin_lock_irq(&pool->lock);
- /* we are off idle list if destruction or rebind is requested */
- if (unlikely(list_empty(&worker->entry))) {
+ /* am I supposed to die? */
+ if (unlikely(worker->flags & WORKER_DIE)) {
spin_unlock_irq(&pool->lock);
-
- /* if DIE is set, destruction is requested */
- if (worker->flags & WORKER_DIE) {
- worker->task->flags &= ~PF_WQ_WORKER;
- return 0;
- }
-
- /* otherwise, rebind */
- idle_worker_rebind(worker);
- goto woke_up;
+ WARN_ON_ONCE(!list_empty(&worker->entry));
+ worker->task->flags &= ~PF_WQ_WORKER;
+ return 0;
}
worker_leave_idle(worker);
@@ -2258,14 +2264,16 @@ recheck:
* preparing to process a work or actually processing it.
* Make sure nobody diddled with it while I was sleeping.
*/
- BUG_ON(!list_empty(&worker->scheduled));
+ WARN_ON_ONCE(!list_empty(&worker->scheduled));
/*
- * When control reaches this point, we're guaranteed to have
- * at least one idle worker or that someone else has already
- * assumed the manager role.
+ * Finish PREP stage. We're guaranteed to have at least one idle
+ * worker or that someone else has already assumed the manager
+ * role. This is where @worker starts participating in concurrency
+ * management if applicable and concurrency management is restored
+ * after being rebound. See rebind_workers() for details.
*/
- worker_clr_flags(worker, WORKER_PREP);
+ worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
do {
struct work_struct *work =
@@ -2307,7 +2315,7 @@ sleep:
* @__rescuer: self
*
* Workqueue rescuer thread function. There's one rescuer for each
- * workqueue which has WQ_RESCUER set.
+ * workqueue which has WQ_MEM_RECLAIM set.
*
* Regular work processing on a pool may block trying to create a new
* worker which uses GFP_KERNEL allocation which has slight chance of
@@ -2326,8 +2334,6 @@ static int rescuer_thread(void *__rescuer)
struct worker *rescuer = __rescuer;
struct workqueue_struct *wq = rescuer->rescue_wq;
struct list_head *scheduled = &rescuer->scheduled;
- bool is_unbound = wq->flags & WQ_UNBOUND;
- unsigned int cpu;
set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -2345,28 +2351,29 @@ repeat:
return 0;
}
- /*
- * See whether any cpu is asking for help. Unbounded
- * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
- */
- for_each_mayday_cpu(cpu, wq->mayday_mask) {
- unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
- struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+ /* see whether any pwq is asking for help */
+ spin_lock_irq(&wq_mayday_lock);
+
+ while (!list_empty(&wq->maydays)) {
+ struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+ struct pool_workqueue, mayday_node);
struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n;
__set_current_state(TASK_RUNNING);
- mayday_clear_cpu(cpu, wq->mayday_mask);
+ list_del_init(&pwq->mayday_node);
+
+ spin_unlock_irq(&wq_mayday_lock);
/* migrate to the target cpu if possible */
+ worker_maybe_bind_and_lock(pool);
rescuer->pool = pool;
- worker_maybe_bind_and_lock(rescuer);
/*
* Slurp in all works issued via this workqueue and
* process'em.
*/
- BUG_ON(!list_empty(&rescuer->scheduled));
+ WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_pwq(work) == pwq)
move_linked_works(work, scheduled, &n);
@@ -2381,9 +2388,13 @@ repeat:
if (keep_working(pool))
wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
+ rescuer->pool = NULL;
+ spin_unlock(&pool->lock);
+ spin_lock(&wq_mayday_lock);
}
+ spin_unlock_irq(&wq_mayday_lock);
+
/* rescuers should never participate in concurrency management */
WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
schedule();
@@ -2487,7 +2498,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
* advanced to @work_color.
*
* CONTEXT:
- * mutex_lock(wq->flush_mutex).
+ * mutex_lock(wq->mutex).
*
* RETURNS:
* %true if @flush_color >= 0 and there's something to flush. %false
@@ -2497,21 +2508,20 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
int flush_color, int work_color)
{
bool wait = false;
- unsigned int cpu;
+ struct pool_workqueue *pwq;
if (flush_color >= 0) {
- BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+ WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
atomic_set(&wq->nr_pwqs_to_flush, 1);
}
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
- BUG_ON(pwq->flush_color != -1);
+ WARN_ON_ONCE(pwq->flush_color != -1);
if (pwq->nr_in_flight[flush_color]) {
pwq->flush_color = flush_color;
@@ -2521,7 +2531,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
}
if (work_color >= 0) {
- BUG_ON(work_color != work_next_color(pwq->work_color));
+ WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
pwq->work_color = work_color;
}
@@ -2538,11 +2548,8 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
* flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush
*
- * Forces execution of the workqueue and blocks until its completion.
- * This is typically used in driver shutdown handlers.
- *
- * We sleep until all works which were queued on entry have been handled,
- * but we are not livelocked by new incoming ones.
+ * This function sleeps until all work items which were queued on entry
+ * have finished execution, but it is not livelocked by new incoming ones.
*/
void flush_workqueue(struct workqueue_struct *wq)
{
@@ -2556,7 +2563,7 @@ void flush_workqueue(struct workqueue_struct *wq)
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
- mutex_lock(&wq->flush_mutex);
+ mutex_lock(&wq->mutex);
/*
* Start-to-wait phase
@@ -2569,13 +2576,13 @@ void flush_workqueue(struct workqueue_struct *wq)
* becomes our flush_color and work_color is advanced
* by one.
*/
- BUG_ON(!list_empty(&wq->flusher_overflow));
+ WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
this_flusher.flush_color = wq->work_color;
wq->work_color = next_color;
if (!wq->first_flusher) {
/* no flush in progress, become the first flusher */
- BUG_ON(wq->flush_color != this_flusher.flush_color);
+ WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
wq->first_flusher = &this_flusher;
@@ -2588,7 +2595,7 @@ void flush_workqueue(struct workqueue_struct *wq)
}
} else {
/* wait in queue */
- BUG_ON(wq->flush_color == this_flusher.flush_color);
+ WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
list_add_tail(&this_flusher.list, &wq->flusher_queue);
flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
}
@@ -2601,7 +2608,7 @@ void flush_workqueue(struct workqueue_struct *wq)
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
}
- mutex_unlock(&wq->flush_mutex);
+ mutex_unlock(&wq->mutex);
wait_for_completion(&this_flusher.done);
@@ -2614,7 +2621,7 @@ void flush_workqueue(struct workqueue_struct *wq)
if (wq->first_flusher != &this_flusher)
return;
- mutex_lock(&wq->flush_mutex);
+ mutex_lock(&wq->mutex);
/* we might have raced, check again with mutex held */
if (wq->first_flusher != &this_flusher)
@@ -2622,8 +2629,8 @@ void flush_workqueue(struct workqueue_struct *wq)
wq->first_flusher = NULL;
- BUG_ON(!list_empty(&this_flusher.list));
- BUG_ON(wq->flush_color != this_flusher.flush_color);
+ WARN_ON_ONCE(!list_empty(&this_flusher.list));
+ WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
while (true) {
struct wq_flusher *next, *tmp;
@@ -2636,8 +2643,8 @@ void flush_workqueue(struct workqueue_struct *wq)
complete(&next->done);
}
- BUG_ON(!list_empty(&wq->flusher_overflow) &&
- wq->flush_color != work_next_color(wq->work_color));
+ WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
+ wq->flush_color != work_next_color(wq->work_color));
/* this flush_color is finished, advance by one */
wq->flush_color = work_next_color(wq->flush_color);
@@ -2661,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
}
if (list_empty(&wq->flusher_queue)) {
- BUG_ON(wq->flush_color != wq->work_color);
+ WARN_ON_ONCE(wq->flush_color != wq->work_color);
break;
}
@@ -2669,8 +2676,8 @@ void flush_workqueue(struct workqueue_struct *wq)
* Need to flush more colors. Make the next flusher
* the new first flusher and arm pwqs.
*/
- BUG_ON(wq->flush_color == wq->work_color);
- BUG_ON(wq->flush_color != next->flush_color);
+ WARN_ON_ONCE(wq->flush_color == wq->work_color);
+ WARN_ON_ONCE(wq->flush_color != next->flush_color);
list_del_init(&next->list);
wq->first_flusher = next;
@@ -2686,7 +2693,7 @@ void flush_workqueue(struct workqueue_struct *wq)
}
out_unlock:
- mutex_unlock(&wq->flush_mutex);
+ mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
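
The flush machinery above cycles wq->work_color and wq->flush_color through a fixed set of colors, advancing by one per flush ("this flush_color is finished, advance by one"). A tiny standalone sketch of that color wraparound; the color count is illustrative only, the kernel derives it from WORK_STRUCT_COLOR_BITS:

#include <stdio.h>

#define WORK_NR_COLORS	15	/* illustrative value */

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

int main(void)
{
	int color = 0;

	/* each flush advances work_color; flush_color chases it one step behind */
	for (int i = 0; i < 20; i++) {
		printf("flush %2d uses color %d\n", i, color);
		color = work_next_color(color);
	}
	return 0;
}
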
@@ -2704,22 +2711,23 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
void drain_workqueue(struct workqueue_struct *wq)
{
unsigned int flush_cnt = 0;
- unsigned int cpu;
+ struct pool_workqueue *pwq;
/*
* __queue_work() needs to test whether there are drainers, is much
* hotter than drain_workqueue() and already looks at @wq->flags.
- * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+ * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
*/
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq->mutex);
if (!wq->nr_drainers++)
- wq->flags |= WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ wq->flags |= __WQ_DRAINING;
+ mutex_unlock(&wq->mutex);
reflush:
flush_workqueue(wq);
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ mutex_lock(&wq->mutex);
+
+ for_each_pwq(pwq, wq) {
bool drained;
spin_lock_irq(&pwq->pool->lock);
@@ -2731,15 +2739,16 @@ reflush:
if (++flush_cnt == 10 ||
(flush_cnt % 100 == 0 && flush_cnt <= 1000))
- pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
+ pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
wq->name, flush_cnt);
+
+ mutex_unlock(&wq->mutex);
goto reflush;
}
- spin_lock(&workqueue_lock);
if (!--wq->nr_drainers)
- wq->flags &= ~WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ wq->flags &= ~__WQ_DRAINING;
+ mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
@@ -2750,11 +2759,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
struct pool_workqueue *pwq;
might_sleep();
+
+ local_irq_disable();
pool = get_work_pool(work);
- if (!pool)
+ if (!pool) {
+ local_irq_enable();
return false;
+ }
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -2776,7 +2789,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
* flusher is not running on the same workqueue by verifying write
* access.
*/
- if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
+ if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
lock_map_acquire(&pwq->wq->lockdep_map);
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
@@ -2933,66 +2946,6 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork)
EXPORT_SYMBOL(cancel_delayed_work_sync);
/**
- * schedule_work_on - put work task on a specific cpu
- * @cpu: cpu to put the work task on
- * @work: job to be done
- *
- * This puts a job on a specific cpu
- */
-bool schedule_work_on(int cpu, struct work_struct *work)
-{
- return queue_work_on(cpu, system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work_on);
-
-/**
- * schedule_work - put work task in global workqueue
- * @work: job to be done
- *
- * Returns %false if @work was already on the kernel-global workqueue and
- * %true otherwise.
- *
- * This puts a job in the kernel-global workqueue if it was not already
- * queued and leaves it in the same position on the kernel-global
- * workqueue otherwise.
- */
-bool schedule_work(struct work_struct *work)
-{
- return queue_work(system_wq, work);
-}
-EXPORT_SYMBOL(schedule_work);
-
-/**
- * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
- * @cpu: cpu to use
- * @dwork: job to be done
- * @delay: number of jiffies to wait
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue on the specified CPU.
- */
-bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work_on(cpu, system_wq, dwork, delay);
-}
-EXPORT_SYMBOL(schedule_delayed_work_on);
-
-/**
- * schedule_delayed_work - put work task in global workqueue after delay
- * @dwork: job to be done
- * @delay: number of jiffies to wait or 0 for immediate execution
- *
- * After waiting for a given time this puts a job in the kernel-global
- * workqueue.
- */
-bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
-{
- return queue_delayed_work(system_wq, dwork, delay);
-}
-EXPORT_SYMBOL(schedule_delayed_work);
-
-/**
* schedule_on_each_cpu - execute a function synchronously on each online CPU
* @func: the function to call
*
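
The schedule_work*() wrappers deleted in the hunk above are one-line forwards to the system workqueue. Assuming they survive as static inline equivalents in a header (an assumption; their new location is not shown in this diff), their behaviour is:

/* equivalent one-line wrappers; placement outside workqueue.c is assumed */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
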
@@ -3085,51 +3038,1025 @@ int execute_in_process_context(work_func_t fn, struct execute_work *ew)
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
-int keventd_up(void)
+#ifdef CONFIG_SYSFS
+/*
+ * Workqueues with the WQ_SYSFS flag set are visible to userland via
+ * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
+ * following attributes.
+ *
+ * per_cpu RO bool : whether the workqueue is per-cpu or unbound
+ * max_active RW int : maximum number of in-flight work items
+ *
+ * Unbound workqueues have the following extra attributes.
+ *
+ * id RO int : the associated pool ID
+ * nice RW int : nice value of the workers
+ * cpumask RW mask : bitmask of allowed CPUs for the workers
+ */
+struct wq_device {
+ struct workqueue_struct *wq;
+ struct device dev;
+};
+
+static struct workqueue_struct *dev_to_wq(struct device *dev)
+{
+ struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
+
+ return wq_dev->wq;
+}
+
+static ssize_t wq_per_cpu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
+}
+
+static ssize_t wq_max_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
+}
+
+static ssize_t wq_max_active_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ int val;
+
+ if (sscanf(buf, "%d", &val) != 1 || val <= 0)
+ return -EINVAL;
+
+ workqueue_set_max_active(wq, val);
+ return count;
+}
+
+static struct device_attribute wq_sysfs_attrs[] = {
+ __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
+ __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
+ __ATTR_NULL,
+};
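
The max_active attribute added above can be adjusted from userspace once a workqueue is registered with WQ_SYSFS. A small runnable sketch; "example_wq" is a placeholder name and the file only exists for WQ_SYSFS workqueues:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/workqueue/devices/example_wq/max_active";
	const char *val = "16\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}
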
+
+static ssize_t wq_pool_ids_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ const char *delim = "";
+ int node, written = 0;
+
+ rcu_read_lock_sched();
+ for_each_node(node) {
+ written += scnprintf(buf + written, PAGE_SIZE - written,
+ "%s%d:%d", delim, node,
+ unbound_pwq_by_node(wq, node)->pool->id);
+ delim = " ";
+ }
+ written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+ rcu_read_unlock_sched();
+
+ return written;
+}
+
+static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ int written;
+
+ mutex_lock(&wq->mutex);
+ written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
+ mutex_unlock(&wq->mutex);
+
+ return written;
+}
+
+/* prepare workqueue_attrs for sysfs store operations */
+static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
+{
+ struct workqueue_attrs *attrs;
+
+ attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ if (!attrs)
+ return NULL;
+
+ mutex_lock(&wq->mutex);
+ copy_workqueue_attrs(attrs, wq->unbound_attrs);
+ mutex_unlock(&wq->mutex);
+ return attrs;
+}
+
+static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ struct workqueue_attrs *attrs;
+ int ret;
+
+ attrs = wq_sysfs_prep_attrs(wq);
+ if (!attrs)
+ return -ENOMEM;
+
+ if (sscanf(buf, "%d", &attrs->nice) == 1 &&
+ attrs->nice >= -20 && attrs->nice <= 19)
+ ret = apply_workqueue_attrs(wq, attrs);
+ else
+ ret = -EINVAL;
+
+ free_workqueue_attrs(attrs);
+ return ret ?: count;
+}
+
+static ssize_t wq_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return system_wq != NULL;
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ int written;
+
+ mutex_lock(&wq->mutex);
+ written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask);
+ mutex_unlock(&wq->mutex);
+
+ written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+ return written;
}
-static int alloc_pwqs(struct workqueue_struct *wq)
+static ssize_t wq_cpumask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ struct workqueue_attrs *attrs;
+ int ret;
+
+ attrs = wq_sysfs_prep_attrs(wq);
+ if (!attrs)
+ return -ENOMEM;
+
+ ret = cpumask_parse(buf, attrs->cpumask);
+ if (!ret)
+ ret = apply_workqueue_attrs(wq, attrs);
+
+ free_workqueue_attrs(attrs);
+ return ret ?: count;
+}
+
+static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ int written;
+
+ mutex_lock(&wq->mutex);
+ written = scnprintf(buf, PAGE_SIZE, "%d\n",
+ !wq->unbound_attrs->no_numa);
+ mutex_unlock(&wq->mutex);
+
+ return written;
+}
+
+static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ struct workqueue_attrs *attrs;
+ int v, ret;
+
+ attrs = wq_sysfs_prep_attrs(wq);
+ if (!attrs)
+ return -ENOMEM;
+
+ ret = -EINVAL;
+ if (sscanf(buf, "%d", &v) == 1) {
+ attrs->no_numa = !v;
+ ret = apply_workqueue_attrs(wq, attrs);
+ }
+
+ free_workqueue_attrs(attrs);
+ return ret ?: count;
+}
+
+static struct device_attribute wq_sysfs_unbound_attrs[] = {
+ __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
+ __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
+ __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
+ __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
+ __ATTR_NULL,
+};
+
+static struct bus_type wq_subsys = {
+ .name = "workqueue",
+ .dev_attrs = wq_sysfs_attrs,
+};
+
+static int __init wq_sysfs_init(void)
+{
+ return subsys_virtual_register(&wq_subsys, NULL);
+}
+core_initcall(wq_sysfs_init);
+
+static void wq_device_release(struct device *dev)
+{
+ struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
+
+ kfree(wq_dev);
+}
+
+/**
+ * workqueue_sysfs_register - make a workqueue visible in sysfs
+ * @wq: the workqueue to register
+ *
+ * Expose @wq in sysfs under /sys/bus/workqueue/devices.
+ * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
+ * which is the preferred method.
+ *
+ * A workqueue user should use this function directly iff it wants to apply
+ * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
+ * apply_workqueue_attrs() may race against userland updating the
+ * attributes.
+ *
+ * Returns 0 on success, -errno on failure.
+ */
+int workqueue_sysfs_register(struct workqueue_struct *wq)
+{
+ struct wq_device *wq_dev;
+ int ret;
+
/*
- * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
- * Make sure that the alignment isn't lower than that of
- * unsigned long long.
+ * Adjusting max_active or creating new pwqs by applying
+ * attributes breaks ordering guarantee. Disallow exposing ordered
+ * workqueues.
*/
- const size_t size = sizeof(struct pool_workqueue);
- const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
- __alignof__(unsigned long long));
+ if (WARN_ON(wq->flags & __WQ_ORDERED))
+ return -EINVAL;
- if (!(wq->flags & WQ_UNBOUND))
- wq->pool_wq.pcpu = __alloc_percpu(size, align);
- else {
- void *ptr;
+ wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
+ if (!wq_dev)
+ return -ENOMEM;
+
+ wq_dev->wq = wq;
+ wq_dev->dev.bus = &wq_subsys;
+ wq_dev->dev.init_name = wq->name;
+ wq_dev->dev.release = wq_device_release;
+
+ /*
+ * unbound_attrs are created separately. Suppress uevent until
+ * everything is ready.
+ */
+ dev_set_uevent_suppress(&wq_dev->dev, true);
+
+ ret = device_register(&wq_dev->dev);
+ if (ret) {
+ kfree(wq_dev);
+ wq->wq_dev = NULL;
+ return ret;
+ }
+
+ if (wq->flags & WQ_UNBOUND) {
+ struct device_attribute *attr;
+
+ for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
+ ret = device_create_file(&wq_dev->dev, attr);
+ if (ret) {
+ device_unregister(&wq_dev->dev);
+ wq->wq_dev = NULL;
+ return ret;
+ }
+ }
+ }
+
+ kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
+ return 0;
+}
+
+/**
+ * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
+ * @wq: the workqueue to unregister
+ *
+ * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
+ */
+static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
+{
+ struct wq_device *wq_dev = wq->wq_dev;
+
+ if (!wq->wq_dev)
+ return;
+
+ wq->wq_dev = NULL;
+ device_unregister(&wq_dev->dev);
+}
+#else /* CONFIG_SYSFS */
+static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
+#endif /* CONFIG_SYSFS */
+
+/**
+ * free_workqueue_attrs - free a workqueue_attrs
+ * @attrs: workqueue_attrs to free
+ *
+ * Undo alloc_workqueue_attrs().
+ */
+void free_workqueue_attrs(struct workqueue_attrs *attrs)
+{
+ if (attrs) {
+ free_cpumask_var(attrs->cpumask);
+ kfree(attrs);
+ }
+}
+
+/**
+ * alloc_workqueue_attrs - allocate a workqueue_attrs
+ * @gfp_mask: allocation mask to use
+ *
+ * Allocate a new workqueue_attrs, initialize with default settings and
+ * return it. Returns NULL on failure.
+ */
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+{
+ struct workqueue_attrs *attrs;
+
+ attrs = kzalloc(sizeof(*attrs), gfp_mask);
+ if (!attrs)
+ goto fail;
+ if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+ goto fail;
+
+ cpumask_copy(attrs->cpumask, cpu_possible_mask);
+ return attrs;
+fail:
+ free_workqueue_attrs(attrs);
+ return NULL;
+}
+
+static void copy_workqueue_attrs(struct workqueue_attrs *to,
+ const struct workqueue_attrs *from)
+{
+ to->nice = from->nice;
+ cpumask_copy(to->cpumask, from->cpumask);
+}
+
+/* hash value of the content of @attr */
+static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
+{
+ u32 hash = 0;
+
+ hash = jhash_1word(attrs->nice, hash);
+ hash = jhash(cpumask_bits(attrs->cpumask),
+ BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+ return hash;
+}
+
+/* content equality test */
+static bool wqattrs_equal(const struct workqueue_attrs *a,
+ const struct workqueue_attrs *b)
+{
+ if (a->nice != b->nice)
+ return false;
+ if (!cpumask_equal(a->cpumask, b->cpumask))
+ return false;
+ return true;
+}
+
+/**
+ * init_worker_pool - initialize a newly zalloc'd worker_pool
+ * @pool: worker_pool to initialize
+ *
+ * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
+ * Returns 0 on success, -errno on failure. Even on failure, all fields
+ * inside @pool proper are initialized and put_unbound_pool() can be called
+ * on @pool safely to release it.
+ */
+static int init_worker_pool(struct worker_pool *pool)
+{
+ spin_lock_init(&pool->lock);
+ pool->id = -1;
+ pool->cpu = -1;
+ pool->node = NUMA_NO_NODE;
+ pool->flags |= POOL_DISASSOCIATED;
+ INIT_LIST_HEAD(&pool->worklist);
+ INIT_LIST_HEAD(&pool->idle_list);
+ hash_init(pool->busy_hash);
+
+ init_timer_deferrable(&pool->idle_timer);
+ pool->idle_timer.function = idle_worker_timeout;
+ pool->idle_timer.data = (unsigned long)pool;
+
+ setup_timer(&pool->mayday_timer, pool_mayday_timeout,
+ (unsigned long)pool);
+
+ mutex_init(&pool->manager_arb);
+ mutex_init(&pool->manager_mutex);
+ idr_init(&pool->worker_idr);
+
+ INIT_HLIST_NODE(&pool->hash_node);
+ pool->refcnt = 1;
+
+ /* shouldn't fail above this point */
+ pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ if (!pool->attrs)
+ return -ENOMEM;
+ return 0;
+}
+
+static void rcu_free_pool(struct rcu_head *rcu)
+{
+ struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
+
+ idr_destroy(&pool->worker_idr);
+ free_workqueue_attrs(pool->attrs);
+ kfree(pool);
+}
+
+/**
+ * put_unbound_pool - put a worker_pool
+ * @pool: worker_pool to put
+ *
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * safe manner. get_unbound_pool() calls this function on its failure path
+ * and this function should be able to release pools which went through,
+ * successfully or not, init_worker_pool().
+ *
+ * Should be called with wq_pool_mutex held.
+ */
+static void put_unbound_pool(struct worker_pool *pool)
+{
+ struct worker *worker;
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+ if (--pool->refcnt)
+ return;
+
+ /* sanity checks */
+ if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+ WARN_ON(!list_empty(&pool->worklist)))
+ return;
+
+ /* release id and unhash */
+ if (pool->id >= 0)
+ idr_remove(&worker_pool_idr, pool->id);
+ hash_del(&pool->hash_node);
+
+ /*
+ * Become the manager and destroy all workers. Grabbing
+ * manager_arb prevents @pool's workers from blocking on
+ * manager_mutex.
+ */
+ mutex_lock(&pool->manager_arb);
+ mutex_lock(&pool->manager_mutex);
+ spin_lock_irq(&pool->lock);
+
+ while ((worker = first_worker(pool)))
+ destroy_worker(worker);
+ WARN_ON(pool->nr_workers || pool->nr_idle);
+
+ spin_unlock_irq(&pool->lock);
+ mutex_unlock(&pool->manager_mutex);
+ mutex_unlock(&pool->manager_arb);
+
+ /* shut down the timers */
+ del_timer_sync(&pool->idle_timer);
+ del_timer_sync(&pool->mayday_timer);
+
+ /* sched-RCU protected to allow dereferences from get_work_pool() */
+ call_rcu_sched(&pool->rcu, rcu_free_pool);
+}
+
+/**
+ * get_unbound_pool - get a worker_pool with the specified attributes
+ * @attrs: the attributes of the worker_pool to get
+ *
+ * Obtain a worker_pool which has the same attributes as @attrs, bump the
+ * reference count and return it. If there already is a matching
+ * worker_pool, it will be used; otherwise, this function attempts to
+ * create a new one. On failure, returns NULL.
+ *
+ * Should be called with wq_pool_mutex held.
+ */
+static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
+{
+ u32 hash = wqattrs_hash(attrs);
+ struct worker_pool *pool;
+ int node;
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+ /* do we already have a matching pool? */
+ hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
+ if (wqattrs_equal(pool->attrs, attrs)) {
+ pool->refcnt++;
+ goto out_unlock;
+ }
+ }
+
+ /* nope, create a new one */
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool || init_worker_pool(pool) < 0)
+ goto fail;
+
+ if (workqueue_freezing)
+ pool->flags |= POOL_FREEZING;
+
+ lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
+ copy_workqueue_attrs(pool->attrs, attrs);
+
+ /* if cpumask is contained inside a NUMA node, we belong to that node */
+ if (wq_numa_enabled) {
+ for_each_node(node) {
+ if (cpumask_subset(pool->attrs->cpumask,
+ wq_numa_possible_cpumask[node])) {
+ pool->node = node;
+ break;
+ }
+ }
+ }
+
+ if (worker_pool_assign_id(pool) < 0)
+ goto fail;
+
+ /* create and start the initial worker */
+ if (create_and_start_worker(pool) < 0)
+ goto fail;
+
+ /* install */
+ hash_add(unbound_pool_hash, &pool->hash_node, hash);
+out_unlock:
+ return pool;
+fail:
+ if (pool)
+ put_unbound_pool(pool);
+ return NULL;
+}
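
get_unbound_pool() above is a get-or-create keyed by the hash of the attrs: a hit bumps the refcount, a miss creates and inserts a new pool. A minimal standalone sketch of that pattern, with a plain bucket array standing in for the kernel hashtable and an int key standing in for workqueue_attrs:

#include <stdlib.h>

#define NR_BUCKETS 16

struct pool {
	struct pool *next;
	int key;		/* stands in for the attrs the pool was created for */
	int refcnt;
};

static struct pool *buckets[NR_BUCKETS];

static struct pool *get_pool(int key)
{
	unsigned int h = (unsigned int)key % NR_BUCKETS;
	struct pool *p;

	for (p = buckets[h]; p; p = p->next) {
		if (p->key == key) {		/* wqattrs_equal() equivalent */
			p->refcnt++;
			return p;
		}
	}

	p = calloc(1, sizeof(*p));		/* miss: create a new one */
	if (!p)
		return NULL;
	p->key = key;
	p->refcnt = 1;
	p->next = buckets[h];			/* hash_add() equivalent */
	buckets[h] = p;
	return p;
}

int main(void)
{
	struct pool *a = get_pool(3);
	struct pool *b = get_pool(3);	/* same key: reuses @a, refcnt becomes 2 */

	return (a && a == b && a->refcnt == 2) ? 0 : 1;
}
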
+
+static void rcu_free_pwq(struct rcu_head *rcu)
+{
+ kmem_cache_free(pwq_cache,
+ container_of(rcu, struct pool_workqueue, rcu));
+}
+
+/*
+ * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
+ * and needs to be destroyed.
+ */
+static void pwq_unbound_release_workfn(struct work_struct *work)
+{
+ struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
+ unbound_release_work);
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+ bool is_last;
+
+ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+ return;
+
+ /*
+ * Unlink @pwq. Synchronization against wq->mutex isn't strictly
+ * necessary on release but do it anyway. It's easier to verify
+ * and consistent with the linking path.
+ */
+ mutex_lock(&wq->mutex);
+ list_del_rcu(&pwq->pwqs_node);
+ is_last = list_empty(&wq->pwqs);
+ mutex_unlock(&wq->mutex);
+
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+ mutex_unlock(&wq_pool_mutex);
+
+ call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+
+ /*
+ * If we're the last pwq going away, @wq is already dead and no one
+ * is gonna access it anymore. Free it.
+ */
+ if (is_last) {
+ free_workqueue_attrs(wq->unbound_attrs);
+ kfree(wq);
+ }
+}
+
+/**
+ * pwq_adjust_max_active - update a pwq's max_active to the current setting
+ * @pwq: target pool_workqueue
+ *
+ * If @pwq isn't freezing, set @pwq->max_active to the associated
+ * workqueue's saved_max_active and activate delayed work items
+ * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
+ */
+static void pwq_adjust_max_active(struct pool_workqueue *pwq)
+{
+ struct workqueue_struct *wq = pwq->wq;
+ bool freezable = wq->flags & WQ_FREEZABLE;
+
+ /* for @wq->saved_max_active */
+ lockdep_assert_held(&wq->mutex);
+
+ /* fast exit for non-freezable wqs */
+ if (!freezable && pwq->max_active == wq->saved_max_active)
+ return;
+
+ spin_lock_irq(&pwq->pool->lock);
+
+ if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) {
+ pwq->max_active = wq->saved_max_active;
+
+ while (!list_empty(&pwq->delayed_works) &&
+ pwq->nr_active < pwq->max_active)
+ pwq_activate_first_delayed(pwq);
/*
- * Allocate enough room to align pwq and put an extra
- * pointer at the end pointing back to the originally
- * allocated pointer which will be used for free.
+ * Need to kick a worker after thawed or an unbound wq's
+ * max_active is bumped. It's a slow path. Do it always.
*/
- ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
- if (ptr) {
- wq->pool_wq.single = PTR_ALIGN(ptr, align);
- *(void **)(wq->pool_wq.single + 1) = ptr;
+ wake_up_worker(pwq->pool);
+ } else {
+ pwq->max_active = 0;
+ }
+
+ spin_unlock_irq(&pwq->pool->lock);
+}
+
+/* initialize newly alloced @pwq which is associated with @wq and @pool */
+static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
+ struct worker_pool *pool)
+{
+ BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+
+ memset(pwq, 0, sizeof(*pwq));
+
+ pwq->pool = pool;
+ pwq->wq = wq;
+ pwq->flush_color = -1;
+ pwq->refcnt = 1;
+ INIT_LIST_HEAD(&pwq->delayed_works);
+ INIT_LIST_HEAD(&pwq->pwqs_node);
+ INIT_LIST_HEAD(&pwq->mayday_node);
+ INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
+}
+
+/* sync @pwq with the current state of its associated wq and link it */
+static void link_pwq(struct pool_workqueue *pwq)
+{
+ struct workqueue_struct *wq = pwq->wq;
+
+ lockdep_assert_held(&wq->mutex);
+
+ /* may be called multiple times, ignore if already linked */
+ if (!list_empty(&pwq->pwqs_node))
+ return;
+
+ /*
+ * Set the matching work_color. This is synchronized with
+ * wq->mutex to avoid confusing flush_workqueue().
+ */
+ pwq->work_color = wq->work_color;
+
+ /* sync max_active to the current setting */
+ pwq_adjust_max_active(pwq);
+
+ /* link in @pwq */
+ list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
+}
+
+/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
+static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs)
+{
+ struct worker_pool *pool;
+ struct pool_workqueue *pwq;
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+ pool = get_unbound_pool(attrs);
+ if (!pool)
+ return NULL;
+
+ pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
+ if (!pwq) {
+ put_unbound_pool(pool);
+ return NULL;
+ }
+
+ init_pwq(pwq, wq, pool);
+ return pwq;
+}
+
+/* undo alloc_unbound_pwq(), used only in the error path */
+static void free_unbound_pwq(struct pool_workqueue *pwq)
+{
+ lockdep_assert_held(&wq_pool_mutex);
+
+ if (pwq) {
+ put_unbound_pool(pwq->pool);
+ kmem_cache_free(pwq_cache, pwq);
+ }
+}
+
+/**
+ * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
+ * @attrs: the wq_attrs of interest
+ * @node: the target NUMA node
+ * @cpu_going_down: if >= 0, the CPU to consider as offline
+ * @cpumask: outarg, the resulting cpumask
+ *
+ * Calculate the cpumask a workqueue with @attrs should use on @node. If
+ * @cpu_going_down is >= 0, that cpu is considered offline during
+ * calculation. The result is stored in @cpumask. This function returns
+ * %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
+ *
+ * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
+ * enabled and @node has online CPUs requested by @attrs, the returned
+ * cpumask is the intersection of the possible CPUs of @node and
+ * @attrs->cpumask.
+ *
+ * The caller is responsible for ensuring that the cpumask of @node stays
+ * stable.
+ */
+static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
+ int cpu_going_down, cpumask_t *cpumask)
+{
+ if (!wq_numa_enabled || attrs->no_numa)
+ goto use_dfl;
+
+ /* does @node have any online CPUs @attrs wants? */
+ cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
+ if (cpu_going_down >= 0)
+ cpumask_clear_cpu(cpu_going_down, cpumask);
+
+ if (cpumask_empty(cpumask))
+ goto use_dfl;
+
+ /* yeap, return possible CPUs in @node that @attrs wants */
+ cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+ return !cpumask_equal(cpumask, attrs->cpumask);
+
+use_dfl:
+ cpumask_copy(cpumask, attrs->cpumask);
+ return false;
+}
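
A standalone sketch of the wq_calc_node_cpumask() decision with plain 64-bit masks standing in for cpumask_t (illustration only, not kernel code): if the node still has online CPUs the attrs want, use the intersection of the attrs mask and the node's possible CPUs; otherwise fall back to the attrs mask itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool calc_node_mask(uint64_t attrs_mask, uint64_t node_online,
			   uint64_t node_possible, int cpu_going_down,
			   uint64_t *out)
{
	uint64_t online = attrs_mask & node_online;

	if (cpu_going_down >= 0)
		online &= ~(1ULL << cpu_going_down);

	if (!online) {				/* node has nothing we want: use the default */
		*out = attrs_mask;
		return false;
	}

	*out = attrs_mask & node_possible;	/* node-local subset of the attrs mask */
	return *out != attrs_mask;
}

int main(void)
{
	uint64_t out;
	bool differs = calc_node_mask(0xff, 0x0f, 0x0f, 2, &out);

	printf("mask=%#llx differs=%d\n", (unsigned long long)out, differs);
	return 0;
}
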
+
+/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
+static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
+ int node,
+ struct pool_workqueue *pwq)
+{
+ struct pool_workqueue *old_pwq;
+
+ lockdep_assert_held(&wq->mutex);
+
+ /* link_pwq() can handle duplicate calls */
+ link_pwq(pwq);
+
+ old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
+ rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+ return old_pwq;
+}
+
+/**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
+ * machines, this function maps a separate pwq to each NUMA node with
+ * possible CPUs in @attrs->cpumask so that work items are affine to the
+ * NUMA node they were issued on. Older pwqs are released as in-flight work
+ * items finish. Note that a work item which repeatedly requeues itself
+ * back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
+ * failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs)
+{
+ struct workqueue_attrs *new_attrs, *tmp_attrs;
+ struct pool_workqueue **pwq_tbl, *dfl_pwq;
+ int node, ret;
+
+ /* only unbound workqueues can change attributes */
+ if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
+ return -EINVAL;
+
+ /* creating multiple pwqs breaks ordering guarantee */
+ if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+ return -EINVAL;
+
+ pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+ new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ if (!pwq_tbl || !new_attrs || !tmp_attrs)
+ goto enomem;
+
+ /* make a copy of @attrs and sanitize it */
+ copy_workqueue_attrs(new_attrs, attrs);
+ cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
+
+ /*
+ * We may create multiple pwqs with differing cpumasks. Make a
+ * copy of @new_attrs which will be modified and used to obtain
+ * pools.
+ */
+ copy_workqueue_attrs(tmp_attrs, new_attrs);
+
+ /*
+ * CPUs should stay stable across pwq creations and installations.
+ * Pin CPUs, determine the target cpumask for each node and create
+ * pwqs accordingly.
+ */
+ get_online_cpus();
+
+ mutex_lock(&wq_pool_mutex);
+
+ /*
+ * If something goes wrong during CPU up/down, we'll fall back to
+ * the default pwq covering whole @attrs->cpumask. Always create
+ * it even if we don't use it immediately.
+ */
+ dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
+ if (!dfl_pwq)
+ goto enomem_pwq;
+
+ for_each_node(node) {
+ if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
+ pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
+ if (!pwq_tbl[node])
+ goto enomem_pwq;
+ } else {
+ dfl_pwq->refcnt++;
+ pwq_tbl[node] = dfl_pwq;
}
}
- /* just in case, make sure it's actually aligned */
- BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
- return wq->pool_wq.v ? 0 : -ENOMEM;
+ mutex_unlock(&wq_pool_mutex);
+
+ /* all pwqs have been created successfully, let's install'em */
+ mutex_lock(&wq->mutex);
+
+ copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
+
+ /* save the previous pwq and install the new one */
+ for_each_node(node)
+ pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
+
+ /* @dfl_pwq might not have been used, ensure it's linked */
+ link_pwq(dfl_pwq);
+ swap(wq->dfl_pwq, dfl_pwq);
+
+ mutex_unlock(&wq->mutex);
+
+ /* put the old pwqs */
+ for_each_node(node)
+ put_pwq_unlocked(pwq_tbl[node]);
+ put_pwq_unlocked(dfl_pwq);
+
+ put_online_cpus();
+ ret = 0;
+ /* fall through */
+out_free:
+ free_workqueue_attrs(tmp_attrs);
+ free_workqueue_attrs(new_attrs);
+ kfree(pwq_tbl);
+ return ret;
+
+enomem_pwq:
+ free_unbound_pwq(dfl_pwq);
+ for_each_node(node)
+ if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
+ free_unbound_pwq(pwq_tbl[node]);
+ mutex_unlock(&wq_pool_mutex);
+ put_online_cpus();
+enomem:
+ ret = -ENOMEM;
+ goto out_free;
}
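
A hedged usage sketch of the attrs interface introduced above, pinning an unbound workqueue's workers to a cpumask and nice level; the driver structure and names are hypothetical:

static int mydrv_setup_wq(struct mydrv *drv, const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int ret;

	drv->wq = alloc_workqueue("mydrv", WQ_UNBOUND, 0);
	if (!drv->wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs) {
		destroy_workqueue(drv->wq);
		return -ENOMEM;
	}

	attrs->nice = -5;			/* run the workers at elevated priority */
	cpumask_copy(attrs->cpumask, mask);
	ret = apply_workqueue_attrs(drv->wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}
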
-static void free_pwqs(struct workqueue_struct *wq)
+/**
+ * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
+ * @wq: the target workqueue
+ * @cpu: the CPU coming up or going down
+ * @online: whether @cpu is coming up or going down
+ *
+ * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
+ * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
+ * @wq accordingly.
+ *
+ * If NUMA affinity can't be adjusted due to memory allocation failure, it
+ * falls back to @wq->dfl_pwq which may not be optimal but is always
+ * correct.
+ *
+ * Note that when the last allowed CPU of a NUMA node goes offline for a
+ * workqueue with a cpumask spanning multiple nodes, the workers which were
+ * already executing the work items for the workqueue will lose their CPU
+ * affinity and may execute on any CPU. This is similar to how per-cpu
+ * workqueues behave on CPU_DOWN. If a workqueue user wants strict
+ * affinity, it's the user's responsibility to flush the work item from
+ * CPU_DOWN_PREPARE.
+ */
+static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
+ bool online)
{
- if (!(wq->flags & WQ_UNBOUND))
- free_percpu(wq->pool_wq.pcpu);
- else if (wq->pool_wq.single) {
- /* the pointer to free is stored right after the pwq */
- kfree(*(void **)(wq->pool_wq.single + 1));
+ int node = cpu_to_node(cpu);
+ int cpu_off = online ? -1 : cpu;
+ struct pool_workqueue *old_pwq = NULL, *pwq;
+ struct workqueue_attrs *target_attrs;
+ cpumask_t *cpumask;
+
+ lockdep_assert_held(&wq_pool_mutex);
+
+ if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
+ return;
+
+ /*
+ * We don't wanna alloc/free wq_attrs for each wq for each CPU.
+ * Let's use a preallocated one. The following buf is protected by
+ * CPU hotplug exclusion.
+ */
+ target_attrs = wq_update_unbound_numa_attrs_buf;
+ cpumask = target_attrs->cpumask;
+
+ mutex_lock(&wq->mutex);
+ if (wq->unbound_attrs->no_numa)
+ goto out_unlock;
+
+ copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
+ pwq = unbound_pwq_by_node(wq, node);
+
+ /*
+ * Let's determine what needs to be done. If the target cpumask is
+ * different from wq's, we need to compare it to @pwq's and create
+ * a new one if they don't match. If the target cpumask equals
+ * wq's, the default pwq should be used. If @pwq is already the
+ * default one, nothing to do; otherwise, install the default one.
+ */
+ if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
+ if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
+ goto out_unlock;
+ } else {
+ if (pwq == wq->dfl_pwq)
+ goto out_unlock;
+ else
+ goto use_dfl_pwq;
+ }
+
+ mutex_unlock(&wq->mutex);
+
+ /* create a new pwq */
+ pwq = alloc_unbound_pwq(wq, target_attrs);
+ if (!pwq) {
+ pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
+ wq->name);
+ goto out_unlock;
+ }
+
+ /*
+ * Install the new pwq. As this function is called only from CPU
+ * hotplug callbacks and applying a new attrs is wrapped with
+ * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
+ * in between.
+ */
+ mutex_lock(&wq->mutex);
+ old_pwq = numa_pwq_tbl_install(wq, node, pwq);
+ goto out_unlock;
+
+use_dfl_pwq:
+ spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ get_pwq(wq->dfl_pwq);
+ spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+out_unlock:
+ mutex_unlock(&wq->mutex);
+ put_pwq_unlocked(old_pwq);
+}
+
+static int alloc_and_link_pwqs(struct workqueue_struct *wq)
+{
+ bool highpri = wq->flags & WQ_HIGHPRI;
+ int cpu;
+
+ if (!(wq->flags & WQ_UNBOUND)) {
+ wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+ if (!wq->cpu_pwqs)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct pool_workqueue *pwq =
+ per_cpu_ptr(wq->cpu_pwqs, cpu);
+ struct worker_pool *cpu_pools =
+ per_cpu(cpu_worker_pools, cpu);
+
+ init_pwq(pwq, wq, &cpu_pools[highpri]);
+
+ mutex_lock(&wq->mutex);
+ link_pwq(pwq);
+ mutex_unlock(&wq->mutex);
+ }
+ return 0;
+ } else {
+ return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
}
}
@@ -3151,30 +4078,28 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
struct lock_class_key *key,
const char *lock_name, ...)
{
- va_list args, args1;
+ size_t tbl_size = 0;
+ va_list args;
struct workqueue_struct *wq;
- unsigned int cpu;
- size_t namelen;
+ struct pool_workqueue *pwq;
- /* determine namelen, allocate wq and format name */
- va_start(args, lock_name);
- va_copy(args1, args);
- namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+ /* allocate wq and format name */
+ if (flags & WQ_UNBOUND)
+ tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
- wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+ wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
if (!wq)
- goto err;
+ return NULL;
- vsnprintf(wq->name, namelen, fmt, args1);
- va_end(args);
- va_end(args1);
+ if (flags & WQ_UNBOUND) {
+ wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ if (!wq->unbound_attrs)
+ goto err_free_wq;
+ }
- /*
- * Workqueues which may be used during memory reclaim should
- * have a rescuer to guarantee forward progress.
- */
- if (flags & WQ_MEM_RECLAIM)
- flags |= WQ_RESCUER;
+ va_start(args, lock_name);
+ vsnprintf(wq->name, sizeof(wq->name), fmt, args);
+ va_end(args);
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, wq->name);
@@ -3182,71 +4107,70 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
/* init wq */
wq->flags = flags;
wq->saved_max_active = max_active;
- mutex_init(&wq->flush_mutex);
+ mutex_init(&wq->mutex);
atomic_set(&wq->nr_pwqs_to_flush, 0);
+ INIT_LIST_HEAD(&wq->pwqs);
INIT_LIST_HEAD(&wq->flusher_queue);
INIT_LIST_HEAD(&wq->flusher_overflow);
+ INIT_LIST_HEAD(&wq->maydays);
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
INIT_LIST_HEAD(&wq->list);
- if (alloc_pwqs(wq) < 0)
- goto err;
-
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ if (alloc_and_link_pwqs(wq) < 0)
+ goto err_free_wq;
- BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
- pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
- pwq->wq = wq;
- pwq->flush_color = -1;
- pwq->max_active = max_active;
- INIT_LIST_HEAD(&pwq->delayed_works);
- }
-
- if (flags & WQ_RESCUER) {
+ /*
+ * Workqueues which may be used during memory reclaim should
+ * have a rescuer to guarantee forward progress.
+ */
+ if (flags & WQ_MEM_RECLAIM) {
struct worker *rescuer;
- if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
- goto err;
-
- wq->rescuer = rescuer = alloc_worker();
+ rescuer = alloc_worker();
if (!rescuer)
- goto err;
+ goto err_destroy;
rescuer->rescue_wq = wq;
rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
wq->name);
- if (IS_ERR(rescuer->task))
- goto err;
+ if (IS_ERR(rescuer->task)) {
+ kfree(rescuer);
+ goto err_destroy;
+ }
- rescuer->task->flags |= PF_THREAD_BOUND;
+ wq->rescuer = rescuer;
+ rescuer->task->flags |= PF_NO_SETAFFINITY;
wake_up_process(rescuer->task);
}
+ if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
+ goto err_destroy;
+
/*
- * workqueue_lock protects global freeze state and workqueues
- * list. Grab it, set max_active accordingly and add the new
- * workqueue to workqueues list.
+ * wq_pool_mutex protects global freeze state and workqueues list.
+ * Grab it, adjust max_active and add the new @wq to workqueues
+ * list.
*/
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq_pool_mutex);
- if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
- for_each_pwq_cpu(cpu, wq)
- get_pwq(cpu, wq)->max_active = 0;
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq)
+ pwq_adjust_max_active(pwq);
+ mutex_unlock(&wq->mutex);
list_add(&wq->list, &workqueues);
- spin_unlock(&workqueue_lock);
+ mutex_unlock(&wq_pool_mutex);
return wq;
-err:
- if (wq) {
- free_pwqs(wq);
- free_mayday_mask(wq->mayday_mask);
- kfree(wq->rescuer);
- kfree(wq);
- }
+
+err_free_wq:
+ free_workqueue_attrs(wq->unbound_attrs);
+ kfree(wq);
+ return NULL;
+err_destroy:
+ destroy_workqueue(wq);
return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
@@ -3259,60 +4183,78 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- unsigned int cpu;
+ struct pool_workqueue *pwq;
+ int node;
/* drain it before proceeding with destruction */
drain_workqueue(wq);
+ /* sanity checks */
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq) {
+ int i;
+
+ for (i = 0; i < WORK_NR_COLORS; i++) {
+ if (WARN_ON(pwq->nr_in_flight[i])) {
+ mutex_unlock(&wq->mutex);
+ return;
+ }
+ }
+
+ if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
+ WARN_ON(pwq->nr_active) ||
+ WARN_ON(!list_empty(&pwq->delayed_works))) {
+ mutex_unlock(&wq->mutex);
+ return;
+ }
+ }
+ mutex_unlock(&wq->mutex);
+
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- spin_lock(&workqueue_lock);
- list_del(&wq->list);
- spin_unlock(&workqueue_lock);
+ mutex_lock(&wq_pool_mutex);
+ list_del_init(&wq->list);
+ mutex_unlock(&wq_pool_mutex);
- /* sanity check */
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
- int i;
+ workqueue_sysfs_unregister(wq);
- for (i = 0; i < WORK_NR_COLORS; i++)
- BUG_ON(pwq->nr_in_flight[i]);
- BUG_ON(pwq->nr_active);
- BUG_ON(!list_empty(&pwq->delayed_works));
- }
-
- if (wq->flags & WQ_RESCUER) {
+ if (wq->rescuer) {
kthread_stop(wq->rescuer->task);
- free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer);
+ wq->rescuer = NULL;
}
- free_pwqs(wq);
- kfree(wq);
-}
-EXPORT_SYMBOL_GPL(destroy_workqueue);
-
-/**
- * pwq_set_max_active - adjust max_active of a pwq
- * @pwq: target pool_workqueue
- * @max_active: new max_active value.
- *
- * Set @pwq->max_active to @max_active and activate delayed works if
- * increased.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
-{
- pwq->max_active = max_active;
+ if (!(wq->flags & WQ_UNBOUND)) {
+ /*
+ * The base ref is never dropped on per-cpu pwqs. Directly
+ * free the pwqs and wq.
+ */
+ free_percpu(wq->cpu_pwqs);
+ kfree(wq);
+ } else {
+ /*
+ * We're the sole accessor of @wq at this point. Directly
+ * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
+ * @wq will be freed when the last pwq is released.
+ */
+ for_each_node(node) {
+ pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
+ RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
+ put_pwq_unlocked(pwq);
+ }
- while (!list_empty(&pwq->delayed_works) &&
- pwq->nr_active < pwq->max_active)
- pwq_activate_first_delayed(pwq);
+ /*
+ * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
+ * put. Don't access it afterwards.
+ */
+ pwq = wq->dfl_pwq;
+ wq->dfl_pwq = NULL;
+ put_pwq_unlocked(pwq);
+ }
}
+EXPORT_SYMBOL_GPL(destroy_workqueue);
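
A hedged alloc/destroy pairing sketch (names are hypothetical): a workqueue used on the memory-reclaim path gets WQ_MEM_RECLAIM so a rescuer is created, and WQ_SYSFS so it shows up under /sys/bus/workqueue/devices/; destroy_workqueue() drains it and drops the base pwq references as implemented above:

static int mydrv_init(struct mydrv *drv)
{
	drv->wq = alloc_workqueue("mydrv_reclaim",
				  WQ_MEM_RECLAIM | WQ_SYSFS, 1);
	return drv->wq ? 0 : -ENOMEM;
}

static void mydrv_exit(struct mydrv *drv)
{
	destroy_workqueue(drv->wq);
	drv->wq = NULL;
}
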
/**
* workqueue_set_max_active - adjust max_active of a workqueue
@@ -3326,30 +4268,37 @@ static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
*/
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
- unsigned int cpu;
+ struct pool_workqueue *pwq;
+
+ /* disallow meddling with max_active for ordered workqueues */
+ if (WARN_ON(wq->flags & __WQ_ORDERED))
+ return;
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq->mutex);
wq->saved_max_active = max_active;
- for_each_pwq_cpu(cpu, wq) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
- struct worker_pool *pool = pwq->pool;
-
- spin_lock_irq(&pool->lock);
+ for_each_pwq(pwq, wq)
+ pwq_adjust_max_active(pwq);
- if (!(wq->flags & WQ_FREEZABLE) ||
- !(pool->flags & POOL_FREEZING))
- pwq_set_max_active(pwq, max_active);
+ mutex_unlock(&wq->mutex);
+}
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
- spin_unlock_irq(&pool->lock);
- }
+/**
+ * current_is_workqueue_rescuer - is %current workqueue rescuer?
+ *
+ * Determine whether %current is a workqueue rescuer. Can be used from
+ * work functions to determine whether it's being run off the rescuer task.
+ */
+bool current_is_workqueue_rescuer(void)
+{
+ struct worker *worker = current_wq_worker();
- spin_unlock(&workqueue_lock);
+ return worker && worker->rescue_wq;
}
-EXPORT_SYMBOL_GPL(workqueue_set_max_active);
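
A hedged sketch of how a work function might use current_is_workqueue_rescuer() added above; the work function itself is hypothetical. Running off the rescuer implies memory pressure, so the item avoids blocking allocations:

static void mydrv_work_fn(struct work_struct *work)
{
	gfp_t gfp = current_is_workqueue_rescuer() ? GFP_NOWAIT : GFP_KERNEL;
	void *buf = kmalloc(64, gfp);	/* don't block for memory on the rescuer */

	if (!buf)
		return;
	/* ... process the item using @buf ... */
	kfree(buf);
}
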
/**
* workqueue_congested - test whether a workqueue is congested
@@ -3363,11 +4312,22 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
* RETURNS:
* %true if congested, %false otherwise.
*/
-bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+bool workqueue_congested(int cpu, struct workqueue_struct *wq)
{
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+ struct pool_workqueue *pwq;
+ bool ret;
- return !list_empty(&pwq->delayed_works);
+ rcu_read_lock_sched();
+
+ if (!(wq->flags & WQ_UNBOUND))
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ else
+ pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+
+ ret = !list_empty(&pwq->delayed_works);
+ rcu_read_unlock_sched();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(workqueue_congested);
@@ -3384,19 +4344,22 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
*/
unsigned int work_busy(struct work_struct *work)
{
- struct worker_pool *pool = get_work_pool(work);
+ struct worker_pool *pool;
unsigned long flags;
unsigned int ret = 0;
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
+ local_irq_save(flags);
+ pool = get_work_pool(work);
if (pool) {
- spin_lock_irqsave(&pool->lock, flags);
+ spin_lock(&pool->lock);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock_irqrestore(&pool->lock, flags);
+ spin_unlock(&pool->lock);
}
+ local_irq_restore(flags);
return ret;
}
@@ -3422,31 +4385,28 @@ static void wq_unbind_fn(struct work_struct *work)
int cpu = smp_processor_id();
struct worker_pool *pool;
struct worker *worker;
- int i;
+ int wi;
- for_each_std_worker_pool(pool, cpu) {
- BUG_ON(cpu != smp_processor_id());
+ for_each_cpu_worker_pool(pool, cpu) {
+ WARN_ON_ONCE(cpu != smp_processor_id());
- mutex_lock(&pool->assoc_mutex);
+ mutex_lock(&pool->manager_mutex);
spin_lock_irq(&pool->lock);
/*
- * We've claimed all manager positions. Make all workers
+ * We've blocked all manager operations. Make all workers
* unbound and set DISASSOCIATED. Before this, all workers
* except for the ones which are still executing works from
* before the last CPU down must be on the cpu. After
* this, they may become diasporas.
*/
- list_for_each_entry(worker, &pool->idle_list, entry)
- worker->flags |= WORKER_UNBOUND;
-
- for_each_busy_worker(worker, i, pool)
+ for_each_pool_worker(worker, wi, pool)
worker->flags |= WORKER_UNBOUND;
pool->flags |= POOL_DISASSOCIATED;
spin_unlock_irq(&pool->lock);
- mutex_unlock(&pool->assoc_mutex);
+ mutex_unlock(&pool->manager_mutex);
/*
* Call schedule() so that we cross rq->lock and thus can
@@ -3477,6 +4437,103 @@ static void wq_unbind_fn(struct work_struct *work)
}
}
+/**
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
+ *
+ * @pool->cpu is coming online. Rebind all workers to the CPU.
+ */
+static void rebind_workers(struct worker_pool *pool)
+{
+ struct worker *worker;
+ int wi;
+
+ lockdep_assert_held(&pool->manager_mutex);
+
+ /*
+ * Restore CPU affinity of all workers. As all idle workers should
+ * be on the run-queue of the associated CPU before any local
+ * wake-ups for concurrency management happen, restore CPU affinity
+ * of all workers first and then clear UNBOUND. As we're called
+ * from CPU_ONLINE, the following shouldn't fail.
+ */
+ for_each_pool_worker(worker, wi, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+
+ spin_lock_irq(&pool->lock);
+
+ for_each_pool_worker(worker, wi, pool) {
+ unsigned int worker_flags = worker->flags;
+
+ /*
+ * A bound idle worker should actually be on the runqueue
+ * of the associated CPU for local wake-ups targeting it to
+ * work. Kick all idle workers so that they migrate to the
+ * associated CPU. Doing this in the same loop as
+ * replacing UNBOUND with REBOUND is safe as no worker will
+ * be bound before @pool->lock is released.
+ */
+ if (worker_flags & WORKER_IDLE)
+ wake_up_process(worker->task);
+
+ /*
+ * We want to clear UNBOUND but can't directly call
+ * worker_clr_flags() or adjust nr_running. Atomically
+ * replace UNBOUND with another NOT_RUNNING flag REBOUND.
+ * @worker will clear REBOUND using worker_clr_flags() when
+ * it initiates the next execution cycle thus restoring
+ * concurrency management. Note that when or whether
+ * @worker clears REBOUND doesn't affect correctness.
+ *
+ * ACCESS_ONCE() is necessary because @worker->flags may be
+ * tested without holding any lock in
+ * wq_worker_waking_up(). Without it, NOT_RUNNING test may
+ * fail incorrectly leading to premature concurrency
+ * management operations.
+ */
+ WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+ worker_flags |= WORKER_REBOUND;
+ worker_flags &= ~WORKER_UNBOUND;
+ ACCESS_ONCE(worker->flags) = worker_flags;
+ }
+
+ spin_unlock_irq(&pool->lock);
+}
+
+/**
+ * restore_unbound_workers_cpumask - restore cpumask of unbound workers
+ * @pool: unbound pool of interest
+ * @cpu: the CPU which is coming up
+ *
+ * An unbound pool may end up with a cpumask which doesn't have any online
+ * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
+ * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
+ * online CPU before, cpus_allowed of all its workers should be restored.
+ */
+static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
+{
+ static cpumask_t cpumask;
+ struct worker *worker;
+ int wi;
+
+ lockdep_assert_held(&pool->manager_mutex);
+
+ /* is @cpu allowed for @pool? */
+ if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
+ return;
+
+ /* is @cpu the only online CPU? */
+ cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
+ if (cpumask_weight(&cpumask) != 1)
+ return;
+
+ /* as we're called from CPU_ONLINE, the following shouldn't fail */
+ for_each_pool_worker(worker, wi, pool)
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
+ pool->attrs->cpumask) < 0);
+}
+
/*
* Workqueues should be brought up before normal priority CPU notifiers.
* This will be registered high priority CPU notifier.
@@ -3485,39 +4542,46 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
- unsigned int cpu = (unsigned long)hcpu;
+ int cpu = (unsigned long)hcpu;
struct worker_pool *pool;
+ struct workqueue_struct *wq;
+ int pi;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- for_each_std_worker_pool(pool, cpu) {
- struct worker *worker;
-
+ for_each_cpu_worker_pool(pool, cpu) {
if (pool->nr_workers)
continue;
-
- worker = create_worker(pool);
- if (!worker)
+ if (create_and_start_worker(pool) < 0)
return NOTIFY_BAD;
-
- spin_lock_irq(&pool->lock);
- start_worker(worker);
- spin_unlock_irq(&pool->lock);
}
break;
case CPU_DOWN_FAILED:
case CPU_ONLINE:
- for_each_std_worker_pool(pool, cpu) {
- mutex_lock(&pool->assoc_mutex);
- spin_lock_irq(&pool->lock);
+ mutex_lock(&wq_pool_mutex);
- pool->flags &= ~POOL_DISASSOCIATED;
- rebind_workers(pool);
+ for_each_pool(pool, pi) {
+ mutex_lock(&pool->manager_mutex);
+
+ if (pool->cpu == cpu) {
+ spin_lock_irq(&pool->lock);
+ pool->flags &= ~POOL_DISASSOCIATED;
+ spin_unlock_irq(&pool->lock);
+
+ rebind_workers(pool);
+ } else if (pool->cpu < 0) {
+ restore_unbound_workers_cpumask(pool, cpu);
+ }
- spin_unlock_irq(&pool->lock);
- mutex_unlock(&pool->assoc_mutex);
+ mutex_unlock(&pool->manager_mutex);
}
+
+ /* update NUMA affinity of unbound workqueues */
+ list_for_each_entry(wq, &workqueues, list)
+ wq_update_unbound_numa(wq, cpu, true);
+
+ mutex_unlock(&wq_pool_mutex);
break;
}
return NOTIFY_OK;
@@ -3531,14 +4595,23 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
- unsigned int cpu = (unsigned long)hcpu;
+ int cpu = (unsigned long)hcpu;
struct work_struct unbind_work;
+ struct workqueue_struct *wq;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
- /* unbinding should happen on the local CPU */
+ /* unbinding per-cpu workers should happen on the local CPU */
INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+ /* update NUMA affinity of unbound workqueues */
+ mutex_lock(&wq_pool_mutex);
+ list_for_each_entry(wq, &workqueues, list)
+ wq_update_unbound_numa(wq, cpu, false);
+ mutex_unlock(&wq_pool_mutex);
+
+ /* wait for per-cpu unbinding to finish */
flush_work(&unbind_work);
break;
}
@@ -3571,7 +4644,7 @@ static void work_for_cpu_fn(struct work_struct *work)
* It is up to the caller to ensure that the cpu doesn't go offline.
* The caller must not hold any locks which would prevent @fn from completing.
*/
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
struct work_for_cpu wfc = { .fn = fn, .arg = arg };
@@ -3589,44 +4662,40 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
* freeze_workqueues_begin - begin freezing workqueues
*
* Start freezing workqueues. After this function returns, all freezable
- * workqueues will queue new works to their frozen_works list instead of
+ * workqueues will queue new works to their delayed_works list instead of
* pool->worklist.
*
* CONTEXT:
- * Grabs and releases workqueue_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
*/
void freeze_workqueues_begin(void)
{
- unsigned int cpu;
+ struct worker_pool *pool;
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
+ int pi;
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq_pool_mutex);
- BUG_ON(workqueue_freezing);
+ WARN_ON_ONCE(workqueue_freezing);
workqueue_freezing = true;
- for_each_wq_cpu(cpu) {
- struct worker_pool *pool;
- struct workqueue_struct *wq;
-
- for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
-
- WARN_ON_ONCE(pool->flags & POOL_FREEZING);
- pool->flags |= POOL_FREEZING;
-
- list_for_each_entry(wq, &workqueues, list) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
- if (pwq && pwq->pool == pool &&
- (wq->flags & WQ_FREEZABLE))
- pwq->max_active = 0;
- }
+ /* set FREEZING */
+ for_each_pool(pool, pi) {
+ spin_lock_irq(&pool->lock);
+ WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+ pool->flags |= POOL_FREEZING;
+ spin_unlock_irq(&pool->lock);
+ }
- spin_unlock_irq(&pool->lock);
- }
+ list_for_each_entry(wq, &workqueues, list) {
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq)
+ pwq_adjust_max_active(pwq);
+ mutex_unlock(&wq->mutex);
}
- spin_unlock(&workqueue_lock);
+ mutex_unlock(&wq_pool_mutex);
}
/**
@@ -3636,7 +4705,7 @@ void freeze_workqueues_begin(void)
* between freeze_workqueues_begin() and thaw_workqueues().
*
* CONTEXT:
- * Grabs and releases workqueue_lock.
+ * Grabs and releases wq_pool_mutex.
*
* RETURNS:
* %true if some freezable workqueues are still busy. %false if freezing
@@ -3644,34 +4713,34 @@ void freeze_workqueues_begin(void)
*/
bool freeze_workqueues_busy(void)
{
- unsigned int cpu;
bool busy = false;
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq_pool_mutex);
- BUG_ON(!workqueue_freezing);
+ WARN_ON_ONCE(!workqueue_freezing);
- for_each_wq_cpu(cpu) {
- struct workqueue_struct *wq;
+ list_for_each_entry(wq, &workqueues, list) {
+ if (!(wq->flags & WQ_FREEZABLE))
+ continue;
/*
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
- list_for_each_entry(wq, &workqueues, list) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
- if (!pwq || !(wq->flags & WQ_FREEZABLE))
- continue;
-
- BUG_ON(pwq->nr_active < 0);
+ rcu_read_lock_sched();
+ for_each_pwq(pwq, wq) {
+ WARN_ON_ONCE(pwq->nr_active < 0);
if (pwq->nr_active) {
busy = true;
+ rcu_read_unlock_sched();
goto out_unlock;
}
}
+ rcu_read_unlock_sched();
}
out_unlock:
- spin_unlock(&workqueue_lock);
+ mutex_unlock(&wq_pool_mutex);
return busy;
}
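
freeze_workqueues_begin() above and thaw_workqueues() below now both defer the per-pwq max_active handling to pwq_adjust_max_active(), whose body is not part of this hunk. A minimal, self-contained sketch of the clamp-and-restore idea it implements follows; the struct and helper names are illustrative stand-ins, not the kernel types:

/* Sketch of the clamp-and-restore behaviour behind the repeated
 * pwq_adjust_max_active() calls: freezable pwqs get max_active = 0
 * while freezing and the saved value back on thaw. */
#include <stdbool.h>
#include <stdio.h>

struct fake_pwq {
	bool freezable;
	int saved_max_active;
	int max_active;
};

static void adjust_max_active(struct fake_pwq *pwq, bool freezing)
{
	if (pwq->freezable && freezing)
		pwq->max_active = 0;	/* new work stays on the delayed list */
	else
		pwq->max_active = pwq->saved_max_active;
}

int main(void)
{
	struct fake_pwq pwq = { .freezable = true, .saved_max_active = 16 };

	adjust_max_active(&pwq, true);
	printf("frozen: max_active=%d\n", pwq.max_active);
	adjust_max_active(&pwq, false);
	printf("thawed: max_active=%d\n", pwq.max_active);
	return 0;
}

In the kernel the restore path also has to move delayed works back onto the active list, which is why the thaw path below labels its second loop "restore max_active and repopulate worklist".
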
@@ -3682,104 +4751,141 @@ out_unlock:
* frozen works are transferred to their respective pool worklists.
*
* CONTEXT:
- * Grabs and releases workqueue_lock and pool->lock's.
+ * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
*/
void thaw_workqueues(void)
{
- unsigned int cpu;
+ struct workqueue_struct *wq;
+ struct pool_workqueue *pwq;
+ struct worker_pool *pool;
+ int pi;
- spin_lock(&workqueue_lock);
+ mutex_lock(&wq_pool_mutex);
if (!workqueue_freezing)
goto out_unlock;
- for_each_wq_cpu(cpu) {
- struct worker_pool *pool;
- struct workqueue_struct *wq;
+ /* clear FREEZING */
+ for_each_pool(pool, pi) {
+ spin_lock_irq(&pool->lock);
+ WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+ pool->flags &= ~POOL_FREEZING;
+ spin_unlock_irq(&pool->lock);
+ }
- for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
+ /* restore max_active and repopulate worklist */
+ list_for_each_entry(wq, &workqueues, list) {
+ mutex_lock(&wq->mutex);
+ for_each_pwq(pwq, wq)
+ pwq_adjust_max_active(pwq);
+ mutex_unlock(&wq->mutex);
+ }
- WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
- pool->flags &= ~POOL_FREEZING;
+ workqueue_freezing = false;
+out_unlock:
+ mutex_unlock(&wq_pool_mutex);
+}
+#endif /* CONFIG_FREEZER */
- list_for_each_entry(wq, &workqueues, list) {
- struct pool_workqueue *pwq = get_pwq(cpu, wq);
+static void __init wq_numa_init(void)
+{
+ cpumask_var_t *tbl;
+ int node, cpu;
- if (!pwq || pwq->pool != pool ||
- !(wq->flags & WQ_FREEZABLE))
- continue;
+ /* determine NUMA pwq table len - highest node id + 1 */
+ for_each_node(node)
+ wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
- /* restore max_active and repopulate worklist */
- pwq_set_max_active(pwq, wq->saved_max_active);
- }
+ if (num_possible_nodes() <= 1)
+ return;
- wake_up_worker(pool);
+ if (wq_disable_numa) {
+ pr_info("workqueue: NUMA affinity support disabled\n");
+ return;
+ }
+
+ wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
+ BUG_ON(!wq_update_unbound_numa_attrs_buf);
- spin_unlock_irq(&pool->lock);
+ /*
+ * We want masks of possible CPUs of each node which isn't readily
+ * available. Build one from cpu_to_node() which should have been
+ * fully initialized by now.
+ */
+ tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
+ BUG_ON(!tbl);
+
+ for_each_node(node)
+ BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node));
+
+ for_each_possible_cpu(cpu) {
+ node = cpu_to_node(cpu);
+ if (WARN_ON(node == NUMA_NO_NODE)) {
+ pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
+ /* happens iff arch is bonkers, let's just proceed */
+ return;
}
+ cpumask_set_cpu(cpu, tbl[node]);
}
- workqueue_freezing = false;
-out_unlock:
- spin_unlock(&workqueue_lock);
+ wq_numa_possible_cpumask = tbl;
+ wq_numa_enabled = true;
}
-#endif /* CONFIG_FREEZER */
static int __init init_workqueues(void)
{
- unsigned int cpu;
+ int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+ int i, cpu;
/* make sure we have enough bits for OFFQ pool ID */
BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
WORK_CPU_END * NR_STD_WORKER_POOLS);
+ WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+
+ pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+
cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+ wq_numa_init();
+
/* initialize CPU pools */
- for_each_wq_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
struct worker_pool *pool;
- for_each_std_worker_pool(pool, cpu) {
- spin_lock_init(&pool->lock);
+ i = 0;
+ for_each_cpu_worker_pool(pool, cpu) {
+ BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
- pool->flags |= POOL_DISASSOCIATED;
- INIT_LIST_HEAD(&pool->worklist);
- INIT_LIST_HEAD(&pool->idle_list);
- hash_init(pool->busy_hash);
-
- init_timer_deferrable(&pool->idle_timer);
- pool->idle_timer.function = idle_worker_timeout;
- pool->idle_timer.data = (unsigned long)pool;
-
- setup_timer(&pool->mayday_timer, pool_mayday_timeout,
- (unsigned long)pool);
-
- mutex_init(&pool->assoc_mutex);
- ida_init(&pool->worker_ida);
+ cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+ pool->attrs->nice = std_nice[i++];
+ pool->node = cpu_to_node(cpu);
/* alloc pool ID */
+ mutex_lock(&wq_pool_mutex);
BUG_ON(worker_pool_assign_id(pool));
+ mutex_unlock(&wq_pool_mutex);
}
}
/* create the initial worker */
- for_each_online_wq_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct worker_pool *pool;
- for_each_std_worker_pool(pool, cpu) {
- struct worker *worker;
+ for_each_cpu_worker_pool(pool, cpu) {
+ pool->flags &= ~POOL_DISASSOCIATED;
+ BUG_ON(create_and_start_worker(pool) < 0);
+ }
+ }
- if (cpu != WORK_CPU_UNBOUND)
- pool->flags &= ~POOL_DISASSOCIATED;
+ /* create default unbound wq attrs */
+ for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ struct workqueue_attrs *attrs;
- worker = create_worker(pool);
- BUG_ON(!worker);
- spin_lock_irq(&pool->lock);
- start_worker(worker);
- spin_unlock_irq(&pool->lock);
- }
+ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+ attrs->nice = std_nice[i];
+ unbound_std_wq_attrs[i] = attrs;
}
system_wq = alloc_workqueue("events", 0, 0);
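
The wq_numa_init() addition above sizes a table by the highest node id plus one and then records, per node, the mask of possible CPUs derived from cpu_to_node(), bailing out (and leaving wq_numa_enabled unset) if any CPU reports NUMA_NO_NODE. A rough userspace sketch of that grouping step, using plain bitmasks and a made-up cpu_to_node[] array in place of the kernel cpumask helpers:

/* Sketch: group possible CPUs by NUMA node, as wq_numa_init() does with
 * per-node cpumasks.  cpu_to_node[] is a stand-in for the kernel helper. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

static const int cpu_to_node[NR_CPUS] = { 0, 0, 1, 1, 0, 0, 1, 1 };

int main(void)
{
	int nr_nodes = 0, cpu, node;
	unsigned long *tbl;

	/* table length is highest node id + 1, mirroring wq_numa_tbl_len */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_to_node[cpu] + 1 > nr_nodes)
			nr_nodes = cpu_to_node[cpu] + 1;

	tbl = calloc(nr_nodes, sizeof(*tbl));
	if (!tbl)
		return 1;

	/* one bit per possible CPU, collected per node */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		tbl[cpu_to_node[cpu]] |= 1UL << cpu;

	for (node = 0; node < nr_nodes; node++)
		printf("node %d: cpumask 0x%lx\n", node, tbl[node]);

	free(tbl);
	return 0;
}
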
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 07650264ec1..84ab6e1dc6f 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -32,14 +32,12 @@ struct worker {
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
struct worker_pool *pool; /* I: the associated pool */
+ /* L: for rescuers */
/* 64 bytes boundary on 64bit, 32 on 32bit */
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
- /* for rebinding worker to CPU */
- struct work_struct rebind_work; /* L: for busy worker */
-
/* used only by rescuers to point to the target workqueue */
struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
};
@@ -58,8 +56,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched.c and workqueue.c.
*/
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
- unsigned int cpu);
+void wq_worker_waking_up(struct task_struct *task, int cpu);
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b8dc8e4cbf6..0f1d92163f3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3321,52 +3321,53 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
schedule_work(&cachep->memcg_params->destroy);
}
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
- char *name;
- struct dentry *dentry;
-
- rcu_read_lock();
- dentry = rcu_dereference(memcg->css.cgroup->dentry);
- rcu_read_unlock();
-
- BUG_ON(dentry == NULL);
-
- name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
- memcg_cache_id(memcg), dentry->d_name.name);
-
- return name;
-}
+/*
+ * This lock protects updaters, not readers. We want readers to be as fast as
+ * they can, and they will either see NULL or a valid cache value. Our model
+ * allows them to see NULL, in which case the root memcg will be selected.
+ *
+ * We need this lock because multiple allocations to the same cache from a non
+ * will span more than one worker. Only one of them can create the cache.
+ */
+static DEFINE_MUTEX(memcg_cache_mutex);
+/*
+ * Called with memcg_cache_mutex held
+ */
static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
struct kmem_cache *s)
{
- char *name;
struct kmem_cache *new;
+ static char *tmp_name = NULL;
- name = memcg_cache_name(memcg, s);
- if (!name)
- return NULL;
+ lockdep_assert_held(&memcg_cache_mutex);
+
+ /*
+ * kmem_cache_create_memcg duplicates the given name, and
+ * cgroup_name(), which is needed to build it, requires RCU
+ * context. This static temporary buffer is reused to avoid a
+ * pointless short-lived allocation on every call.
+ */
+ if (!tmp_name) {
+ tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!tmp_name)
+ return NULL;
+ }
+
+ rcu_read_lock();
+ snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+ memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+ rcu_read_unlock();
- new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
+ new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
(s->flags & ~SLAB_PANIC), s->ctor, s);
if (new)
new->allocflags |= __GFP_KMEMCG;
- kfree(name);
return new;
}
-/*
- * This lock protects updaters, not readers. We want readers to be as fast as
- * they can, and they will either see NULL or a valid cache value. Our model
- * allow them to see NULL, in which case the root memcg will be selected.
- *
- * We need this lock because multiple allocations to the same cache from a non
- * will span more than one worker. Only one of them can create the cache.
- */
-static DEFINE_MUTEX(memcg_cache_mutex);
static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
struct kmem_cache *cachep)
{
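
The kmem_cache_dup() hunk above drops the kasprintf()-based memcg_cache_name() in favour of a PATH_MAX buffer that is allocated once and reused on every call, which is safe only because callers hold memcg_cache_mutex. A rough userspace sketch of that pattern; the id and cgroup-name arguments are stand-ins for memcg_cache_id() and cgroup_name():

/* Sketch of the reused-name-buffer pattern from kmem_cache_dup():
 * allocate once, format in place, always under the same lock. */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

static const char *format_cache_name(const char *base, int id, const char *cg)
{
	static char *tmp_name;	/* lives for the whole program, like the patch */

	/* caller must hold cache_mutex, mirroring lockdep_assert_held() */
	if (!tmp_name) {
		tmp_name = malloc(PATH_MAX);
		if (!tmp_name)
			return NULL;
	}
	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", base, id, cg);
	return tmp_name;
}

int main(void)
{
	const char *name;

	pthread_mutex_lock(&cache_mutex);
	name = format_cache_name("dentry", 3, "mygroup");
	if (name)
		puts(name);
	pthread_mutex_unlock(&cache_mutex);
	return 0;
}
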
@@ -5912,6 +5913,7 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "use_hierarchy",
+ .flags = CFTYPE_INSANE,
.write_u64 = mem_cgroup_hierarchy_write,
.read_u64 = mem_cgroup_hierarchy_read,
},
@@ -6907,6 +6909,21 @@ static void mem_cgroup_move_task(struct cgroup *cont,
}
#endif
+/*
+ * Cgroup retains root cgroups across [un]mount cycles making it necessary
+ * to verify sane_behavior flag on each mount attempt.
+ */
+static void mem_cgroup_bind(struct cgroup *root)
+{
+ /*
+ * use_hierarchy is forced with sane_behavior. cgroup core
+ * guarantees that @root doesn't have any children, so turning it
+ * on for the root memcg is enough.
+ */
+ if (cgroup_sane_behavior(root))
+ mem_cgroup_from_cont(root)->use_hierarchy = true;
+}
+
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
.subsys_id = mem_cgroup_subsys_id,
@@ -6917,6 +6934,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
.attach = mem_cgroup_move_task,
+ .bind = mem_cgroup_bind,
.base_cftypes = mem_cgroup_files,
.early_init = 0,
.use_id = 1,
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 1c69e38e3a2..dd0dc574d78 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -25,6 +25,12 @@
static DEFINE_MUTEX(devcgroup_mutex);
+enum devcg_behavior {
+ DEVCG_DEFAULT_NONE,
+ DEVCG_DEFAULT_ALLOW,
+ DEVCG_DEFAULT_DENY,
+};
+
/*
* exception list locking rules:
* hold devcgroup_mutex for update/read.
@@ -42,10 +48,9 @@ struct dev_exception_item {
struct dev_cgroup {
struct cgroup_subsys_state css;
struct list_head exceptions;
- enum {
- DEVCG_DEFAULT_ALLOW,
- DEVCG_DEFAULT_DENY,
- } behavior;
+ enum devcg_behavior behavior;
+ /* temporary list for pending propagation operations */
+ struct list_head propagate_pending;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -182,35 +187,62 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
__dev_exception_clean(dev_cgroup);
}
+static inline bool is_devcg_online(const struct dev_cgroup *devcg)
+{
+ return (devcg->behavior != DEVCG_DEFAULT_NONE);
+}
+
+/**
+ * devcgroup_online - initializes devcgroup's behavior and exceptions based on
+ * parent's
+ * @cgroup: cgroup getting online
+ * returns 0 in case of success, error code otherwise
+ */
+static int devcgroup_online(struct cgroup *cgroup)
+{
+ struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
+ int ret = 0;
+
+ mutex_lock(&devcgroup_mutex);
+ dev_cgroup = cgroup_to_devcgroup(cgroup);
+ if (cgroup->parent)
+ parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);
+
+ if (parent_dev_cgroup == NULL)
+ dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ else {
+ ret = dev_exceptions_copy(&dev_cgroup->exceptions,
+ &parent_dev_cgroup->exceptions);
+ if (!ret)
+ dev_cgroup->behavior = parent_dev_cgroup->behavior;
+ }
+ mutex_unlock(&devcgroup_mutex);
+
+ return ret;
+}
+
+static void devcgroup_offline(struct cgroup *cgroup)
+{
+ struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);
+
+ mutex_lock(&devcgroup_mutex);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+ mutex_unlock(&devcgroup_mutex);
+}
+
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
- struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
- struct cgroup *parent_cgroup;
- int ret;
+ struct dev_cgroup *dev_cgroup;
dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
if (!dev_cgroup)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&dev_cgroup->exceptions);
- parent_cgroup = cgroup->parent;
-
- if (parent_cgroup == NULL)
- dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
- else {
- parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
- mutex_lock(&devcgroup_mutex);
- ret = dev_exceptions_copy(&dev_cgroup->exceptions,
- &parent_dev_cgroup->exceptions);
- dev_cgroup->behavior = parent_dev_cgroup->behavior;
- mutex_unlock(&devcgroup_mutex);
- if (ret) {
- kfree(dev_cgroup);
- return ERR_PTR(ret);
- }
- }
+ INIT_LIST_HEAD(&dev_cgroup->propagate_pending);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
return &dev_cgroup->css;
}
@@ -304,9 +336,11 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
* verify if a certain access is allowed.
* @dev_cgroup: dev cgroup to be tested against
* @refex: new exception
+ * @behavior: behavior of the exception
*/
-static int may_access(struct dev_cgroup *dev_cgroup,
- struct dev_exception_item *refex)
+static bool may_access(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *refex,
+ enum devcg_behavior behavior)
{
struct dev_exception_item *ex;
bool match = false;
@@ -330,18 +364,29 @@ static int may_access(struct dev_cgroup *dev_cgroup,
break;
}
- /*
- * In two cases we'll consider this new exception valid:
- * - the dev cgroup has its default policy to allow + exception list:
- * the new exception should *not* match any of the exceptions
- * (behavior == DEVCG_DEFAULT_ALLOW, !match)
- * - the dev cgroup has its default policy to deny + exception list:
- * the new exception *should* match the exceptions
- * (behavior == DEVCG_DEFAULT_DENY, match)
- */
- if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
- return 1;
- return 0;
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ if (behavior == DEVCG_DEFAULT_ALLOW) {
+ /* the exception will deny access to certain devices */
+ return true;
+ } else {
+ /* the exception will allow access to certain devices */
+ if (match)
+ /*
+ * a new exception allowing access shouldn't
+ * match a parent's exception
+ */
+ return false;
+ return true;
+ }
+ } else {
+ /* only behavior == DEVCG_DEFAULT_DENY allowed here */
+ if (match)
+ /* parent has an exception that matches the proposed */
+ return true;
+ else
+ return false;
+ }
+ return false;
}
/*
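
The reworked may_access() above replaces the single boolean comparison with an explicit case split over the parent's default behavior, the child's default behavior, and whether the proposed exception matches one of the parent's exceptions. A compact sketch of that decision as a pure predicate; the names are illustrative, and per the comment in the hunk the parent-deny/child-allow combination never reaches this code:

/* Sketch of the reworked may_access() decision, reduced to its three
 * inputs. */
#include <stdbool.h>
#include <stdio.h>

enum behavior { ALLOW, DENY };

static bool exception_permitted(enum behavior parent, enum behavior child,
				bool matches_parent_exception)
{
	if (parent == ALLOW) {
		if (child == ALLOW)
			return true;	/* child only adds restrictions */
		/* child default-deny: its allow-exceptions must not punch
		 * through a hole the parent explicitly closed */
		return !matches_parent_exception;
	}
	/* parent default-deny: child may only re-allow what the parent
	 * already allows via a matching exception */
	return matches_parent_exception;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       exception_permitted(ALLOW, ALLOW, true),
	       exception_permitted(ALLOW, DENY, true),
	       exception_permitted(ALLOW, DENY, false),
	       exception_permitted(DENY, DENY, true));
	return 0;
}
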
@@ -358,7 +403,7 @@ static int parent_has_perm(struct dev_cgroup *childcg,
if (!pcg)
return 1;
parent = cgroup_to_devcgroup(pcg);
- return may_access(parent, ex);
+ return may_access(parent, ex, childcg->behavior);
}
/**
@@ -374,6 +419,111 @@ static inline int may_allow_all(struct dev_cgroup *parent)
return parent->behavior == DEVCG_DEFAULT_ALLOW;
}
+/**
+ * revalidate_active_exceptions - walks through the active exception list and
+ * revalidates the exceptions based on parent's
+ * behavior and exceptions. The exceptions that
+ * are no longer valid will be removed.
+ * Called with devcgroup_mutex held.
+ * @devcg: cgroup whose exceptions will be checked
+ *
+ * This is one of the three key functions for hierarchy implementation.
+ * This function is responsible for re-evaluating all the cgroup's active
+ * exceptions due to a parent's exception change.
+ * Refer to Documentation/cgroups/devices.txt for more details.
+ */
+static void revalidate_active_exceptions(struct dev_cgroup *devcg)
+{
+ struct dev_exception_item *ex;
+ struct list_head *this, *tmp;
+
+ list_for_each_safe(this, tmp, &devcg->exceptions) {
+ ex = container_of(this, struct dev_exception_item, list);
+ if (!parent_has_perm(devcg, ex))
+ dev_exception_rm(devcg, ex);
+ }
+}
+
+/**
+ * get_online_devcg - walks the cgroup tree and fills a list with the online
+ * groups
+ * @root: cgroup used as starting point
+ * @online: list that will be filled with online groups
+ *
+ * Must be called with devcgroup_mutex held. Grabs RCU lock.
+ * Because devcgroup_mutex is held, no devcg will become online or offline
+ * during the tree walk (see devcgroup_online, devcgroup_offline).
+ * A separate list is needed because propagate_behavior() and
+ * propagate_exception() need to allocate memory and can block.
+ */
+static void get_online_devcg(struct cgroup *root, struct list_head *online)
+{
+ struct cgroup *pos;
+ struct dev_cgroup *devcg;
+
+ lockdep_assert_held(&devcgroup_mutex);
+
+ rcu_read_lock();
+ cgroup_for_each_descendant_pre(pos, root) {
+ devcg = cgroup_to_devcgroup(pos);
+ if (is_devcg_online(devcg))
+ list_add_tail(&devcg->propagate_pending, online);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * propagate_exception - propagates a new exception to the children
+ * @devcg_root: device cgroup that added a new exception
+ * @ex: new exception to be propagated
+ *
+ * returns: 0 in case of success, != 0 in case of error
+ */
+static int propagate_exception(struct dev_cgroup *devcg_root,
+ struct dev_exception_item *ex)
+{
+ struct cgroup *root = devcg_root->css.cgroup;
+ struct dev_cgroup *devcg, *parent, *tmp;
+ int rc = 0;
+ LIST_HEAD(pending);
+
+ get_online_devcg(root, &pending);
+
+ list_for_each_entry_safe(devcg, tmp, &pending, propagate_pending) {
+ parent = cgroup_to_devcgroup(devcg->css.cgroup->parent);
+
+ /*
+ * in case both the root's and the devcg's behavior is allow, a new
+ * restriction means adding to the exception list
+ */
+ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
+ devcg->behavior == DEVCG_DEFAULT_ALLOW) {
+ rc = dev_exception_add(devcg, ex);
+ if (rc)
+ break;
+ } else {
+ /*
+ * in the other possible cases:
+ * root's behavior: allow, devcg's: deny
+ * root's behavior: deny, devcg's: deny
+ * the exception will be removed
+ */
+ dev_exception_rm(devcg, ex);
+ }
+ revalidate_active_exceptions(devcg);
+
+ list_del_init(&devcg->propagate_pending);
+ }
+ return rc;
+}
+
+static inline bool has_children(struct dev_cgroup *devcgroup)
+{
+ struct cgroup *cgrp = devcgroup->css.cgroup;
+
+ return !list_empty(&cgrp->children);
+}
+
/*
* Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
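
propagate_exception() above first gathers the online descendants onto a propagate_pending list under RCU and only afterwards applies the change, because dev_exception_add() and the revalidation may allocate and block. A rough userspace sketch of that collect-then-process shape; the struct and the flat array are stand-ins for the cgroup tree walk:

/* Sketch of the two-phase propagation: phase 1 collects online groups on
 * an embedded pending list (done under RCU in the patch), phase 2 does
 * the work that may block and unlinks each entry. */
#include <stdbool.h>
#include <stdio.h>

struct devcg {
	const char *name;
	bool online;
	struct devcg *pending_next;	/* stand-in for propagate_pending */
};

int main(void)
{
	struct devcg groups[] = {
		{ "a", true,  NULL },
		{ "b", false, NULL },
		{ "c", true,  NULL },
	};
	struct devcg *pending = NULL, **tailp = &pending, *g, *next;
	int i;

	/* phase 1: gather online groups (the RCU descendant walk) */
	for (i = 0; i < 3; i++)
		if (groups[i].online) {
			*tailp = &groups[i];
			tailp = &groups[i].pending_next;
		}

	/* phase 2: apply the change; this part is allowed to block */
	for (g = pending; g; g = next) {
		next = g->pending_next;
		printf("propagating exception to %s\n", g->name);
		g->pending_next = NULL;		/* list_del_init() analogue */
	}
	return 0;
}
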
@@ -392,7 +542,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
{
const char *b;
char temp[12]; /* 11 + 1 characters needed for a u32 */
- int count, rc;
+ int count, rc = 0;
struct dev_exception_item ex;
struct cgroup *p = devcgroup->css.cgroup;
struct dev_cgroup *parent = NULL;
@@ -410,6 +560,9 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
case 'a':
switch (filetype) {
case DEVCG_ALLOW:
+ if (has_children(devcgroup))
+ return -EINVAL;
+
if (!may_allow_all(parent))
return -EPERM;
dev_exception_clean(devcgroup);
@@ -423,6 +576,9 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
return rc;
break;
case DEVCG_DENY:
+ if (has_children(devcgroup))
+ return -EINVAL;
+
dev_exception_clean(devcgroup);
devcgroup->behavior = DEVCG_DEFAULT_DENY;
break;
@@ -517,22 +673,28 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
dev_exception_rm(devcgroup, &ex);
return 0;
}
- return dev_exception_add(devcgroup, &ex);
+ rc = dev_exception_add(devcgroup, &ex);
+ break;
case DEVCG_DENY:
/*
* If the default policy is to deny by default, try to remove
* a matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
- if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
+ if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
dev_exception_rm(devcgroup, &ex);
- return 0;
- }
- return dev_exception_add(devcgroup, &ex);
+ else
+ rc = dev_exception_add(devcgroup, &ex);
+
+ if (rc)
+ break;
+ /* we only propagate new restrictions */
+ rc = propagate_exception(devcgroup, &ex);
+ break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+ return rc;
}
static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
@@ -571,17 +733,10 @@ struct cgroup_subsys devices_subsys = {
.can_attach = devcgroup_can_attach,
.css_alloc = devcgroup_css_alloc,
.css_free = devcgroup_css_free,
+ .css_online = devcgroup_online,
+ .css_offline = devcgroup_offline,
.subsys_id = devices_subsys_id,
.base_cftypes = dev_cgroup_files,
-
- /*
- * While devices cgroup has the rudimentary hierarchy support which
- * checks the parent's restriction, it doesn't properly propagates
- * config changes in ancestors to their descendents. A child
- * should only be allowed to add more restrictions to the parent's
- * configuration. Fix it and remove the following.
- */
- .broken_hierarchy = true,
};
/**
@@ -609,7 +764,7 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
rcu_read_lock();
dev_cgroup = task_devcgroup(current);
- rc = may_access(dev_cgroup, &ex);
+ rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
rcu_read_unlock();
if (!rc)
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 4e67d52eb3a..0d7fd8b5154 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -73,6 +73,7 @@ my $ktest_config;
my $version;
my $have_version = 0;
my $machine;
+my $last_machine;
my $ssh_user;
my $tmpdir;
my $builddir;
@@ -108,6 +109,7 @@ my $scp_to_target;
my $scp_to_target_install;
my $power_off;
my $grub_menu;
+my $last_grub_menu;
my $grub_file;
my $grub_number;
my $grub_reboot;
@@ -1538,7 +1540,9 @@ sub run_scp_mod {
sub get_grub2_index {
- return if (defined($grub_number));
+ return if (defined($grub_number) && defined($last_grub_menu) &&
+ $last_grub_menu eq $grub_menu && defined($last_machine) &&
+ $last_machine eq $machine);
doprint "Find grub2 menu ... ";
$grub_number = -1;
@@ -1565,6 +1569,8 @@ sub get_grub2_index {
die "Could not find '$grub_menu' in $grub_file on $machine"
if (!$found);
doprint "$grub_number\n";
+ $last_grub_menu = $grub_menu;
+ $last_machine = $machine;
}
sub get_grub_index {
@@ -1577,7 +1583,9 @@ sub get_grub_index {
if ($reboot_type ne "grub") {
return;
}
- return if (defined($grub_number));
+ return if (defined($grub_number) && defined($last_grub_menu) &&
+ $last_grub_menu eq $grub_menu && defined($last_machine) &&
+ $last_machine eq $machine);
doprint "Find grub menu ... ";
$grub_number = -1;
@@ -1604,6 +1612,8 @@ sub get_grub_index {
die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
if (!$found);
doprint "$grub_number\n";
+ $last_grub_menu = $grub_menu;
+ $last_machine = $machine;
}
sub wait_for_input