529 files changed, 10305 insertions, 4240 deletions
@@ -1823,6 +1823,11 @@ S: Kattreinstr 38 S: D-64295 S: Germany +N: Avi Kivity +E: avi.kivity@gmail.com +D: Kernel-based Virtual Machine (KVM) +S: Ra'annana, Israel + N: Andi Kleen E: andi@firstfloor.org U: http://www.halobates.de diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq index 23d78b5aab1..0ba6ea2f89d 100644 --- a/Documentation/ABI/testing/sysfs-class-devfreq +++ b/Documentation/ABI/testing/sysfs-class-devfreq @@ -11,7 +11,7 @@ What: /sys/class/devfreq/.../governor Date: September 2011 Contact: MyungJoo Ham <myungjoo.ham@samsung.com> Description: - The /sys/class/devfreq/.../governor shows the name of the + The /sys/class/devfreq/.../governor show or set the name of the governor used by the corresponding devfreq object. What: /sys/class/devfreq/.../cur_freq @@ -19,15 +19,16 @@ Date: September 2011 Contact: MyungJoo Ham <myungjoo.ham@samsung.com> Description: The /sys/class/devfreq/.../cur_freq shows the current - frequency of the corresponding devfreq object. + frequency of the corresponding devfreq object. Same as + target_freq when get_cur_freq() is not implemented by + devfreq driver. -What: /sys/class/devfreq/.../central_polling -Date: September 2011 -Contact: MyungJoo Ham <myungjoo.ham@samsung.com> +What: /sys/class/devfreq/.../target_freq +Date: September 2012 +Contact: Rajagopal Venkat <rajagopal.venkat@linaro.org> Description: - The /sys/class/devfreq/.../central_polling shows whether - the devfreq ojbect is using devfreq-provided central - polling mechanism or not. + The /sys/class/devfreq/.../target_freq shows the next governor + predicted target frequency of the corresponding devfreq object. What: /sys/class/devfreq/.../polling_interval Date: September 2011 @@ -43,6 +44,17 @@ Description: (/sys/class/devfreq/.../central_polling is 0), this value may be useless. +What: /sys/class/devfreq/.../trans_stat +Date: October 2012 +Contact: MyungJoo Ham <myungjoo.ham@samsung.com> +Descrtiption: + This ABI shows the statistics of devfreq behavior on a + specific device. It shows the time spent in each state and + the number of transitions between states. + In order to activate this ABI, the devfreq target device + driver should provide the list of available frequencies + with its profile. + What: /sys/class/devfreq/.../userspace/set_freq Date: September 2011 Contact: MyungJoo Ham <myungjoo.ham@samsung.com> @@ -50,3 +62,19 @@ Description: The /sys/class/devfreq/.../userspace/set_freq shows and sets the requested frequency for the devfreq object if userspace governor is in effect. + +What: /sys/class/devfreq/.../available_frequencies +Date: October 2012 +Contact: Nishanth Menon <nm@ti.com> +Description: + The /sys/class/devfreq/.../available_frequencies shows + the available frequencies of the corresponding devfreq object. + This is a snapshot of available frequencies and not limited + by the min/max frequency restrictions. + +What: /sys/class/devfreq/.../available_governors +Date: October 2012 +Contact: Nishanth Menon <nm@ti.com> +Description: + The /sys/class/devfreq/.../available_governors shows + currently available governors in the system. 
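The devfreq attributes documented above are ordinary text files under /sys/class/devfreq/<device>/. A minimal userspace sketch in C that dumps them, assuming a hypothetical device name ("exynos4-busfreq" is only an illustration; substitute a devfreq object actually present on the system):

#include <stdio.h>

/* Dump one devfreq sysfs attribute; not part of the patch, illustration only. */
static void show_attr(const char *dev, const char *attr)
{
    char path[256], line[256];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/class/devfreq/%s/%s", dev, attr);
    f = fopen(path, "r");
    if (!f) {
        perror(path);
        return;
    }
    /* trans_stat spans several lines, so copy everything through. */
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
}

int main(void)
{
    const char *dev = "exynos4-busfreq";   /* hypothetical device name */

    show_attr(dev, "governor");
    show_attr(dev, "cur_freq");
    show_attr(dev, "target_freq");
    show_attr(dev, "polling_interval");
    show_attr(dev, "available_frequencies");
    show_attr(dev, "available_governors");
    show_attr(dev, "trans_stat");
    return 0;
}

The userspace governor's set_freq file can be written the same way (echoing a frequency into it) when that governor is in effect.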
diff --git a/Documentation/ABI/testing/sysfs-devices-sun b/Documentation/ABI/testing/sysfs-devices-sun new file mode 100644 index 00000000000..86be9848a77 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-sun @@ -0,0 +1,14 @@ +Whatt: /sys/devices/.../sun +Date: October 2012 +Contact: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> +Description: + The file contains a Slot-unique ID which provided by the _SUN + method in the ACPI namespace. The value is written in Advanced + Configuration and Power Interface Specification as follows: + + "The _SUN value is required to be unique among the slots of + the same type. It is also recommended that this number match + the slot number printed on the physical slot whenever possible." + + So reading the sysfs file, we can identify a physical position + of the slot in the system. diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index c07f7b4fb88..71c4da41344 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -466,6 +466,10 @@ Note: 5.3 swappiness Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. +Please note that unlike the global swappiness, memcg knob set to 0 +really prevents from any swapping even if there is a swap storage +available. This might lead to memcg OOM killer if there are no file +pages to reclaim. Following cgroups' swappiness can't be changed. - root cgroup (uses /proc/sys/vm/swappiness). diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt new file mode 100644 index 00000000000..f3d44984d91 --- /dev/null +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt @@ -0,0 +1,42 @@ +SPEAr cpufreq driver +------------------- + +SPEAr SoC cpufreq driver for CPU frequency scaling. +It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems +which share clock across all CPUs. + +Required properties: +- cpufreq_tbl: Table of frequencies CPU could be transitioned into, in the + increasing order. + +Optional properties: +- clock-latency: Specify the possible maximum transition latency for clock, in + unit of nanoseconds. + +Both required and optional properties listed above must be defined under node +/cpus/cpu@0. + +Examples: +-------- +cpus { + + <...> + + cpu@0 { + compatible = "arm,cortex-a9"; + reg = <0>; + + <...> + + cpufreq_tbl = < 166000 + 200000 + 250000 + 300000 + 400000 + 500000 + 600000 >; + }; + + <...> + +}; diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt index bc954952901..c79bab02536 100644 --- a/Documentation/devicetree/bindings/net/mdio-gpio.txt +++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt @@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order: MDC, MDIO. +Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases" +node. 
+ Example: -mdio { +aliases { + mdio-gpio0 = <&mdio0>; +}; + +mdio0: mdio { compatible = "virtual,mdio-gpio"; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index a1793d670cd..3844d21d6ca 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -33,7 +33,7 @@ Table of Contents 2 Modifying System Parameters 3 Per-Process Parameters - 3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer + 3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer score 3.2 /proc/<pid>/oom_score - Display current oom-killer score 3.3 /proc/<pid>/io - Display the IO accounting fields @@ -1320,10 +1320,10 @@ of the kernel. CHAPTER 3: PER-PROCESS PARAMETERS ------------------------------------------------------------------------------ -3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score +3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score -------------------------------------------------------------------------------- -This file can be used to adjust the badness heuristic used to select which +These file can be used to adjust the badness heuristic used to select which process gets killed in out of memory conditions. The badness heuristic assigns a value to each candidate task ranging from 0 @@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least equivalent to discounting 50% of the task's allowed memory from being considered as scoring against the task. +For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also +be used to tune the badness score. Its acceptable values range from -16 +(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17 +(OOM_DISABLE) to disable oom killing entirely for that task. Its value is +scaled linearly with /proc/<pid>/oom_score_adj. + The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last value set by a CAP_SYS_RESOURCE process. To reduce the value any lower requires CAP_SYS_RESOURCE. @@ -1375,7 +1381,9 @@ minimal amount of work. ------------------------------------------------------------- This file can be used to check the current score used by the oom-killer is for -any given <pid>. +any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which +process should be killed in an out-of-memory situation. + 3.3 /proc/<pid>/io - Display the IO accounting fields ------------------------------------------------------- diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt index 4164f5c02e4..f310edec8a7 100644 --- a/Documentation/networking/netdev-features.txt +++ b/Documentation/networking/netdev-features.txt @@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet. This requests that the NIC receive all possible frames, including errored frames (such as bad FCS, etc). This can be helpful when sniffing a link with bad packets on it. Some NICs may receive more packets if also put into normal -PROMISC mdoe. +PROMISC mode. diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt index 5b34b762d7d..6d993510f09 100644 --- a/Documentation/networking/vxlan.txt +++ b/Documentation/networking/vxlan.txt @@ -32,7 +32,7 @@ no entry is in the forwarding table. # ip link delete vxlan0 3. 
Show vxlan info - # ip -d show vxlan0 + # ip -d link show vxlan0 It is possible to create, destroy and display the vxlan forwarding table using the new bridge command. @@ -41,7 +41,7 @@ forwarding table using the new bridge command. # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0 2. Delete forwarding table entry - # bridge fdb delete 00:17:42:8a:b4:05 + # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0 3. Show forwarding table # bridge fdb show dev vxlan0 diff --git a/MAINTAINERS b/MAINTAINERS index 59203e77ce9..9386a63ea8f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -526,17 +526,17 @@ F: drivers/video/geode/ F: arch/x86/include/asm/geode.h AMD IOMMU (AMD-VI) -M: Joerg Roedel <joerg.roedel@amd.com> +M: Joerg Roedel <joro@8bytes.org> L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git -S: Supported +S: Maintained F: drivers/iommu/amd_iommu*.[ch] F: include/linux/amd-iommu.h AMD MICROCODE UPDATE SUPPORT -M: Andreas Herrmann <andreas.herrmann3@amd.com> +M: Andreas Herrmann <herrmann.der.user@googlemail.com> L: amd64-microcode@amd64.org -S: Supported +S: Maintained F: arch/x86/kernel/microcode_amd.c AMS (Apple Motion Sensor) DRIVER @@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: arch/arm/mach-sa1100/jornada720.c F: arch/arm/mach-sa1100/include/mach/jornada720.h +ARM/IGEP MACHINE SUPPORT +M: Enric Balletbo i Serra <eballetbo@gmail.com> +M: Javier Martinez Canillas <javier@dowhile0.org> +L: linux-omap@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-omap2/board-igep0020.c + ARM/INCOME PXA270 SUPPORT M: Marek Vasut <marek.vasut@gmail.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2708,10 +2716,10 @@ F: include/linux/edac.h EDAC-AMD64 M: Doug Thompson <dougthompson@xmission.com> -M: Borislav Petkov <borislav.petkov@amd.com> +M: Borislav Petkov <bp@alien8.de> L: linux-edac@vger.kernel.org W: bluesmoke.sourceforge.net -S: Supported +S: Maintained F: drivers/edac/amd64_edac* EDAC-E752X @@ -3598,6 +3606,49 @@ F: drivers/hid/hid-hyperv.c F: drivers/net/hyperv/ F: drivers/staging/hv/ +I2C OVER PARALLEL PORT +M: Jean Delvare <khali@linux-fr.org> +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-parport +F: Documentation/i2c/busses/i2c-parport-light +F: drivers/i2c/busses/i2c-parport.c +F: drivers/i2c/busses/i2c-parport-light.c + +I2C/SMBUS CONTROLLER DRIVERS FOR PC +M: Jean Delvare <khali@linux-fr.org> +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-ali1535 +F: Documentation/i2c/busses/i2c-ali1563 +F: Documentation/i2c/busses/i2c-ali15x3 +F: Documentation/i2c/busses/i2c-amd756 +F: Documentation/i2c/busses/i2c-amd8111 +F: Documentation/i2c/busses/i2c-i801 +F: Documentation/i2c/busses/i2c-nforce2 +F: Documentation/i2c/busses/i2c-piix4 +F: Documentation/i2c/busses/i2c-sis5595 +F: Documentation/i2c/busses/i2c-sis630 +F: Documentation/i2c/busses/i2c-sis96x +F: Documentation/i2c/busses/i2c-via +F: Documentation/i2c/busses/i2c-viapro +F: drivers/i2c/busses/i2c-ali1535.c +F: drivers/i2c/busses/i2c-ali1563.c +F: drivers/i2c/busses/i2c-ali15x3.c +F: drivers/i2c/busses/i2c-amd756.c +F: drivers/i2c/busses/i2c-amd756-s4882.c +F: drivers/i2c/busses/i2c-amd8111.c +F: drivers/i2c/busses/i2c-i801.c +F: drivers/i2c/busses/i2c-isch.c +F: drivers/i2c/busses/i2c-nforce2.c +F: drivers/i2c/busses/i2c-nforce2-s4985.c +F: 
drivers/i2c/busses/i2c-piix4.c +F: drivers/i2c/busses/i2c-sis5595.c +F: drivers/i2c/busses/i2c-sis630.c +F: drivers/i2c/busses/i2c-sis96x.c +F: drivers/i2c/busses/i2c-via.c +F: drivers/i2c/busses/i2c-viapro.c + I2C/SMBUS STUB DRIVER M: "Mark M. Hoffman" <mhoffman@lightlink.com> L: linux-i2c@vger.kernel.org @@ -3605,9 +3656,8 @@ S: Maintained F: drivers/i2c/busses/i2c-stub.c I2C SUBSYSTEM -M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org> +M: Wolfram Sang <w.sang@pengutronix.de> M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org> -M: "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de> L: linux-i2c@vger.kernel.org W: http://i2c.wiki.kernel.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ @@ -3618,6 +3668,13 @@ F: drivers/i2c/ F: include/linux/i2c.h F: include/linux/i2c-*.h +I2C-TAOS-EVM DRIVER +M: Jean Delvare <khali@linux-fr.org> +L: linux-i2c@vger.kernel.org +S: Maintained +F: Documentation/i2c/busses/i2c-taos-evm +F: drivers/i2c/busses/i2c-taos-evm.c + I2C-TINY-USB DRIVER M: Till Harbaum <till@harbaum.org> L: linux-i2c@vger.kernel.org @@ -3704,7 +3761,7 @@ S: Maintained F: drivers/platform/x86/ideapad-laptop.c IDE/ATAPI DRIVERS -M: Borislav Petkov <petkovbb@gmail.com> +M: Borislav Petkov <bp@alien8.de> L: linux-ide@vger.kernel.org S: Maintained F: Documentation/cdrom/ide-cd @@ -4231,8 +4288,8 @@ F: include/linux/lockd/ F: include/linux/sunrpc/ KERNEL VIRTUAL MACHINE (KVM) -M: Avi Kivity <avi@redhat.com> M: Marcelo Tosatti <mtosatti@redhat.com> +M: Gleb Natapov <gleb@redhat.com> L: kvm@vger.kernel.org W: http://kvm.qumranet.com S: Supported @@ -5364,7 +5421,7 @@ S: Maintained F: sound/drivers/opl4/ OPROFILE -M: Robert Richter <robert.richter@amd.com> +M: Robert Richter <rric@kernel.org> L: oprofile-list@lists.sf.net S: Maintained F: arch/*/include/asm/oprofile*.h @@ -7210,6 +7267,14 @@ L: linux-xtensa@linux-xtensa.org S: Maintained F: arch/xtensa/ +THERMAL +M: Zhang Rui <rui.zhang@intel.com> +L: linux-pm@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git +S: Supported +F: drivers/thermal/ +F: include/linux/thermal.h + THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br> L: ibm-acpi-devel@lists.sourceforge.net @@ -7887,13 +7952,6 @@ M: Roger Luethi <rl@hellgate.ch> S: Maintained F: drivers/net/ethernet/via/via-rhine.c -VIAPRO SMBUS DRIVER -M: Jean Delvare <khali@linux-fr.org> -L: linux-i2c@vger.kernel.org -S: Maintained -F: Documentation/i2c/busses/i2c-viapro -F: drivers/i2c/busses/i2c-viapro.c - VIA SD/MMC CARD CONTROLLER DRIVER M: Bruce Chang <brucechang@via.com.tw> M: Harald Welte <HaraldWelte@viatech.com> @@ -8148,7 +8206,7 @@ F: drivers/platform/x86 X86 MCE INFRASTRUCTURE M: Tony Luck <tony.luck@intel.com> -M: Borislav Petkov <bp@amd64.org> +M: Borislav Petkov <bp@alien8.de> L: linux-edac@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/mcheck/* @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 7 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc7 NAME = Terrified Chipmunk # *DOCUMENTATION* diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 1e6956a9060..14db93e4c8a 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -445,7 +445,7 @@ struct procfs_args { * unhappy with OSF UFS. 
[CHECKME] */ static int -osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) +osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; @@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags) } static int -osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) +osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; @@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags) } static int -osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags) +osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ade7e924bef..159e99737e3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -904,6 +904,7 @@ config ARCH_NOMADIK config PLAT_SPEAR bool "ST SPEAr" + select ARCH_HAS_CPUFREQ select ARCH_REQUIRE_GPIOLIB select ARM_AMBA select CLKDEV_LOOKUP diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index f2aa09eb658..9137df539b6 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y) $(obj)/xipImage: vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' + @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))' $(obj)/Image $(obj)/zImage: FORCE @echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)' @@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' endif @@ -90,7 +90,7 @@ fi $(obj)/uImage: $(obj)/zImage FORCE @$(check_for_multiple_loadaddr) $(call if_changed,uimage) - $(kecho) ' Image $@ is ready' + @$(kecho) ' Image $@ is ready' $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(Q)$(MAKE) $(build)=$(obj)/bootp $@ @@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE $(obj)/bootpImage: $(obj)/bootp/bootp FORCE $(call if_changed,objcopy) - $(kecho) ' Kernel: $@ is ready' + @$(kecho) ' Kernel: $@ is ready' PHONY += initrd FORCE initrd: diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index b1497c7d7d6..df7f2270fc9 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -73,8 +73,8 @@ pinmux: pinmux { compatible = "nvidia,tegra30-pinmux"; - reg = <0x70000868 0xd0 /* Pad control registers */ - 0x70003000 0x3e0>; /* Mux registers */ + reg = <0x70000868 0xd4 /* Pad control registers */ + 0x70003000 0x3e4>; /* Mux registers */ }; serial@70006000 { diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 1e122bcd784..3cee0e6ea7c 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/arch/arm/mach-at91/at91sam9260_devices.c 
b/arch/arm/mach-at91/at91sam9260_devices.c index aa1e5872988..414bd855fb0 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index b9487696b7b..cd604aad8e9 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index cb85da2ecce..9c61e59a210 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) /* Enable overcurrent notification */ for (i = 0; i < data->ports; i++) { - if (data->overcurrent_pin[i]) + if (gpio_is_valid(data->overcurrent_pin[i])) at91_set_gpio_input(data->overcurrent_pin[i], 1); } diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index b1596072dcc..fcd233cb33d 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; @@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; @@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = AT91SAM9G45_ID_AESTDESSHA, - .end = AT91SAM9G45_ID_AESTDESSHA, + .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, + .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index cd0c8b1e1ec..14e9947bad6 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, break; case VPBE_ENC_CUSTOM_TIMINGS: if (pclock <= 27000000) { - v |= DM644X_VPSS_MUXSEL_PLL2_MODE | - DM644X_VPSS_DACCLKEN; + v |= DM644X_VPSS_DACCLKEN; writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL)); } else { /* diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c index 21d568b3b14..87e07d6fc61 100644 --- a/arch/arm/mach-exynos/dma.c +++ b/arch/arm/mach-exynos/dma.c @@ -275,6 +275,9 @@ static int __init exynos_dma_init(void) exynos_pdma1_pdata.nr_valid_peri = ARRAY_SIZE(exynos4210_pdma1_peri); exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri; + + if (samsung_rev() == EXYNOS4210_REV_0) + 
exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1; } else if (soc_is_exynos4212() || soc_is_exynos4412()) { exynos_pdma0_pdata.nr_valid_peri = ARRAY_SIZE(exynos4212_pdma0_peri); diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h index 8480849affb..ed4da4544cd 100644 --- a/arch/arm/mach-exynos/include/mach/map.h +++ b/arch/arm/mach-exynos/include/mach/map.h @@ -90,6 +90,7 @@ #define EXYNOS4_PA_MDMA0 0x10810000 #define EXYNOS4_PA_MDMA1 0x12850000 +#define EXYNOS4_PA_S_MDMA1 0x12840000 #define EXYNOS4_PA_PDMA0 0x12680000 #define EXYNOS4_PA_PDMA1 0x12690000 #define EXYNOS5_PA_MDMA0 0x10800000 diff --git a/arch/arm/mach-highbank/system.c b/arch/arm/mach-highbank/system.c index 82c27230d4a..86e37cd9376 100644 --- a/arch/arm/mach-highbank/system.c +++ b/arch/arm/mach-highbank/system.c @@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd) hignbank_set_pwr_soft_reset(); scu_power_mode(scu_base_addr, SCU_PM_POWEROFF); - cpu_do_idle(); + while (1) + cpu_do_idle(); } diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c index 3c1b8ff9a0a..cc49c7ae186 100644 --- a/arch/arm/mach-imx/clk-gate2.c +++ b/arch/arm/mach-imx/clk-gate2.c @@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, clk = clk_register(dev, &gate->hw); if (IS_ERR(clk)) - kfree(clk); + kfree(gate); return clk; } diff --git a/arch/arm/mach-imx/ehci-imx25.c b/arch/arm/mach-imx/ehci-imx25.c index 412c583a24b..576af744695 100644 --- a/arch/arm/mach-imx/ehci-imx25.c +++ b/arch/arm/mach-imx/ehci-imx25.c @@ -30,7 +30,7 @@ #define MX25_H1_SIC_SHIFT 21 #define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT) #define MX25_H1_PP_BIT (1 << 18) -#define MX25_H1_PM_BIT (1 << 8) +#define MX25_H1_PM_BIT (1 << 16) #define MX25_H1_IPPUE_UP_BIT (1 << 7) #define MX25_H1_IPPUE_DOWN_BIT (1 << 6) #define MX25_H1_TLL_BIT (1 << 5) diff --git a/arch/arm/mach-imx/ehci-imx35.c b/arch/arm/mach-imx/ehci-imx35.c index 779e16eb65c..293397852e4 100644 --- a/arch/arm/mach-imx/ehci-imx35.c +++ b/arch/arm/mach-imx/ehci-imx35.c @@ -30,7 +30,7 @@ #define MX35_H1_SIC_SHIFT 21 #define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT) #define MX35_H1_PP_BIT (1 << 18) -#define MX35_H1_PM_BIT (1 << 8) +#define MX35_H1_PM_BIT (1 << 16) #define MX35_H1_IPPUE_UP_BIT (1 << 7) #define MX35_H1_IPPUE_DOWN_BIT (1 << 6) #define MX35_H1_TLL_BIT (1 << 5) diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 48d5e41dfbf..37859069444 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void) } else return; + /* Make sure that the GPIO pins are muxed correctly */ + omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT); + err = gpio_request_array(igep_wlan_bt_gpios, ARRAY_SIZE(igep_wlan_bt_gpios)); if (err) { diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c index b56d06b4878..95192a062d5 100644 --- a/arch/arm/mach-omap2/clockdomains44xx_data.c +++ b/arch/arm/mach-omap2/clockdomains44xx_data.c @@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = { .clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS, .wkdep_srcs = iss_wkup_sleep_deps, .sleepdep_srcs = iss_wkup_sleep_deps, - .flags = CLKDM_CAN_HWSUP_SWSUP, + .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain 
l3_dss_44xx_clkdm = { diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c index 48daac2581b..84551f205e4 100644 --- a/arch/arm/mach-omap2/common-board-devices.c +++ b/arch/arm/mach-omap2/common-board-devices.c @@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, struct spi_board_info *spi_bi = &ads7846_spi_board_info; int err; - err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); - if (err) { - pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); - return; - } + /* + * If a board defines get_pendown_state() function, request the pendown + * GPIO and set the GPIO debounce time. + * If a board does not define the get_pendown_state() function, then + * the ads7846 driver will setup the pendown GPIO itself. + */ + if (board_pdata && board_pdata->get_pendown_state) { + err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); + if (err) { + pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); + return; + } - if (gpio_debounce) - gpio_set_debounce(gpio_pendown, gpio_debounce); + if (gpio_debounce) + gpio_set_debounce(gpio_pendown, gpio_debounce); + + gpio_export(gpio_pendown, 0); + } spi_bi->bus_num = bus_num; spi_bi->irq = gpio_to_irq(gpio_pendown); + ads7846_config.gpio_pendown = gpio_pendown; + if (board_pdata) { board_pdata->gpio_pendown = gpio_pendown; + board_pdata->gpio_pendown_debounce = gpio_debounce; spi_bi->platform_data = board_pdata; - if (board_pdata->get_pendown_state) - gpio_export(gpio_pendown, 0); - } else { - ads7846_config.gpio_pendown = gpio_pendown; } - if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state)) - gpio_free(gpio_pendown); - spi_register_board_info(&ads7846_spi_board_info, 1); } #else diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index cba60e05e32..c72b5a72772 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -19,6 +19,7 @@ #include <linux/of.h> #include <linux/pinctrl/machine.h> #include <linux/platform_data/omap4-keypad.h> +#include <linux/platform_data/omap_ocp2scp.h> #include <asm/mach-types.h> #include <asm/mach/map.h> @@ -613,6 +614,83 @@ static void omap_init_vout(void) static inline void omap_init_vout(void) {} #endif +#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE) +static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev) +{ + int cnt = 0; + + while (ocp2scp_dev->drv_name != NULL) { + cnt++; + ocp2scp_dev++; + } + + return cnt; +} + +static void omap_init_ocp2scp(void) +{ + struct omap_hwmod *oh; + struct platform_device *pdev; + int bus_id = -1, dev_cnt = 0, i; + struct omap_ocp2scp_dev *ocp2scp_dev; + const char *oh_name, *name; + struct omap_ocp2scp_platform_data *pdata; + + if (!cpu_is_omap44xx()) + return; + + oh_name = "ocp2scp_usb_phy"; + name = "omap-ocp2scp"; + + oh = omap_hwmod_lookup(oh_name); + if (!oh) { + pr_err("%s: could not find omap_hwmod for %s\n", __func__, + oh_name); + return; + } + + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + pr_err("%s: No memory for ocp2scp pdata\n", __func__); + return; + } + + ocp2scp_dev = oh->dev_attr; + dev_cnt = count_ocp2scp_devices(ocp2scp_dev); + + if (!dev_cnt) { + pr_err("%s: No devices connected to ocp2scp\n", __func__); + kfree(pdata); + return; + } + + pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *) + * dev_cnt, GFP_KERNEL); + if (!pdata->devices) { + pr_err("%s: No memory for ocp2scp pdata devices\n", __func__); + kfree(pdata); + return; + } 
+ + for (i = 0; i < dev_cnt; i++, ocp2scp_dev++) + pdata->devices[i] = ocp2scp_dev; + + pdata->dev_cnt = dev_cnt; + + pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL, + 0, false); + if (IS_ERR(pdev)) { + pr_err("Could not build omap_device for %s %s\n", + name, oh_name); + kfree(pdata->devices); + kfree(pdata); + return; + } +} +#else +static inline void omap_init_ocp2scp(void) { } +#endif + /*-------------------------------------------------------------------------*/ static int __init omap2_init_devices(void) @@ -640,6 +718,7 @@ static int __init omap2_init_devices(void) omap_init_sham(); omap_init_aes(); omap_init_vout(); + omap_init_ocp2scp(); return 0; } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index b969ab1d258..87cc6d058de 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -422,6 +422,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v) } /** + * _wait_softreset_complete - wait for an OCP softreset to complete + * @oh: struct omap_hwmod * to wait on + * + * Wait until the IP block represented by @oh reports that its OCP + * softreset is complete. This can be triggered by software (see + * _ocp_softreset()) or by hardware upon returning from off-mode (one + * example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT + * microseconds. Returns the number of microseconds waited. + */ +static int _wait_softreset_complete(struct omap_hwmod *oh) +{ + struct omap_hwmod_class_sysconfig *sysc; + u32 softrst_mask; + int c = 0; + + sysc = oh->class->sysc; + + if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS) + omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs) + & SYSS_RESETDONE_MASK), + MAX_MODULE_SOFTRESET_WAIT, c); + else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { + softrst_mask = (0x1 << sysc->sysc_fields->srst_shift); + omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs) + & softrst_mask), + MAX_MODULE_SOFTRESET_WAIT, c); + } + + return c; +} + +/** * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v * @oh: struct omap_hwmod * * @@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh) if (!oh->class->sysc) return; + /* + * Wait until reset has completed, this is needed as the IP + * block is reset automatically by hardware in some cases + * (off-mode for example), and the drivers require the + * IP to be ready when they access it + */ + if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) + _enable_optional_clocks(oh); + _wait_softreset_complete(oh); + if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) + _disable_optional_clocks(oh); + v = oh->_sysc_cache; sf = oh->class->sysc->sysc_flags; @@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh) */ static int _ocp_softreset(struct omap_hwmod *oh) { - u32 v, softrst_mask; + u32 v; int c = 0; int ret = 0; @@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh) if (oh->class->sysc->srst_udelay) udelay(oh->class->sysc->srst_udelay); - if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) - omap_test_timeout((omap_hwmod_read(oh, - oh->class->sysc->syss_offs) - & SYSS_RESETDONE_MASK), - MAX_MODULE_SOFTRESET_WAIT, c); - else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) { - softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift); - omap_test_timeout(!(omap_hwmod_read(oh, - oh->class->sysc->sysc_offs) - & softrst_mask), - MAX_MODULE_SOFTRESET_WAIT, c); - } - + c = _wait_softreset_complete(oh); if (c == MAX_MODULE_SOFTRESET_WAIT) pr_warning("omap_hwmod: 
%s: softreset failed (waited %d usec)\n", oh->name, MAX_MODULE_SOFTRESET_WAIT); @@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh) if (oh->_state != _HWMOD_STATE_INITIALIZED) return -EINVAL; + if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK) + return -EPERM; + if (oh->rst_lines_cnt == 0) { r = _enable(oh); if (r) { diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 652d0285bd6..0b1249e0039 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -21,6 +21,7 @@ #include <linux/io.h> #include <linux/platform_data/gpio-omap.h> #include <linux/power/smartreflex.h> +#include <linux/platform_data/omap_ocp2scp.h> #include <plat/omap_hwmod.h> #include <plat/i2c.h> @@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = { .name = "mcpdm", .class = &omap44xx_mcpdm_hwmod_class, .clkdm_name = "abe_clkdm", + /* + * It's suspected that the McPDM requires an off-chip main + * functional clock, controlled via I2C. This IP block is + * currently reset very early during boot, before I2C is + * available, so it doesn't seem that we have any choice in + * the kernel other than to avoid resetting it. + */ + .flags = HWMOD_EXT_OPT_MAIN_CLK, .mpu_irqs = omap44xx_mcpdm_irqs, .sdma_reqs = omap44xx_mcpdm_sdma_reqs, .main_clk = "mcpdm_fck", @@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = { .sysc = &omap44xx_ocp2scp_sysc, }; +/* ocp2scp dev_attr */ +static struct resource omap44xx_usb_phy_and_pll_addrs[] = { + { + .name = "usb_phy", + .start = 0x4a0ad080, + .end = 0x4a0ae000, + .flags = IORESOURCE_MEM, + }, + { + /* XXX: Remove this once control module driver is in place */ + .name = "ctrl_dev", + .start = 0x4a002300, + .end = 0x4a002303, + .flags = IORESOURCE_MEM, + }, + { } +}; + +static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { + { + .drv_name = "omap-usb2", + .res = omap44xx_usb_phy_and_pll_addrs, + }, + { } +}; + /* ocp2scp_usb_phy */ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .name = "ocp2scp_usb_phy", @@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { .modulemode = MODULEMODE_HWCTRL, }, }, + .dev_attr = ocp2scp_dev_attr, }; /* diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 635e109f5ad..a256135d8e4 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type, { /* PMIC part*/ omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); + omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT); omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data); /* Register additional devices on i2c1 bus if needed */ @@ -366,7 +367,7 @@ static struct regulator_init_data omap4_clk32kg_idata = { }; static struct regulator_consumer_supply omap4_vdd1_supply[] = { - REGULATOR_SUPPLY("vcc", "mpu.0"), + REGULATOR_SUPPLY("vcc", "cpu0"), }; static struct regulator_consumer_supply omap4_vdd2_supply[] = { diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index 880249b1701..75878c37959 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) - pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).", + pr_warn("%s: I2C config for 
vdd_%s does not match other channels (%u).\n", __func__, voltdm->name, i2c_high_speed); return; } diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 5ecbd17b564..e2c6391863f 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -28,6 +28,7 @@ #include <linux/mfd/asic3.h> #include <linux/mtd/physmap.h> #include <linux/pda_power.h> +#include <linux/pwm.h> #include <linux/pwm_backlight.h> #include <linux/regulator/driver.h> #include <linux/regulator/gpio-regulator.h> @@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = { */ static struct platform_pwm_backlight_data backlight_data = { - .pwm_id = 1, + .pwm_id = -1, /* Superseded by pwm_lookup */ .max_brightness = 200, .dft_brightness = 100, .pwm_period_ns = 30923, @@ -571,6 +572,10 @@ static struct platform_device backlight = { }, }; +static struct pwm_lookup hx4700_pwm_lookup[] = { + PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL), +}; + /* * USB "Transceiver" */ @@ -872,6 +877,7 @@ static void __init hx4700_init(void) pxa_set_stuart_info(NULL); platform_add_devices(devices, ARRAY_SIZE(devices)); + pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); pxa_set_ficp_info(&ficp_info); pxa27x_set_i2c_power_info(NULL); diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 438f02fe122..842596d4d31 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c @@ -86,10 +86,7 @@ static void spitz_discharge1(int on) gpio_set_value(SPITZ_GPIO_LED_GREEN, on); } -static unsigned long gpio18_config[] = { - GPIO18_RDY, - GPIO18_GPIO, -}; +static unsigned long gpio18_config = GPIO18_GPIO; static void spitz_presuspend(void) { @@ -112,7 +109,7 @@ static void spitz_presuspend(void) PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT; PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0); - pxa2xx_mfp_config(&gpio18_config[0], 1); + pxa2xx_mfp_config(&gpio18_config, 1); gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown"); gpio_free(18); @@ -131,7 +128,6 @@ static void spitz_presuspend(void) static void spitz_postsuspend(void) { - pxa2xx_mfp_config(&gpio18_config[1], 1); } static int spitz_should_wakeup(unsigned int resume_on_alarm) diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c index a5683a84c6e..6013831a043 100644 --- a/arch/arm/plat-omap/i2c.c +++ b/arch/arm/plat-omap/i2c.c @@ -26,12 +26,14 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/i2c.h> +#include <linux/i2c-omap.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <mach/irqs.h> #include <plat/i2c.h> +#include <plat/omap-pm.h> #include <plat/omap_device.h> #define OMAP_I2C_SIZE 0x3f @@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id) #ifdef CONFIG_ARCH_OMAP2PLUS +/* + * XXX This function is a temporary compatibility wrapper - only + * needed until the I2C driver can be converted to call + * omap_pm_set_max_dev_wakeup_lat() and handle a return code. + */ +static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t) +{ + omap_pm_set_max_mpu_wakeup_lat(dev, t); +} + static inline int omap2_i2c_add_bus(int bus_id) { int l; @@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id) dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr; pdata->flags = dev_attr->flags; + /* + * When waiting for completion of a i2c transfer, we need to + * set a wake up latency constraint for the MPU. This is to + * ensure quick enough wakeup from idle, when transfer + * completes. 
+ * Only omap3 has support for constraints + */ + if (cpu_is_omap34xx()) + pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat; pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(struct omap_i2c_bus_platform_data), NULL, 0, 0); diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index b3349f7b1a2..1db02943802 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm { * in order to complete the reset. Optional clocks will be disabled * again after the reset. * HWMOD_16BIT_REG: Module has 16bit registers + * HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for + * this IP block comes from an off-chip source and is not always + * enabled. This prevents the hwmod code from being able to + * enable and reset the IP block early. XXX Eventually it should + * be possible to query the clock framework for this information. */ #define HWMOD_SWSUP_SIDLE (1 << 0) #define HWMOD_SWSUP_MSTANDBY (1 << 1) @@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm { #define HWMOD_NO_IDLEST (1 << 6) #define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7) #define HWMOD_16BIT_REG (1 << 8) +#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) /* * omap_hwmod._int_flags definitions diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile index cd60a81163e..32d05c8219d 100644 --- a/arch/arm/tools/Makefile +++ b/arch/arm/tools/Makefile @@ -5,6 +5,6 @@ # include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types - $(kecho) ' Generating $@' + @$(kecho) ' Generating $@' @mkdir -p $(dir $@) $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 54f6116697f..d2f05a60827 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 0f3b4581d92..75fd13d289b 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -38,7 +38,8 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) +#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). @@ -57,7 +58,8 @@ #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ -#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ +#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ +#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
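The pgtable-hwdef.h hunk above splits the single XN bit into PXN (descriptor bit 53) and UXN (bit 54). A small sketch of how the two bits are typically interpreted, matching the definitions introduced by the patch (illustration only, not code from the patch):

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t pteval_t;

#define PTE_PXN ((pteval_t)1 << 53)   /* Privileged eXecute Never */
#define PTE_UXN ((pteval_t)1 << 54)   /* User eXecute Never */

/* A page is executable from userspace only if UXN is clear... */
static bool pte_user_exec(pteval_t pte)
{
    return !(pte & PTE_UXN);
}

/* ...and executable by the kernel only if PXN is clear. */
static bool pte_kernel_exec(pteval_t pte)
{
    return !(pte & PTE_PXN);
}

This is why the io.h and pgtable.h hunks that follow mark device and kernel mappings with both PTE_PXN and PTE_UXN, while PAGE_KERNEL_EXEC keeps only PTE_UXN set.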
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 8960239be72..14aba2db677 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -62,23 +62,23 @@ extern pgprot_t pgprot_default; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG) -#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY) -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY) - -#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN) -#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG) -#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) -#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) +#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) +#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) + +#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) #endif /* __ASSEMBLY__ */ @@ -130,10 +130,10 @@ extern struct page *empty_zero_page; #define pte_young(pte) (pte_val(pte) & PTE_AF) #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) -#define pte_exec(pte) (!(pte_val(pte) & PTE_XN)) +#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_present_exec_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \ + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \ (PTE_VALID | PTE_USER)) #define PTE_BIT_FUNC(fn,op) 
\ @@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY; + const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index acd5b68e887..082e383c1b6 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -637,7 +637,6 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); - reset_zone_present_pages(); for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 67e489d8d1b..2df26b57c26 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -41,7 +41,7 @@ struct k_sigaction { static inline void sigaddset(sigset_t *set, int _sig) { asm ("bfset %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig) static inline void sigdelset(sigset_t *set, int _sig) { asm ("bfclr %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) int ret; asm ("bfextu %1{%2,#1},%0" : "=d" (ret) - : "od" (*set), "id" ((_sig-1) ^ 31) + : "o" (*set), "id" ((_sig-1) ^ 31) : "cc"); return ret; } diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c index d38246e33dd..9f883bf7695 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -30,6 +30,7 @@ * measurement, and debugging facilities. */ +#include <linux/irqflags.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-l2c.h> #include <asm/octeon/cvmx-spinlock.h> diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index 7cf80ca2c1d..f9f5307434c 100644 --- a/arch/mips/fw/arc/misc.c +++ b/arch/mips/fw/arc/misc.c @@ -11,6 +11,7 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/irqflags.h> #include <asm/bcache.h> diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 82ad35ce2b4..46ac73abd5e 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -14,7 +14,6 @@ #endif #include <linux/compiler.h> -#include <linux/irqflags.h> #include <linux/types.h> #include <asm/barrier.h> #include <asm/byteorder.h> /* sigh ... */ @@ -44,6 +43,24 @@ #define smp_mb__before_clear_bit() smp_mb__before_llsc() #define smp_mb__after_clear_bit() smp_llsc_mb() + +/* + * These are the "slower" versions of the functions and are in bitops.c. + * These functions call raw_local_irq_{save,restore}(). 
+ */ +void __mips_set_bit(unsigned long nr, volatile unsigned long *addr); +void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr); +void __mips_change_bit(unsigned long nr, volatile unsigned long *addr); +int __mips_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_clear_bit(unsigned long nr, + volatile unsigned long *addr); +int __mips_test_and_change_bit(unsigned long nr, + volatile unsigned long *addr); + + /* * set_bit - Atomically set a bit in memory * @nr: the bit to set @@ -57,7 +74,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long temp; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + __mips_set_bit(nr, addr); } /* @@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long temp; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a &= ~mask; - raw_local_irq_restore(flags); - } + } else + __mips_clear_bit(nr, addr); } /* @@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad */ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; if (kernel_uses_llsc && R10000_LLSC_WAR) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); @@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+m" (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - *a ^= mask; - raw_local_irq_restore(flags); - } + } else + __mips_change_bit(nr, addr); } /* @@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - 
raw_local_irq_save(flags); - res = (mask & *a); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_set_bit(nr, addr); smp_llsc_mb(); @@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr, static inline int test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; if (kernel_uses_llsc && R10000_LLSC_WAR) { @@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a |= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_set_bit_lock(nr, addr); smp_llsc_mb(); @@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a &= ~mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_clear_bit(nr, addr); smp_llsc_mb(); @@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr, static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - unsigned short bit = nr & SZLONG_MASK; + int bit = nr & SZLONG_MASK; unsigned long res; smp_mb__before_llsc(); @@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr, } while (unlikely(!res)); res = temp & (1UL << bit); - } else { - volatile unsigned long *a = addr; - unsigned long mask; - unsigned long flags; - - a += nr >> SZLONG_LOG; - mask = 1UL << bit; - raw_local_irq_save(flags); - res = (mask & *a); - *a ^= mask; - raw_local_irq_restore(flags); - } + } else + res = __mips_test_and_change_bit(nr, addr); smp_llsc_mb(); diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 58277e0e9cd..3c5d1464b7b 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -290,7 +290,7 @@ struct compat_shmid64_ds { static inline int is_compat_task(void) { - return test_thread_flag(TIF_32BIT); + return test_thread_flag(TIF_32BIT_ADDR); } #endif /* _ASM_COMPAT_H */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 29d9c23c20c..ff2e0345e01 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -15,6 +15,7 @@ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/types.h> +#include <linux/irqflags.h> #include <asm/addrspace.h> #include <asm/bug.h> diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 309cbcd6909..9f3384c789d 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -16,83 +16,13 @@ #include <linux/compiler.h> #include <asm/hazards.h> -__asm__( - " .macro arch_local_irq_enable \n" - " .set push \n" - " .set reorder \n" - " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" - " ori $1, 0x400 \n" - " 
xori $1, 0x400 \n" - " mtc0 $1, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) - " ei \n" -#else - " mfc0 $1,$12 \n" - " ori $1,0x1f \n" - " xori $1,0x1e \n" - " mtc0 $1,$12 \n" -#endif - " irq_enable_hazard \n" - " .set pop \n" - " .endm"); +#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) -extern void smtc_ipi_replay(void); - -static inline void arch_local_irq_enable(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of call overhead on each local_irq_enable() - */ - smtc_ipi_replay(); -#endif - __asm__ __volatile__( - "arch_local_irq_enable" - : /* no outputs */ - : /* no inputs */ - : "memory"); -} - - -/* - * For cli() we have to insert nops to make sure that the new value - * has actually arrived in the status register before the end of this - * macro. - * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs - * no nops at all. - */ -/* - * For TX49, operating only IE bit is not enough. - * - * If mfc0 $12 follows store and the mfc0 is last instruction of a - * page and fetching the next instruction causes TLB miss, the result - * of the mfc0 might wrongly contain EXL bit. - * - * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 - * - * Workaround: mask EXL bit of the result or place a nop before mfc0. - */ __asm__( " .macro arch_local_irq_disable\n" " .set push \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 $1, $2, 1 \n" - " ori $1, 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) " di \n" -#else - " mfc0 $1,$12 \n" - " ori $1,0x1f \n" - " xori $1,0x1f \n" - " .set noreorder \n" - " mtc0 $1,$12 \n" -#endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); @@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void) : "memory"); } -__asm__( - " .macro arch_local_save_flags flags \n" - " .set push \n" - " .set reorder \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\flags, $2, 1 \n" -#else - " mfc0 \\flags, $12 \n" -#endif - " .set pop \n" - " .endm \n"); - -static inline unsigned long arch_local_save_flags(void) -{ - unsigned long flags; - asm volatile("arch_local_save_flags %0" : "=r" (flags)); - return flags; -} __asm__( " .macro arch_local_irq_save result \n" " .set push \n" " .set reorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\result, $2, 1 \n" - " ori $1, \\result, 0x400 \n" - " .set noreorder \n" - " mtc0 $1, $2, 1 \n" - " andi \\result, \\result, 0x400 \n" -#elif defined(CONFIG_CPU_MIPSR2) " di \\result \n" " andi \\result, 1 \n" -#else - " mfc0 \\result, $12 \n" - " ori $1, \\result, 0x1f \n" - " xori $1, 0x1f \n" - " .set noreorder \n" - " mtc0 $1, $12 \n" -#endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); @@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void) return flags; } + __asm__( " .macro arch_local_irq_restore flags \n" " .set push \n" " .set noreorder \n" " .set noat \n" -#ifdef CONFIG_MIPS_MT_SMTC - "mfc0 $1, $2, 1 \n" - "andi \\flags, 0x400 \n" - "ori $1, 0x400 \n" - "xori $1, 0x400 \n" - "or \\flags, $1 \n" - "mtc0 \\flags, $2, 1 \n" -#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) +#if defined(CONFIG_IRQ_CPU) /* * Slow, but doesn't suffer from a relatively unlikely race * condition we're having since days 1. */ " beqz \\flags, 1f \n" - " di \n" + " di \n" " ei \n" "1: \n" -#elif defined(CONFIG_CPU_MIPSR2) +#else /* * Fast, dangerous. Life is fun, life is good. 
*/ " mfc0 $1, $12 \n" " ins $1, \\flags, 0, 1 \n" " mtc0 $1, $12 \n" -#else - " mfc0 $1, $12 \n" - " andi \\flags, 1 \n" - " ori $1, 0x1f \n" - " xori $1, 0x1f \n" - " or \\flags, $1 \n" - " mtc0 \\flags, $12 \n" #endif " irq_disable_hazard \n" " .set pop \n" " .endm \n"); - static inline void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - __asm__ __volatile__( "arch_local_irq_restore\t%0" : "=r" (__tmp1) @@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); } +#else +/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ +void arch_local_irq_disable(void); +unsigned long arch_local_irq_save(void); +void arch_local_irq_restore(unsigned long flags); +void __arch_local_irq_restore(unsigned long flags); +#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ + + +__asm__( + " .macro arch_local_irq_enable \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " mtc0 $1, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) + " ei \n" +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1e \n" + " mtc0 $1,$12 \n" +#endif + " irq_enable_hazard \n" + " .set pop \n" + " .endm"); + +extern void smtc_ipi_replay(void); + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif + __asm__ __volatile__( + "arch_local_irq_enable" + : /* no outputs */ + : /* no inputs */ + : "memory"); +} + + +__asm__( + " .macro arch_local_save_flags flags \n" + " .set push \n" + " .set reorder \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 \\flags, $2, 1 \n" +#else + " mfc0 \\flags, $12 \n" +#endif + " .set pop \n" + " .endm \n"); + +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile("arch_local_save_flags %0" : "=r" (flags)); + return flags; +} + static inline int arch_irqs_disabled_flags(unsigned long flags) { @@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags) #endif } -#endif +#endif /* #ifndef __ASSEMBLY__ */ /* * Do the CPU's IRQ-state tracing from assembly code. 
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 8debe9e9175..18806a52061 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define TIF_LOAD_WATCH 25 /* If set, load watch registers */ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */ -#ifdef CONFIG_MIPS32_O32 -#define TIF_32BIT TIF_32BIT_REGS -#elif defined(CONFIG_MIPS32_N32) -#define TIF_32BIT _TIF_32BIT_ADDR -#endif /* CONFIG_MIPS32_O32 */ - #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index a53f8ec37aa..290dc6a1d7a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -79,7 +79,7 @@ static struct resource data_resource = { .name = "Kernel data", }; void __init add_memory_region(phys_t start, phys_t size, long type) { int x = boot_mem_map.nr_map; - struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1; + int i; /* Sanity check */ if (start + size < start) { @@ -88,15 +88,29 @@ void __init add_memory_region(phys_t start, phys_t size, long type) } /* - * Try to merge with previous entry if any. This is far less than - * perfect but is sufficient for most real world cases. + * Try to merge with existing entry, if any. */ - if (x && prev->addr + prev->size == start && prev->type == type) { - prev->size += size; + for (i = 0; i < boot_mem_map.nr_map; i++) { + struct boot_mem_map_entry *entry = boot_mem_map.map + i; + unsigned long top; + + if (entry->type != type) + continue; + + if (start + size < entry->addr) + continue; /* no overlap */ + + if (entry->addr + entry->size < start) + continue; /* no overlap */ + + top = max(entry->addr + entry->size, start + size); + entry->addr = min(entry->addr, start); + entry->size = top - entry->addr; + return; } - if (x == BOOT_MEM_MAP_MAX) { + if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) { pr_err("Ooops! Too many entries in the memory map!\n"); return; } diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index c4a82e841c7..eeddc58802e 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -2,8 +2,9 @@ # Makefile for MIPS-specific library files.. # -lib-y += csum_partial.o delay.o memcpy.o memset.o \ - strlen_user.o strncpy_user.o strnlen_user.o uncached.o +lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ + mips-atomic.o strlen_user.o strncpy_user.o \ + strnlen_user.o uncached.o obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c new file mode 100644 index 00000000000..239a9c957b0 --- /dev/null +++ b/arch/mips/lib/bitops.c @@ -0,0 +1,179 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 1999, 2000 Silicon Graphics, Inc. + */ +#include <linux/bitops.h> +#include <linux/irqflags.h> +#include <linux/export.h> + + +/** + * __mips_set_bit - Atomically set a bit in memory. This is called by + * set_bit() if it cannot find a faster solution. 
+ * @nr: the bit to set + * @addr: the address to start counting from + */ +void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a |= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_set_bit); + + +/** + * __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if + * it cannot find a faster solution. + * @nr: Bit to clear + * @addr: Address to start counting from + */ +void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a &= ~mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_clear_bit); + + +/** + * __mips_change_bit - Toggle a bit in memory. This is called by change_bit() + * if it cannot find a faster solution. + * @nr: Bit to change + * @addr: Address to start counting from + */ +void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + *a ^= mask; + raw_local_irq_restore(flags); +} +EXPORT_SYMBOL(__mips_change_bit); + + +/** + * __mips_test_and_set_bit - Set a bit and return its old value. This is + * called by test_and_set_bit() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit(unsigned long nr, + volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit); + + +/** + * __mips_test_and_set_bit_lock - Set a bit and return its old value. This is + * called by test_and_set_bit_lock() if it cannot find a faster solution. + * @nr: Bit to set + * @addr: Address to count from + */ +int __mips_test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a |= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_set_bit_lock); + + +/** + * __mips_test_and_clear_bit - Clear a bit and return its old value. This is + * called by test_and_clear_bit() if it cannot find a faster solution. + * @nr: Bit to clear + * @addr: Address to count from + */ +int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a &= ~mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_clear_bit); + + +/** + * __mips_test_and_change_bit - Change a bit and return its old value. 
This is + * called by test_and_change_bit() if it cannot find a faster solution. + * @nr: Bit to change + * @addr: Address to count from + */ +int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + volatile unsigned long *a = addr; + unsigned bit = nr & SZLONG_MASK; + unsigned long mask; + unsigned long flags; + unsigned long res; + + a += nr >> SZLONG_LOG; + mask = 1UL << bit; + raw_local_irq_save(flags); + res = (mask & *a); + *a ^= mask; + raw_local_irq_restore(flags); + return res; +} +EXPORT_SYMBOL(__mips_test_and_change_bit); diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c new file mode 100644 index 00000000000..cd160be3ce4 --- /dev/null +++ b/arch/mips/lib/mips-atomic.c @@ -0,0 +1,176 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1999 Silicon Graphics + * Copyright (C) 2000 MIPS Technologies, Inc. + */ +#include <asm/irqflags.h> +#include <asm/hazards.h> +#include <linux/compiler.h> +#include <linux/preempt.h> +#include <linux/export.h> + +#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) + +/* + * For cli() we have to insert nops to make sure that the new value + * has actually arrived in the status register before the end of this + * macro. + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs + * no nops at all. + */ +/* + * For TX49, operating only IE bit is not enough. + * + * If mfc0 $12 follows store and the mfc0 is last instruction of a + * page and fetching the next instruction causes TLB miss, the result + * of the mfc0 might wrongly contain EXL bit. + * + * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008 + * + * Workaround: mask EXL bit of the result or place a nop before mfc0. 
+ */ +__asm__( + " .macro arch_local_irq_disable\n" + " .set push \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 \n" + " ori $1, 0x400 \n" + " .set noreorder \n" + " mtc0 $1, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1,$12 \n" + " ori $1,0x1f \n" + " xori $1,0x1f \n" + " .set noreorder \n" + " mtc0 $1,$12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace void arch_local_irq_disable(void) +{ + preempt_disable(); + __asm__ __volatile__( + "arch_local_irq_disable" + : /* no outputs */ + : /* no inputs */ + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_disable); + + +__asm__( + " .macro arch_local_irq_save result \n" + " .set push \n" + " .set reorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 \\result, $2, 1 \n" + " ori $1, \\result, 0x400 \n" + " .set noreorder \n" + " mtc0 $1, $2, 1 \n" + " andi \\result, \\result, 0x400 \n" +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 \\result, $12 \n" + " ori $1, \\result, 0x1f \n" + " xori $1, 0x1f \n" + " .set noreorder \n" + " mtc0 $1, $12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + preempt_disable(); + asm volatile("arch_local_irq_save\t%0" + : "=r" (flags) + : /* no inputs */ + : "memory"); + preempt_enable(); + return flags; +} +EXPORT_SYMBOL(arch_local_irq_save); + + +__asm__( + " .macro arch_local_irq_restore flags \n" + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + "mfc0 $1, $2, 1 \n" + "andi \\flags, 0x400 \n" + "ori $1, 0x400 \n" + "xori $1, 0x400 \n" + "or \\flags, $1 \n" + "mtc0 \\flags, $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi \\flags, 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or \\flags, $1 \n" + " mtc0 \\flags, $12 \n" +#endif + " irq_disable_hazard \n" + " .set pop \n" + " .endm \n"); + +notrace void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of branch and call overhead on each + * local_irq_restore() + */ + if (unlikely(!(flags & 0x0400))) + smtc_ipi_replay(); +#endif + preempt_disable(); + __asm__ __volatile__( + "arch_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(arch_local_irq_restore); + + +notrace void __arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + + preempt_disable(); + __asm__ __volatile__( + "arch_local_irq_restore\t%0" + : "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); +} +EXPORT_SYMBOL(__arch_local_irq_restore); + +#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c index 80562b81f0f..74732177851 100644 --- a/arch/mips/mti-malta/malta-platform.c +++ b/arch/mips/mti-malta/malta-platform.c @@ -29,6 +29,7 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> +#include <asm/mips-boards/maltaint.h> #include <mtd/mtd-abi.h> #define SMC_PORT(base, int) \ @@ -48,7 +49,7 @@ static struct 
plat_serial8250_port uart8250_data[] = { SMC_PORT(0x2F8, 3), { .mapbase = 0x1f000900, /* The CBUS UART */ - .irq = MIPS_CPU_IRQ_BASE + 2, + .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, .uartclk = 3686400, /* Twice the usual clk! */ .iotype = UPIO_MEM32, .flags = CBUS_UART_FLAGS, diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index fd49aeda9eb..5dede04f2f3 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) { compat_sigset_t s; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; sigset_64to32(&s, set); return copy_to_user(up, &s, sizeof s); @@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) compat_sigset_t s; int r; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; if ((r = copy_from_user(&s, up, sz)) == 0) { sigset_32to64(set, &s); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 7426e40699b..f76c10863c6 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, struct vm_area_struct *vma; int offset = mapping ? get_offset(mapping) : 0; + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; + addr = DCACHE_ALIGN(addr - offset) + offset; for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi index 7ab286ab530..39ed65a44c5 100644 --- a/arch/powerpc/boot/dts/mpc5200b.dtsi +++ b/arch/powerpc/boot/dts/mpc5200b.dtsi @@ -231,6 +231,12 @@ interrupts = <2 7 0>; }; + sclpc@3c00 { + compatible = "fsl,mpc5200-lpbfifo"; + reg = <0x3c00 0x60>; + interrupts = <2 23 0>; + }; + i2c@3d00 { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi index 3444eb8f0ad..24f66803929 100644 --- a/arch/powerpc/boot/dts/o2d.dtsi +++ b/arch/powerpc/boot/dts/o2d.dtsi @@ -86,12 +86,6 @@ reg = <0>; }; }; - - sclpc@3c00 { - compatible = "fsl,mpc5200-lpbfifo"; - reg = <0x3c00 0x60>; - interrupts = <3 23 0>; - }; }; localbus { diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts index 9e354997eb7..96512c05803 100644 --- a/arch/powerpc/boot/dts/pcm030.dts +++ b/arch/powerpc/boot/dts/pcm030.dts @@ -59,7 +59,7 @@ #gpio-cells = <2>; }; - psc@2000 { /* PSC1 in ac97 mode */ + audioplatform: psc@2000 { /* PSC1 in ac97 mode */ compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; cell-index = <0>; }; @@ -134,4 +134,9 @@ localbus { status = "disabled"; }; + + sound { + compatible = "phytec,pcm030-audio-fabric"; + asoc-platform = <&audioplatform>; + }; }; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 8520b58a5e9..b89ef65392d 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; - default: - pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n", - __func__, virq, l1irq, l2irq); - return -EINVAL; + case MPC52xx_IRQ_L1_CRIT: + pr_warn("%s: Critical IRQ #%d is 
unsupported! Nopping it.\n", + __func__, l2irq); + irq_set_chip(virq, &no_irq_chip); + return 0; } irq_set_chip_and_handler(virq, irqchip, handle_level_irq); diff --git a/arch/powerpc/platforms/pseries/eeh_pe.c b/arch/powerpc/platforms/pseries/eeh_pe.c index 797cd181dc3..d16c8ded108 100644 --- a/arch/powerpc/platforms/pseries/eeh_pe.c +++ b/arch/powerpc/platforms/pseries/eeh_pe.c @@ -449,7 +449,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) if (list_empty(&pe->edevs)) { cnt = 0; list_for_each_entry(child, &pe->child_list, child) { - if (!(pe->type & EEH_PE_INVALID)) { + if (!(child->type & EEH_PE_INVALID)) { cnt++; break; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index d19f4977c83..e5b08472313 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -220,7 +220,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) /* Get the top level device in the PE */ edev = of_node_to_eeh_dev(dn); - edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); + if (edev->pe) + edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); dn = eeh_dev_to_of_node(edev); if (!dn) return NULL; diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c index 45d00e5fe14..4d806b41960 100644 --- a/arch/powerpc/platforms/pseries/processor_idle.c +++ b/arch/powerpc/platforms/pseries/processor_idle.c @@ -36,7 +36,7 @@ static struct cpuidle_state *cpuidle_state_table; static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) { - *kt_before = ktime_get_real(); + *kt_before = ktime_get(); *in_purr = mfspr(SPRN_PURR); /* * Indicate to the HV that we are idle. Now would be @@ -50,7 +50,7 @@ static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; get_lppaca()->idle = 0; - return ktime_to_us(ktime_sub(ktime_get_real(), kt_before)); + return ktime_to_us(ktime_sub(ktime_get(), kt_before)); } static int snooze_loop(struct cpuidle_device *dev, diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 5dba755a43e..d385f396dfe 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -96,6 +96,7 @@ config S390 select HAVE_MEMBLOCK_NODE_MAP select HAVE_CMPXCHG_LOCAL select HAVE_CMPXCHG_DOUBLE + select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_VIRT_CPU_ACCOUNTING select VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index a34a9d612fc..18cd6b59265 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -20,7 +20,7 @@ #define PSW32_MASK_CC 0x00003000UL #define PSW32_MASK_PM 0x00000f00UL -#define PSW32_MASK_USER 0x00003F00UL +#define PSW32_MASK_USER 0x0000FF00UL #define PSW32_ADDR_AMODE 0x80000000UL #define PSW32_ADDR_INSN 0x7FFFFFFFUL diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 9ca30538376..9935cbd6a46 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -8,6 +8,9 @@ struct cpu; #ifdef CONFIG_SCHED_BOOK +extern unsigned char cpu_socket_id[NR_CPUS]; +#define topology_physical_package_id(cpu) (cpu_socket_id[cpu]) + extern unsigned char cpu_core_id[NR_CPUS]; extern cpumask_t cpu_core_map[NR_CPUS]; diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index 705588a16d7..a5ca214b34f 100644 --- 
a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -239,7 +239,7 @@ typedef struct #define PSW_MASK_EA 0x00000000UL #define PSW_MASK_BA 0x00000000UL -#define PSW_MASK_USER 0x00003F00UL +#define PSW_MASK_USER 0x0000FF00UL #define PSW_ADDR_AMODE 0x80000000UL #define PSW_ADDR_INSN 0x7FFFFFFFUL @@ -269,7 +269,7 @@ typedef struct #define PSW_MASK_EA 0x0000000100000000UL #define PSW_MASK_BA 0x0000000080000000UL -#define PSW_MASK_USER 0x00003F8180000000UL +#define PSW_MASK_USER 0x0000FF8180000000UL #define PSW_ADDR_AMODE 0x0000000000000000UL #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index a1e8a8694bb..593fcc9253f 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); for (i = 0; i < NUM_GPRS; i++) regs->gprs[i] = (__u64) regs32.gprs[i]; @@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__force __u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); @@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (__force __u64) frame; - regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ + /* Force 31 bit amode and default user address space control. */ + regs->psw.mask = PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (__u64) ka->sa.sa_handler; regs->gprs[2] = map_signal(sig); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index c13a2a37ef0..d1259d87507 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (user_sregs.regs.psw.mask & PSW_MASK_USER); + /* Check for invalid user address space control. */ + if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) + regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); /* Check for invalid amode */ if (regs->psw.mask & PSW_MASK_EA) regs->psw.mask |= PSW_MASK_BA; @@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. 
*/ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); @@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->gprs[15] = (unsigned long) frame; - regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ + /* Force default amode and default user address space control. */ + regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | + (psw_user_bits & PSW_MASK_ASC) | + (regs->psw.mask & ~PSW_MASK_ASC); regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; regs->gprs[2] = map_signal(sig); diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 54d93f4b681..dd55f7c2010 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock); static struct mask_info core_info; cpumask_t cpu_core_map[NR_CPUS]; unsigned char cpu_core_id[NR_CPUS]; +unsigned char cpu_socket_id[NR_CPUS]; static struct mask_info book_info; cpumask_t cpu_book_map[NR_CPUS]; @@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, cpumask_set_cpu(lcpu, &book->mask); cpu_book_id[lcpu] = book->id; cpumask_set_cpu(lcpu, &core->mask); + cpu_core_id[lcpu] = rcpu; if (one_core_per_cpu) { - cpu_core_id[lcpu] = rcpu; + cpu_socket_id[lcpu] = rcpu; core = core->next; } else { - cpu_core_id[lcpu] = core->id; + cpu_socket_id[lcpu] = core->id; } smp_cpu_set_polarization(lcpu, tl_cpu->pp); } diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 8b8285310b5..1f5315d1215 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, - (void __user *)start, len))) + if ((end < start) || (end > TASK_SIZE)) return 0; local_irq_save(flags); @@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (end < start) + if ((end < start) || (end > TASK_SIZE)) goto slow_irqon; /* diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index f93003123bc..67c62578d17 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -63,10 +63,13 @@ extern char *of_console_options; extern void irq_trans_init(struct device_node *dp); extern char *build_path_component(struct device_node *dp); -/* SPARC has a local implementation */ +/* SPARC has local implementations */ extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); #define of_address_to_resource of_address_to_resource +void __iomem *of_iomap(struct device_node *node, int index); +#define of_iomap of_iomap + #endif /* __KERNEL__ */ #endif /* _SPARC_PROM_H */ diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 867de2f8189..689e1ba6280 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - - if (err) + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index e5c5473e69c..c4fbb21e802 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -16,6 +16,8 @@ config UNICORE32 select ARCH_WANT_FRAME_POINTERS select GENERIC_IOMAP select MODULES_USE_ELF_REL + select GENERIC_KERNEL_THREAD + select GENERIC_KERNEL_EXECVE help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip @@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY config ARCH_MAY_HAVE_PC_FDC bool +config ZONE_DMA + def_bool y + config NEED_DMA_MAP_STATE def_bool y @@ -216,7 +221,7 @@ config PUV3_GPIO bool depends on !ARCH_FPGA select GENERIC_GPIO - select GPIO_SYSFS if EXPERIMENTAL + select GPIO_SYSFS default y if PUV3_NB0916 diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index c910c9857e1..601e92f18af 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -1,4 +1,3 @@ -include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h index b1ff8cadb08..93a56f3e234 100644 --- a/arch/unicore32/include/asm/bug.h +++ b/arch/unicore32/include/asm/bug.h @@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err); extern void uc32_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap); -extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -extern void __show_regs(struct pt_regs *); - #endif /* __UNICORE_BUG_H__ */ diff --git a/arch/unicore32/include/asm/cmpxchg.h b/arch/unicore32/include/asm/cmpxchg.h index df4d5acfd19..8e797ad4fa2 100644 --- a/arch/unicore32/include/asm/cmpxchg.h +++ b/arch/unicore32/include/asm/cmpxchg.h @@ -35,7 +35,7 @@ static 
inline unsigned long __xchg(unsigned long x, volatile void *ptr, : "memory", "cc"); break; default: - ret = __xchg_bad_pointer(); + __xchg_bad_pointer(); } return ret; diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b95..00000000000 --- a/arch/unicore32/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h index 14382cb0965..4eaa4216766 100644 --- a/arch/unicore32/include/asm/processor.h +++ b/arch/unicore32/include/asm/processor.h @@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p); #define cpu_relax() barrier() -/* - * Create a new kernel thread - */ -extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h index b9caf9b0997..726749dab52 100644 --- a/arch/unicore32/include/asm/ptrace.h +++ b/arch/unicore32/include/asm/ptrace.h @@ -12,80 +12,10 @@ #ifndef __UNICORE_PTRACE_H__ #define __UNICORE_PTRACE_H__ -#define PTRACE_GET_THREAD_AREA 22 - -/* - * PSR bits - */ -#define USER_MODE 0x00000010 -#define REAL_MODE 0x00000011 -#define INTR_MODE 0x00000012 -#define PRIV_MODE 0x00000013 -#define ABRT_MODE 0x00000017 -#define EXTN_MODE 0x0000001b -#define SUSR_MODE 0x0000001f -#define MODE_MASK 0x0000001f -#define PSR_R_BIT 0x00000040 -#define PSR_I_BIT 0x00000080 -#define PSR_V_BIT 0x10000000 -#define PSR_C_BIT 0x20000000 -#define PSR_Z_BIT 0x40000000 -#define PSR_S_BIT 0x80000000 - -/* - * Groups of PSR bits - */ -#define PSR_f 0xff000000 /* Flags */ -#define PSR_c 0x000000ff /* Control */ +#include <uapi/asm/ptrace.h> #ifndef __ASSEMBLY__ -/* - * This struct defines the way the registers are stored on the - * stack during a system call. Note that sizeof(struct pt_regs) - * has to be a multiple of 8. 
- */ -struct pt_regs { - unsigned long uregs[34]; -}; - -#define UCreg_asr uregs[32] -#define UCreg_pc uregs[31] -#define UCreg_lr uregs[30] -#define UCreg_sp uregs[29] -#define UCreg_ip uregs[28] -#define UCreg_fp uregs[27] -#define UCreg_26 uregs[26] -#define UCreg_25 uregs[25] -#define UCreg_24 uregs[24] -#define UCreg_23 uregs[23] -#define UCreg_22 uregs[22] -#define UCreg_21 uregs[21] -#define UCreg_20 uregs[20] -#define UCreg_19 uregs[19] -#define UCreg_18 uregs[18] -#define UCreg_17 uregs[17] -#define UCreg_16 uregs[16] -#define UCreg_15 uregs[15] -#define UCreg_14 uregs[14] -#define UCreg_13 uregs[13] -#define UCreg_12 uregs[12] -#define UCreg_11 uregs[11] -#define UCreg_10 uregs[10] -#define UCreg_09 uregs[9] -#define UCreg_08 uregs[8] -#define UCreg_07 uregs[7] -#define UCreg_06 uregs[6] -#define UCreg_05 uregs[5] -#define UCreg_04 uregs[4] -#define UCreg_03 uregs[3] -#define UCreg_02 uregs[2] -#define UCreg_01 uregs[1] -#define UCreg_00 uregs[0] -#define UCreg_ORIG_00 uregs[33] - -#ifdef __KERNEL__ - #define user_mode(regs) \ (processor_mode(regs) == USER_MODE) @@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs) #define instruction_pointer(regs) ((regs)->UCreg_pc) -#endif /* __KERNEL__ */ - #endif /* __ASSEMBLY__ */ - #endif - diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index baebb3da1d4..0514d7ad685 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,3 +1,10 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +header-y += byteorder.h +header-y += kvm_para.h +header-y += ptrace.h +header-y += sigcontext.h +header-y += unistd.h + +generic-y += kvm_para.h diff --git a/arch/unicore32/include/asm/byteorder.h b/arch/unicore32/include/uapi/asm/byteorder.h index ebe1b3fef3e..ebe1b3fef3e 100644 --- a/arch/unicore32/include/asm/byteorder.h +++ b/arch/unicore32/include/uapi/asm/byteorder.h diff --git a/arch/unicore32/include/uapi/asm/ptrace.h b/arch/unicore32/include/uapi/asm/ptrace.h new file mode 100644 index 00000000000..187aa2e98a5 --- /dev/null +++ b/arch/unicore32/include/uapi/asm/ptrace.h @@ -0,0 +1,90 @@ +/* + * linux/arch/unicore32/include/asm/ptrace.h + * + * Code specific to PKUnity SoC and UniCore ISA + * + * Copyright (C) 2001-2010 GUAN Xue-tao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _UAPI__UNICORE_PTRACE_H__ +#define _UAPI__UNICORE_PTRACE_H__ + +#define PTRACE_GET_THREAD_AREA 22 + +/* + * PSR bits + */ +#define USER_MODE 0x00000010 +#define REAL_MODE 0x00000011 +#define INTR_MODE 0x00000012 +#define PRIV_MODE 0x00000013 +#define ABRT_MODE 0x00000017 +#define EXTN_MODE 0x0000001b +#define SUSR_MODE 0x0000001f +#define MODE_MASK 0x0000001f +#define PSR_R_BIT 0x00000040 +#define PSR_I_BIT 0x00000080 +#define PSR_V_BIT 0x10000000 +#define PSR_C_BIT 0x20000000 +#define PSR_Z_BIT 0x40000000 +#define PSR_S_BIT 0x80000000 + +/* + * Groups of PSR bits + */ +#define PSR_f 0xff000000 /* Flags */ +#define PSR_c 0x000000ff /* Control */ + +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * stack during a system call. Note that sizeof(struct pt_regs) + * has to be a multiple of 8. 
+ */ +struct pt_regs { + unsigned long uregs[34]; +}; + +#define UCreg_asr uregs[32] +#define UCreg_pc uregs[31] +#define UCreg_lr uregs[30] +#define UCreg_sp uregs[29] +#define UCreg_ip uregs[28] +#define UCreg_fp uregs[27] +#define UCreg_26 uregs[26] +#define UCreg_25 uregs[25] +#define UCreg_24 uregs[24] +#define UCreg_23 uregs[23] +#define UCreg_22 uregs[22] +#define UCreg_21 uregs[21] +#define UCreg_20 uregs[20] +#define UCreg_19 uregs[19] +#define UCreg_18 uregs[18] +#define UCreg_17 uregs[17] +#define UCreg_16 uregs[16] +#define UCreg_15 uregs[15] +#define UCreg_14 uregs[14] +#define UCreg_13 uregs[13] +#define UCreg_12 uregs[12] +#define UCreg_11 uregs[11] +#define UCreg_10 uregs[10] +#define UCreg_09 uregs[9] +#define UCreg_08 uregs[8] +#define UCreg_07 uregs[7] +#define UCreg_06 uregs[6] +#define UCreg_05 uregs[5] +#define UCreg_04 uregs[4] +#define UCreg_03 uregs[3] +#define UCreg_02 uregs[2] +#define UCreg_01 uregs[1] +#define UCreg_00 uregs[0] +#define UCreg_ORIG_00 uregs[33] + + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI__UNICORE_PTRACE_H__ */ diff --git a/arch/unicore32/include/asm/sigcontext.h b/arch/unicore32/include/uapi/asm/sigcontext.h index 6a2d7671c05..6a2d7671c05 100644 --- a/arch/unicore32/include/asm/sigcontext.h +++ b/arch/unicore32/include/uapi/asm/sigcontext.h diff --git a/arch/unicore32/include/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 2abcf61c615..d18a3be89b3 100644 --- a/arch/unicore32/include/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -12,3 +12,4 @@ /* Use the standard ABI for syscalls. */ #include <asm-generic/unistd.h> +#define __ARCH_WANT_SYS_EXECVE diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S index dcb87ab19dd..7049350c790 100644 --- a/arch/unicore32/kernel/entry.S +++ b/arch/unicore32/kernel/entry.S @@ -573,17 +573,16 @@ ENDPROC(ret_to_user) */ ENTRY(ret_from_fork) b.l schedule_tail - get_thread_info tsk - ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing - mov why, #1 - cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? 
- beq ret_slow_syscall - mov r1, sp - mov r0, #1 @ trace exit [IP = 1] - b.l syscall_trace b ret_slow_syscall ENDPROC(ret_from_fork) +ENTRY(ret_from_kernel_thread) + b.l schedule_tail + mov r0, r5 + adr lr, ret_slow_syscall + mov pc, r4 +ENDPROC(ret_from_kernel_thread) + /*============================================================================= * SWI handler *----------------------------------------------------------------------------- @@ -669,11 +668,6 @@ __cr_alignment: #endif .ltorg -ENTRY(sys_execve) - add r3, sp, #S_OFF - b __sys_execve -ENDPROC(sys_execve) - ENTRY(sys_clone) add ip, sp, #S_OFF stw ip, [sp+], #4 diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index b008586dad7..a8fe265ce2c 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task) } asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); +asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread"); int copy_thread(unsigned long clone_flags, unsigned long stack_start, @@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); - *childregs = *regs; - childregs->UCreg_00 = 0; - childregs->UCreg_sp = stack_start; - memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); thread->cpu_context.sp = (unsigned long)childregs; - thread->cpu_context.pc = (unsigned long)ret_from_fork; - - if (clone_flags & CLONE_SETTLS) - childregs->UCreg_16 = regs->UCreg_03; + if (unlikely(!regs)) { + thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread; + thread->cpu_context.r4 = stack_start; + thread->cpu_context.r5 = stk_sz; + memset(childregs, 0, sizeof(struct pt_regs)); + } else { + thread->cpu_context.pc = (unsigned long)ret_from_fork; + *childregs = *regs; + childregs->UCreg_00 = 0; + childregs->UCreg_sp = stack_start; + if (clone_flags & CLONE_SETTLS) + childregs->UCreg_16 = regs->UCreg_03; + } return 0; } @@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp) } EXPORT_SYMBOL(dump_fpu); -/* - * Shuffle the argument into the correct register before calling the - * thread function. r1 is the thread argument, r2 is the pointer to - * the thread function, and r3 points to the exit function. - */ -asm(".pushsection .text\n" -" .align\n" -" .type kernel_thread_helper, #function\n" -"kernel_thread_helper:\n" -" mov.a asr, r7\n" -" mov r0, r4\n" -" mov lr, r6\n" -" mov pc, r5\n" -" .size kernel_thread_helper, . - kernel_thread_helper\n" -" .popsection"); - -/* - * Create a kernel thread. 
- */ -pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) -{ - struct pt_regs regs; - - memset(&regs, 0, sizeof(regs)); - - regs.UCreg_04 = (unsigned long)arg; - regs.UCreg_05 = (unsigned long)fn; - regs.UCreg_06 = (unsigned long)do_exit; - regs.UCreg_07 = PRIV_MODE; - regs.UCreg_pc = (unsigned long)kernel_thread_helper; - regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT; - - return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); -} -EXPORT_SYMBOL(kernel_thread); - unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index f23955028a1..30f749da8f7 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[]; extern void kernel_thread_helper(void); extern void __init early_signal_init(void); + +extern asmlinkage void __backtrace(void); +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +extern void __show_regs(struct pt_regs *); + #endif diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c index fabdee96110..9680134b31f 100644 --- a/arch/unicore32/kernel/sys.c +++ b/arch/unicore32/kernel/sys.c @@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, parent_tid, child_tid); } -/* sys_execve() executes a new program. - * This is called indirectly via a small wrapper - */ -asmlinkage long __sys_execve(const char __user *filename, - const char __user *const __user *argv, - const char __user *const __user *envp, - struct pt_regs *regs) -{ - int error; - struct filename *fn; - - fn = getname(filename); - error = PTR_ERR(fn); - if (IS_ERR(fn)) - goto out; - error = do_execve(fn->name, argv, envp, regs); - putname(fn); -out: - return error; -} - -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) -{ - struct pt_regs regs; - int ret; - - memset(&regs, 0, sizeof(struct pt_regs)); - ret = do_execve(filename, - (const char __user *const __user *)argv, - (const char __user *const __user *)envp, &regs); - if (ret < 0) - goto out; - - /* - * Save argc to the register structure for userspace. - */ - regs.UCreg_00 = ret; - - /* - * We were successful. We won't be returning to our caller, but - * instead to user space by manipulating the kernel stack. - */ - asm("add r0, %0, %1\n\t" - "mov r1, %2\n\t" - "mov r2, %3\n\t" - "mov r22, #0\n\t" /* not a syscall */ - "mov r23, %0\n\t" /* thread structure */ - "b.l memmove\n\t" /* copy regs to top of stack */ - "mov sp, r0\n\t" /* reposition stack pointer */ - "b ret_to_user" - : - : "r" (current_thread_info()), - "Ir" (THREAD_START_SP - sizeof(regs)), - "r" (&regs), - "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); - - out: - return ret; -} - /* Note: used by the compat code even in 64-bit Linux.
*/ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 2eeb9c04cab..f9b5c10bcce 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) } static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, - struct task_struct *tsk) + unsigned int flags, struct task_struct *tsk) { struct vm_area_struct *vma; int fault; @@ -194,14 +194,7 @@ good_area: * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, - (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); - if (unlikely(fault & VM_FAULT_ERROR)) - return fault; - if (fault & VM_FAULT_MAJOR) - tsk->maj_flt++; - else - tsk->min_flt++; + fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); return fault; check_stack: @@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0); tsk = current; mm = tsk->mm; @@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (!user_mode(regs) && !search_exception_tables(regs->UCreg_pc)) goto no_context; +retry: down_read(&mm->mmap_sem); } else { /* @@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs) #endif } - fault = __do_pf(mm, addr, fsr, tsk); + fault = __do_pf(mm, addr, fsr, flags, tsk); + + /* If we need to retry but a fatal signal is pending, handle the + * signal first. We do not need to release the mmap_sem because + * it would already be released in __lock_page_or_retry in + * mm/filemap.c. */ + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return 0; + + if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) { + if (fault & VM_FAULT_MAJOR) + tsk->maj_flt++; + else + tsk->min_flt++; + if (fault & VM_FAULT_RETRY) { + /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk + * of starvation. */ + flags &= ~FAULT_FLAG_ALLOW_RETRY; + goto retry; + } + } + up_read(&mm->mmap_sem); /* diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c760e073963..e87b0cac14b 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -12,6 +12,8 @@ #include <asm/setup.h> #include <asm/desc.h> +#undef memcpy /* Use memcpy from misc.c */ + #include "eboot.h" static efi_system_table_t *sys_table; diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 2a017441b8b..8c132a625b9 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -476,6 +476,3 @@ die: setup_corrupt: .byte 7 .string "No setup signature found...\n" - - .data -dummy: .long 0 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index dcfde52979c..19f16ebaf4f 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) } #endif -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- * - * This is valid only for kernel mode traps. - */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(&regs->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 11676cf65ae..d5e0d717005 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -101,6 +101,8 @@ static int __init acpi_sleep_setup(char *str) #endif if (strncmp(str, "nonvs", 5) == 0) acpi_nvs_nosave(); + if (strncmp(str, "nonvs_s3", 8) == 0) + acpi_nvs_nosave_s3(); if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); str = strchr(str, ','); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f7e98a2c0d1..1b7d1656a04 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. + */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + wrmsrl_safe(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 698b6ec12e0..1ac581f38df 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -6,7 +6,7 @@ * * Written by Jacob Shin - AMD, Inc. * - * Support: borislav.petkov@amd.com + * Maintained by: Borislav Petkov <bp@alien8.de> * * April 2006 * - added support for AMD Family 0x10 processors diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 5f88abf07e9..4f9a3cbfc4a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -285,34 +285,39 @@ void cmci_clear(void) raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); } +static long cmci_rediscover_work_func(void *arg) +{ + int banks; + + /* Recheck banks in case CPUs don't all have the same */ + if (cmci_supported(&banks)) + cmci_discover(banks); + + return 0; +} + /* * After a CPU went down cycle through all the others and rediscover * Must run in process context.
*/ void cmci_rediscover(int dying) { - int banks; - int cpu; - cpumask_var_t old; + int cpu, banks; if (!cmci_supported(&banks)) return; - if (!alloc_cpumask_var(&old, GFP_KERNEL)) - return; - cpumask_copy(old, &current->cpus_allowed); for_each_online_cpu(cpu) { if (cpu == dying) continue; - if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) + + if (cpu == smp_processor_id()) { + cmci_rediscover_work_func(NULL); continue; - /* Recheck banks in case CPUs don't all have the same */ - if (cmci_supported(&banks)) - cmci_discover(banks); - } + } - set_cpus_allowed_ptr(current, old); - free_cpumask_var(old); + work_on_cpu(cpu, cmci_rediscover_work_func, NULL); + } } /* diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b51b2c7ee51..1328fe49a3f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -995,8 +995,8 @@ END(interrupt) */ .p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: - ASM_CLAC XCPT_FRAME + ASM_CLAC addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ interrupt do_IRQ /* 0(%rsp): old_rsp-ARGOFFSET */ @@ -1135,8 +1135,8 @@ END(common_interrupt) */ .macro apicinterrupt num sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC pushq_cfi $~(\num) .Lcommon_\sym: interrupt \do_sym @@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \ */ .macro zeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1208,8 +1208,8 @@ END(\sym) .macro paranoidzeroentry sym do_sym ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1227,8 +1227,8 @@ END(\sym) #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) .macro paranoidzeroentry_ist sym do_sym ist ENTRY(\sym) - ASM_CLAC INTR_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp @@ -1247,8 +1247,8 @@ END(\sym) .macro errorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 @@ -1266,8 +1266,8 @@ END(\sym) /* error code is on the stack already */ .macro paranoiderrorentry sym do_sym ENTRY(\sym) - ASM_CLAC XCPT_FRAME + ASM_CLAC PARAVIRT_ADJUST_EXCEPTION_FRAME subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7720ff5a9ee..efdec7cd8e0 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -8,8 +8,8 @@ * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> * * Maintainers: - * Andreas Herrmann <andreas.herrmann3@amd.com> - * Borislav Petkov <borislav.petkov@amd.com> + * Andreas Herrmann <herrmann.der.user@googlemail.com> + * Borislav Petkov <bp@alien8.de> * * This driver allows to upgrade microcode on F10h AMD * CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b00b33a1839..5e0596b0632 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -22,6 +22,7 @@ #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/rcupdate.h> +#include <linux/module.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'. + * + * Now, if the stack is empty, '&regs->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. + */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)&regs->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} +EXPORT_SYMBOL_GPL(kernel_stack_pointer); + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index a10e4601685..58fc5148882 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; + if (!static_cpu_has(X86_FEATURE_XSAVE)) + return 0; + best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ad6b1dd06f8..f85815945fc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) } } - exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } else { - exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; - vmcs_write32(SECONDARY_VM_EXEC_CONTROL, - exec_control); + if (cpu_has_secondary_exec_ctrls()) { + exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); + exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; + vmcs_write32(SECONDARY_VM_EXEC_CONTROL, + exec_control); + } if (best) best->ebx &= ~bit(X86_FEATURE_INVPCID); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 224a7e78cb6..4f7641756be 100644 --- a/arch/x86/kvm/x86.c +++
b/arch/x86/kvm/x86.c @@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int pending_vec, max_bits, idx; struct desc_ptr dt; + if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) + return -EINVAL; + dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 0777f042e40..60f926cd8b0 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, } if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 - || vmflag == VM_HUGETLB) { + || vmflag & VM_HUGETLB) { local_flush_tlb(); goto flush_all; } diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 41bd2a2d2c5..b914e20b5a0 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c @@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value) reg_read(reg, value); } +static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&pci_config_lock, flags); + /* force interrupt pin value to 0 */ + *value = reg->sim_reg.value & 0xfff00ff; + raw_spin_unlock_irqrestore(&pci_config_lock, flags); +} + static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write) DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write) @@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write) DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write) + DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write) DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write) @@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = { DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write) DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write) + DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write) DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write) + DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write) }; static void __init init_sim_regs(void) diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index 4c61b52191e..92525cb8e54 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -21,12 +21,25 @@ #include <asm/i8259.h> #include <asm/io.h> #include <asm/io_apic.h> +#include <asm/emergency-restart.h> static int ce4100_i8042_detect(void) { return 0; } +/* + * The CE4100 platform has an internal 8051 Microcontroller which is + * responsible for signaling to the external Power Management Unit the + * intention to reset, reboot or power off the system. This 8051 device has + * its command register mapped at I/O port 0xcf9 and the value 0x4 is used + * to power off the system. 
+ */ +static void ce4100_power_off(void) +{ + outb(0x4, 0xcf9); +} + #ifdef CONFIG_SERIAL_8250 static unsigned int mem_serial_in(struct uart_port *p, int offset) @@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void) x86_init.mpparse.find_smp_config = x86_init_noop; x86_init.pci.init = ce4100_pci_init; + /* + * By default, the reboot method is ACPI which is supported by the + * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue + * the bootloader will however issue a system power off instead of + * reboot. By using BOOT_KBD we ensure proper system reboot as + * expected. + */ + reboot_type = BOOT_KBD; + #ifdef CONFIG_X86_IO_APIC x86_init.pci.init_irq = sdv_pci_init; x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck; #endif + + pm_power_off = ce4100_power_off; } diff --git a/block/blk-exec.c b/block/blk-exec.c index 8b6dc5bd4dd..f71eac35c1b 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, rq_end_io_fn *done) { int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + bool is_pm_resume; WARN_ON(irqs_disabled()); rq->rq_disk = bd_disk; rq->end_io = done; + /* + * need to check this before __blk_run_queue(), because rq can + * be freed before that returns. + */ + is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; spin_lock_irq(q->queue_lock); @@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, __elv_add_request(q, rq, where); __blk_run_queue(q); /* the queue is stopped so it won't be run */ - if (rq->cmd_type == REQ_TYPE_PM_RESUME) + if (is_pm_resume) q->request_fn(q); spin_unlock_irq(q->queue_lock); } diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index 24c807f9663..eb30e5ab4ca 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/memory_hotplug.h> #include <linux/slab.h> +#include <linux/acpi.h> #include <acpi/acpi_drivers.h> #define ACPI_MEMORY_DEVICE_CLASS "memory" @@ -78,6 +79,7 @@ struct acpi_memory_info { unsigned short caching; /* memory cache attribute */ unsigned short write_protect; /* memory read/write attribute */ unsigned int enabled:1; + unsigned int failed:1; }; struct acpi_memory_device { @@ -86,8 +88,6 @@ struct acpi_memory_device { struct list_head res_list; }; -static int acpi_hotmem_initialized; - static acpi_status acpi_memory_get_resource(struct acpi_resource *resource, void *context) { @@ -125,12 +125,20 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context) return AE_OK; } +static void +acpi_memory_free_device_resources(struct acpi_memory_device *mem_device) +{ + struct acpi_memory_info *info, *n; + + list_for_each_entry_safe(info, n, &mem_device->res_list, list) + kfree(info); + INIT_LIST_HEAD(&mem_device->res_list); +} + static int acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) { acpi_status status; - struct acpi_memory_info *info, *n; - if (!list_empty(&mem_device->res_list)) return 0; @@ -138,9 +146,7 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS, acpi_memory_get_resource, mem_device); if (ACPI_FAILURE(status)) { - list_for_each_entry_safe(info, n, &mem_device->res_list, list) - kfree(info); - INIT_LIST_HEAD(&mem_device->res_list); + acpi_memory_free_device_resources(mem_device); return -EINVAL; } @@ -170,7 
+176,7 @@ acpi_memory_get_device(acpi_handle handle, /* Get the parent device */ result = acpi_bus_get_device(phandle, &pdevice); if (result) { - printk(KERN_WARNING PREFIX "Cannot get acpi bus device"); + acpi_handle_warn(phandle, "Cannot get acpi bus device\n"); return -EINVAL; } @@ -180,14 +186,14 @@ acpi_memory_get_device(acpi_handle handle, */ result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE); if (result) { - printk(KERN_WARNING PREFIX "Cannot add acpi bus"); + acpi_handle_warn(handle, "Cannot add acpi bus\n"); return -EINVAL; } end: *mem_device = acpi_driver_data(device); if (!(*mem_device)) { - printk(KERN_ERR "\n driver data not found"); + dev_err(&device->dev, "driver data not found\n"); return -ENODEV; } @@ -224,7 +230,8 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) /* Get the range from the _CRS */ result = acpi_memory_get_device_resources(mem_device); if (result) { - printk(KERN_ERR PREFIX "get_device_resources failed\n"); + dev_err(&mem_device->device->dev, + "get_device_resources failed\n"); mem_device->state = MEMORY_INVALID_STATE; return result; } @@ -251,13 +258,27 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) node = memory_add_physaddr_to_nid(info->start_addr); result = add_memory(node, info->start_addr, info->length); - if (result) + + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) { + info->failed = 1; continue; - info->enabled = 1; + } + + if (!result) + info->enabled = 1; + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. + */ num_enabled++; } if (!num_enabled) { - printk(KERN_ERR PREFIX "add_memory failed\n"); + dev_err(&mem_device->device->dev, "add_memory failed\n"); mem_device->state = MEMORY_INVALID_STATE; return -EINVAL; } @@ -272,68 +293,31 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device) return 0; } -static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device) +static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device) { - acpi_status status; - struct acpi_object_list arg_list; - union acpi_object arg; - unsigned long long current_status; - - - /* Issue the _EJ0 command */ - arg_list.count = 1; - arg_list.pointer = &arg; - arg.type = ACPI_TYPE_INTEGER; - arg.integer.value = 1; - status = acpi_evaluate_object(mem_device->device->handle, - "_EJ0", &arg_list, NULL); - /* Return on _EJ0 failure */ - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed")); - return -ENODEV; - } - - /* Evalute _STA to check if the device is disabled */ - status = acpi_evaluate_integer(mem_device->device->handle, "_STA", - NULL, &current_status); - if (ACPI_FAILURE(status)) - return -ENODEV; - - /* Check for device status. Device should be disabled */ - if (current_status & ACPI_STA_DEVICE_ENABLED) - return -EINVAL; + int result = 0; + struct acpi_memory_info *info, *n; - return 0; -} + list_for_each_entry_safe(info, n, &mem_device->res_list, list) { + if (info->failed) + /* The kernel does not use this memory block */ + continue; -static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) -{ - int result; - struct acpi_memory_info *info, *n; + if (!info->enabled) + /* + * The kernel uses this memory block, but it may be not + * managed by us. 
+ */ + return -EBUSY; + result = remove_memory(info->start_addr, info->length); + if (result) + return result; - /* - * Ask the VM to offline this memory range. - * Note: Assume that this function returns zero on success - */ - list_for_each_entry_safe(info, n, &mem_device->res_list, list) { - if (info->enabled) { - result = remove_memory(info->start_addr, info->length); - if (result) - return result; - } + list_del(&info->list); kfree(info); } - /* Power-off and eject the device */ - result = acpi_memory_powerdown_device(mem_device); - if (result) { - /* Set the status of the device to invalid */ - mem_device->state = MEMORY_INVALID_STATE; - return result; - } - - mem_device->state = MEMORY_POWER_OFF_STATE; return result; } @@ -341,6 +325,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) { struct acpi_memory_device *mem_device; struct acpi_device *device; + struct acpi_eject_event *ej_event = NULL; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ switch (event) { @@ -353,7 +338,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "\nReceived DEVICE CHECK notification for device\n")); if (acpi_memory_get_device(handle, &mem_device)) { - printk(KERN_ERR PREFIX "Cannot find driver data\n"); + acpi_handle_err(handle, "Cannot find driver data\n"); break; } @@ -361,7 +346,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) break; if (acpi_memory_enable_device(mem_device)) { - printk(KERN_ERR PREFIX "Cannot enable memory device\n"); + acpi_handle_err(handle,"Cannot enable memory device\n"); break; } @@ -373,40 +358,28 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) "\nReceived EJECT REQUEST notification for device\n")); if (acpi_bus_get_device(handle, &device)) { - printk(KERN_ERR PREFIX "Device doesn't exist\n"); + acpi_handle_err(handle, "Device doesn't exist\n"); break; } mem_device = acpi_driver_data(device); if (!mem_device) { - printk(KERN_ERR PREFIX "Driver Data is NULL\n"); + acpi_handle_err(handle, "Driver Data is NULL\n"); break; } - /* - * Currently disabling memory device from kernel mode - * TBD: Can also be disabled from user mode scripts - * TBD: Can also be disabled by Callback registration - * with generic sysfs driver - */ - if (acpi_memory_disable_device(mem_device)) { - printk(KERN_ERR PREFIX "Disable memory device\n"); - /* - * If _EJ0 was called but failed, _OST is not - * necessary. 
- */ - if (mem_device->state == MEMORY_INVALID_STATE) - return; - + ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); + if (!ej_event) { + pr_err(PREFIX "No memory, dropping EJECT\n"); break; } - /* - * TBD: Invoke acpi_bus_remove to cleanup data structures - */ + ej_event->handle = handle; + ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; + acpi_os_hotplug_execute(acpi_bus_hot_remove_device, + (void *)ej_event); - /* _EJ0 succeeded; _OST is not necessary */ + /* eject is performed asynchronously */ return; - default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); @@ -420,6 +393,15 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) return; } +static void acpi_memory_device_free(struct acpi_memory_device *mem_device) +{ + if (!mem_device) + return; + + acpi_memory_free_device_resources(mem_device); + kfree(mem_device); +} + static int acpi_memory_device_add(struct acpi_device *device) { int result; @@ -449,23 +431,16 @@ static int acpi_memory_device_add(struct acpi_device *device) /* Set the device state */ mem_device->state = MEMORY_POWER_ON_STATE; - printk(KERN_DEBUG "%s \n", acpi_device_name(device)); - - /* - * Early boot code has recognized memory area by EFI/E820. - * If DSDT shows these memory devices on boot, hotplug is not necessary - * for them. So, it just returns until completion of this driver's - * start up. - */ - if (!acpi_hotmem_initialized) - return 0; + pr_debug("%s\n", acpi_device_name(device)); if (!acpi_memory_check_device(mem_device)) { /* call add_memory func */ result = acpi_memory_enable_device(mem_device); - if (result) - printk(KERN_ERR PREFIX + if (result) { + dev_err(&device->dev, "Error in acpi_memory_enable_device\n"); + acpi_memory_device_free(mem_device); + } } return result; } @@ -473,13 +448,18 @@ static int acpi_memory_device_add(struct acpi_device *device) static int acpi_memory_device_remove(struct acpi_device *device, int type) { struct acpi_memory_device *mem_device = NULL; - + int result; if (!device || !acpi_driver_data(device)) return -EINVAL; mem_device = acpi_driver_data(device); - kfree(mem_device); + + result = acpi_memory_remove_memory(mem_device); + if (result) + return result; + + acpi_memory_device_free(mem_device); return 0; } @@ -568,7 +548,6 @@ static int __init acpi_memory_device_init(void) return -ENODEV; } - acpi_hotmem_initialized = 1; return 0; } diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index af4aad6ee2e..16fa979f718 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -286,7 +286,7 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; - if (strict_strtoul(buf, 0, &num)) + if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) return -EINVAL; @@ -309,7 +309,7 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; - if (strict_strtoul(buf, 0, &num)) + if (kstrtoul(buf, 0, &num)) return -EINVAL; if (num < 1 || num >= 100) return -EINVAL; @@ -332,7 +332,7 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long num; - if (strict_strtoul(buf, 0, &num)) + if (kstrtoul(buf, 0, &num)) return -EINVAL; mutex_lock(&isolated_cpus_lock); acpi_pad_idle_cpus(num); @@ -457,7 +457,7 @@ static void acpi_pad_notify(acpi_handle handle, u32 event, dev_name(&device->dev), event, 0); 
break; default: - printk(KERN_WARNING "Unsupported event [0x%x]\n", event); + pr_warn("Unsupported event [0x%x]\n", event); break; } } diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 7f1d40797e8..c8bc24bd1f7 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -161,3 +161,6 @@ acpi-y += \ utxfinit.o \ utxferror.o \ utxfmutex.o + +acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o + diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 5e8abb07724..432a318c9ed 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -44,17 +44,28 @@ #ifndef __ACDEBUG_H__ #define __ACDEBUG_H__ -#define ACPI_DEBUG_BUFFER_SIZE 4196 +#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ -struct command_info { +struct acpi_db_command_info { char *name; /* Command Name */ u8 min_args; /* Minimum arguments required */ }; -struct argument_info { +struct acpi_db_command_help { + u8 line_count; /* Number of help lines */ + char *invocation; /* Command Invocation */ + char *description; /* Command Description */ +}; + +struct acpi_db_argument_info { char *name; /* Argument Name */ }; +struct acpi_db_execute_walk { + u32 count; + u32 max_count; +}; + #define PARAM_LIST(pl) pl #define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose) #define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\ @@ -77,59 +88,71 @@ acpi_db_single_step(struct acpi_walk_state *walk_state, /* * dbcmds - debug commands and output routines */ -acpi_status acpi_db_disassemble_method(char *name); +struct acpi_namespace_node *acpi_db_convert_to_node(char *in_string); void acpi_db_display_table_info(char *table_arg); -void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg); +void acpi_db_display_template(char *buffer_arg); -void -acpi_db_set_method_breakpoint(char *location, - struct acpi_walk_state *walk_state, - union acpi_parse_object *op); +void acpi_db_unload_acpi_table(char *name); -void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); +void acpi_db_send_notify(char *name, u32 value); -void acpi_db_get_bus_info(void); +void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); -void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); +acpi_status acpi_db_sleep(char *object_arg); -void acpi_db_dump_namespace(char *start_arg, char *depth_arg); +void acpi_db_display_locks(void); -void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); +void acpi_db_display_resources(char *object_arg); -void acpi_db_send_notify(char *name, u32 value); +ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) + +void acpi_db_display_handlers(void); + +ACPI_HW_DEPENDENT_RETURN_VOID(void + acpi_db_generate_gpe(char *gpe_arg, + char *block_arg)) + +/* + * dbmethod - control method commands + */ +void +acpi_db_set_method_breakpoint(char *location, + struct acpi_walk_state *walk_state, + union acpi_parse_object *op); + +void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op); void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg); -acpi_status -acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); +acpi_status acpi_db_disassemble_method(char *name); -void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg); +void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op); -acpi_status acpi_db_find_name_in_namespace(char *name_arg); +void acpi_db_batch_execute(char 
*count_arg); +/* + * dbnames - namespace commands + */ void acpi_db_set_scope(char *name); -ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg)) +void acpi_db_dump_namespace(char *start_arg, char *depth_arg); -void acpi_db_find_references(char *object_arg); +void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg); -void acpi_db_display_locks(void); +acpi_status acpi_db_find_name_in_namespace(char *name_arg); -void acpi_db_display_resources(char *object_arg); +void acpi_db_check_predefined_names(void); -ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void)) +acpi_status +acpi_db_display_objects(char *obj_type_arg, char *display_count_arg); void acpi_db_check_integrity(void); -ACPI_HW_DEPENDENT_RETURN_VOID(void - acpi_db_generate_gpe(char *gpe_arg, - char *block_arg)) - -void acpi_db_check_predefined_names(void); +void acpi_db_find_references(char *object_arg); -void acpi_db_batch_execute(void); +void acpi_db_get_bus_info(void); /* * dbdisply - debug display commands @@ -161,7 +184,8 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc, /* * dbexec - debugger control method execution */ -void acpi_db_execute(char *name, char **args, u32 flags); +void +acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags); void acpi_db_create_execution_threads(char *num_threads_arg, @@ -175,7 +199,8 @@ u32 acpi_db_get_cache_info(struct acpi_memory_list *cache); * dbfileio - Debugger file I/O commands */ acpi_object_type -acpi_db_match_argument(char *user_argument, struct argument_info *arguments); +acpi_db_match_argument(char *user_argument, + struct acpi_db_argument_info *arguments); void acpi_db_close_debug_file(void); @@ -208,6 +233,11 @@ acpi_db_command_dispatch(char *input_buffer, void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context); +acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op); + +char *acpi_db_get_next_token(char *string, + char **next, acpi_object_type * return_type); + /* * dbstats - Generation and display of ACPI table statistics */ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index 5935ba6707e..ed33ebcdaeb 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -309,10 +309,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state); acpi_status acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state); -struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object - *origin, union acpi_operand_object - *mth_desc, struct acpi_thread_state - *thread); +struct acpi_walk_state * acpi_ds_create_walk_state(acpi_owner_id owner_id, + union acpi_parse_object + *origin, + union acpi_operand_object + *mth_desc, + struct acpi_thread_state + *thread); acpi_status acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index c0a43b38c6a..e975c672044 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -84,9 +84,11 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info); acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); -acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); +acpi_status +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); -acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); +acpi_status 
+acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, u32 gpe_number); diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index ce79100fb5e..64472e4ec32 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -70,7 +70,7 @@ /* * Enable "slack" in the AML interpreter? Default is FALSE, and the - * interpreter strictly follows the ACPI specification. Setting to TRUE + * interpreter strictly follows the ACPI specification. Setting to TRUE * allows the interpreter to ignore certain errors and/or bad AML constructs. * * Currently, these features are enabled by this flag: @@ -155,26 +155,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE); /***************************************************************************** * - * Debug support - * - ****************************************************************************/ - -/* Procedure nesting level for debug output */ - -extern u32 acpi_gbl_nesting_level; - -ACPI_EXTERN u32 acpi_gpe_count; -ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; - -/* Support for dynamic control method tracing mechanism */ - -ACPI_EXTERN u32 acpi_gbl_original_dbg_level; -ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; -ACPI_EXTERN u32 acpi_gbl_trace_dbg_level; -ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; - -/***************************************************************************** - * * ACPI Table globals * ****************************************************************************/ @@ -259,15 +239,6 @@ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE reg * ****************************************************************************/ -#ifdef ACPI_DBG_TRACK_ALLOCATIONS - -/* Lists for tracking memory allocations */ - -ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; -ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; -ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; -#endif - /* Object caches */ ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache; @@ -326,6 +297,15 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS]; #endif +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + +/* Lists for tracking memory allocations */ + +ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list; +ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list; +ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats; +#endif + /***************************************************************************** * * Namespace globals @@ -396,13 +376,35 @@ ACPI_EXTERN struct acpi_gpe_block_info #if (!ACPI_REDUCED_HARDWARE) ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; -ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; +ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler; ACPI_EXTERN void *acpi_gbl_global_event_handler_context; #endif /* !ACPI_REDUCED_HARDWARE */ /***************************************************************************** * + * Debug support + * + ****************************************************************************/ + +/* Procedure nesting level for debug output */ + +extern u32 acpi_gbl_nesting_level; + +/* Event counters */ + +ACPI_EXTERN u32 acpi_gpe_count; +ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; + +/* Support for dynamic control method tracing mechanism */ + +ACPI_EXTERN u32 acpi_gbl_original_dbg_level; +ACPI_EXTERN u32 acpi_gbl_original_dbg_layer; +ACPI_EXTERN u32 
acpi_gbl_trace_dbg_level; +ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer; + +/***************************************************************************** + * * Debugger globals * ****************************************************************************/ @@ -426,10 +428,11 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_stats; ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods; ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]; -ACPI_EXTERN char acpi_gbl_db_line_buf[80]; -ACPI_EXTERN char acpi_gbl_db_parsed_buf[80]; -ACPI_EXTERN char acpi_gbl_db_scope_buf[40]; -ACPI_EXTERN char acpi_gbl_db_debug_filename[40]; +ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]; +ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]; +ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]; +ACPI_EXTERN char acpi_gbl_db_scope_buf[80]; +ACPI_EXTERN char acpi_gbl_db_debug_filename[80]; ACPI_EXTERN u8 acpi_gbl_db_output_to_file; ACPI_EXTERN char *acpi_gbl_db_buffer; ACPI_EXTERN char *acpi_gbl_db_filename; diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index c816ee67509..ff8bd0061e8 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -262,10 +262,10 @@ struct acpi_create_field_info { }; typedef -acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state); +acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state); /* - * Bitmapped ACPI types. Used internally only + * Bitmapped ACPI types. Used internally only */ #define ACPI_BTYPE_ANY 0x00000000 #define ACPI_BTYPE_INTEGER 0x00000001 @@ -486,8 +486,10 @@ struct acpi_gpe_device_info { struct acpi_namespace_node *gpe_device; }; -typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info, - struct acpi_gpe_block_info *gpe_block, void *context); +typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info * + gpe_xrupt_info, + struct acpi_gpe_block_info *gpe_block, + void *context); /* Information about each particular fixed event */ @@ -582,7 +584,7 @@ struct acpi_pscope_state { }; /* - * Thread state - one per thread across multiple walk states. Multiple walk + * Thread state - one per thread across multiple walk states. Multiple walk * states are created when there are nested control methods executing. 
*/ struct acpi_thread_state { @@ -645,7 +647,7 @@ union acpi_generic_state { * ****************************************************************************/ -typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state); +typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state); /* Address Range info block */ @@ -1031,6 +1033,7 @@ struct acpi_db_method_info { acpi_handle method; acpi_handle main_thread_gate; acpi_handle thread_complete_gate; + acpi_handle info_gate; acpi_thread_id *threads; u32 num_threads; u32 num_created; @@ -1041,6 +1044,7 @@ struct acpi_db_method_info { u32 num_loops; char pathname[128]; char **args; + acpi_object_type *types; /* * Arguments to be passed to method for the command diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index a7f68c47f51..5efad99f216 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -84,29 +84,29 @@ /* These macros reverse the bytes during the move, converting little-endian to big endian */ - /* Big Endian <== Little Endian */ - /* Hi...Lo Lo...Hi */ + /* Big Endian <== Little Endian */ + /* Hi...Lo Lo...Hi */ /* 16-bit source, 16/32/64 destination */ #define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\ - (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];} #define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\ - ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ - ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} + ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ + ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} #define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ - ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ - ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} + ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\ + ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];} /* 32-bit source, 16/32/64 destination */ #define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */ #define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\ - (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ - (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ - (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} + (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\ + (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\ + (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];} #define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\ ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\ @@ -196,24 +196,12 @@ #endif #endif -/* Macros based on machine integer width */ - -#if ACPI_MACHINE_WIDTH == 32 -#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_32_TO_16(d, s) - -#elif ACPI_MACHINE_WIDTH == 64 -#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_64_TO_16(d, s) - -#else -#error unknown ACPI_MACHINE_WIDTH -#endif - /* * Fast power-of-two math macros for non-optimized compilers */ -#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) -#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) -#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) +#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2))) +#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2))) +#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1))) #define ACPI_DIV_2(a) _ACPI_DIV(a, 1) #define ACPI_MUL_2(a) _ACPI_MUL(a, 1) @@ -238,12 +226,12 @@ /* * Rounding macros (Power of two boundaries only) */ -#define 
ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ - (~(((acpi_size) boundary)-1))) +#define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \ + (~(((acpi_size) boundary)-1))) -#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ - (((acpi_size) boundary)-1)) & \ - (~(((acpi_size) boundary)-1))) +#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \ + (((acpi_size) boundary)-1)) & \ + (~(((acpi_size) boundary)-1))) /* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */ @@ -264,7 +252,7 @@ #define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary)) -#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) +#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1)) /* * Bitmask creation @@ -355,7 +343,6 @@ * Ascii error messages can be configured out */ #ifndef ACPI_NO_ERROR_MESSAGES - /* * Error reporting. Callers module and line number are inserted by AE_INFO, * the plist contains a set of parens to allow variable-length lists. @@ -375,18 +362,15 @@ #define ACPI_WARN_PREDEFINED(plist) #define ACPI_INFO_PREDEFINED(plist) -#endif /* ACPI_NO_ERROR_MESSAGES */ +#endif /* ACPI_NO_ERROR_MESSAGES */ /* * Debug macros that are conditionally compiled */ #ifdef ACPI_DEBUG_OUTPUT - /* * Function entry tracing */ -#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE - #define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \ acpi_ut_trace(ACPI_DEBUG_PARAMETERS) #define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \ @@ -464,45 +448,19 @@ #endif /* ACPI_SIMPLE_RETURN_MACROS */ -#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */ - -#define ACPI_FUNCTION_TRACE(a) -#define ACPI_FUNCTION_TRACE_PTR(a,b) -#define ACPI_FUNCTION_TRACE_U32(a,b) -#define ACPI_FUNCTION_TRACE_STR(a,b) -#define ACPI_FUNCTION_EXIT -#define ACPI_FUNCTION_STATUS_EXIT(s) -#define ACPI_FUNCTION_VALUE_EXIT(s) -#define ACPI_FUNCTION_TRACE(a) -#define ACPI_FUNCTION_ENTRY() - -#define return_VOID return -#define return_ACPI_STATUS(s) return(s) -#define return_VALUE(s) return(s) -#define return_UINT8(s) return(s) -#define return_UINT32(s) return(s) -#define return_PTR(s) return(s) - -#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */ - /* Conditional execution */ #define ACPI_DEBUG_EXEC(a) a -#define ACPI_NORMAL_EXEC(a) - -#define ACPI_DEBUG_DEFINE(a) a; #define ACPI_DEBUG_ONLY_MEMBERS(a) a; #define _VERBOSE_STRUCTURES -/* Stack and buffer dumping */ +/* Various object display routines for debug */ #define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0) -#define ACPI_DUMP_OPERANDS(a, b, c) acpi_ex_dump_operands(a, b, c) - +#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c) #define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b) #define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d) -#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a) -#define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) +#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT) #else /* @@ -510,25 +468,23 @@ * leaving no executable debug code! 
*/ #define ACPI_DEBUG_EXEC(a) -#define ACPI_NORMAL_EXEC(a) a; - -#define ACPI_DEBUG_DEFINE(a) do { } while(0) -#define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0) -#define ACPI_FUNCTION_TRACE(a) do { } while(0) -#define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0) -#define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0) -#define ACPI_FUNCTION_TRACE_STR(a, b) do { } while(0) -#define ACPI_FUNCTION_EXIT do { } while(0) -#define ACPI_FUNCTION_STATUS_EXIT(s) do { } while(0) -#define ACPI_FUNCTION_VALUE_EXIT(s) do { } while(0) -#define ACPI_FUNCTION_ENTRY() do { } while(0) -#define ACPI_DUMP_STACK_ENTRY(a) do { } while(0) -#define ACPI_DUMP_OPERANDS(a, b, c) do { } while(0) -#define ACPI_DUMP_ENTRY(a, b) do { } while(0) -#define ACPI_DUMP_TABLES(a, b) do { } while(0) -#define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0) -#define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0) -#define ACPI_DUMP_BUFFER(a, b) do { } while(0) +#define ACPI_DEBUG_ONLY_MEMBERS(a) +#define ACPI_FUNCTION_TRACE(a) +#define ACPI_FUNCTION_TRACE_PTR(a, b) +#define ACPI_FUNCTION_TRACE_U32(a, b) +#define ACPI_FUNCTION_TRACE_STR(a, b) +#define ACPI_FUNCTION_EXIT +#define ACPI_FUNCTION_STATUS_EXIT(s) +#define ACPI_FUNCTION_VALUE_EXIT(s) +#define ACPI_FUNCTION_ENTRY() +#define ACPI_DUMP_STACK_ENTRY(a) +#define ACPI_DUMP_OPERANDS(a, b, c) +#define ACPI_DUMP_ENTRY(a, b) +#define ACPI_DUMP_TABLES(a, b) +#define ACPI_DUMP_PATHNAME(a, b, c, d) +#define ACPI_DUMP_BUFFER(a, b) +#define ACPI_DEBUG_PRINT(pl) +#define ACPI_DEBUG_PRINT_RAW(pl) #define return_VOID return #define return_ACPI_STATUS(s) return(s) @@ -556,18 +512,6 @@ #define ACPI_DEBUGGER_EXEC(a) #endif -#ifdef ACPI_DEBUG_OUTPUT -/* - * 1) Set name to blanks - * 2) Copy the object name - */ -#define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\ - ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name)) -#else - -#define ACPI_ADD_OBJECT_NAME(a,b) -#endif - /* * Memory allocation tracking (DEBUG ONLY) */ @@ -578,13 +522,13 @@ /* Memory allocation */ #ifndef ACPI_ALLOCATE -#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS) #endif #ifndef ACPI_ALLOCATE_ZEROED -#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS) #endif #ifndef ACPI_FREE -#define ACPI_FREE(a) acpio_os_free(a) +#define ACPI_FREE(a) acpi_os_free(a) #endif #define ACPI_MEM_TRACKING(a) @@ -592,16 +536,25 @@ /* Memory allocation */ -#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) -#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) +#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS) #define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS) #define ACPI_MEM_TRACKING(a) a #endif /* ACPI_DBG_TRACK_ALLOCATIONS */ -/* Preemption point */ -#ifndef ACPI_PREEMPTION_POINT -#define ACPI_PREEMPTION_POINT() /* no preemption */ -#endif +/* + * Macros used for ACPICA utilities only + */ + +/* Generate a UUID */ + +#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ + (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \ + (b) & 0xFF, ((b) >> 8) & 0xFF, \ + 
(c) & 0xFF, ((c) >> 8) & 0xFF, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) + +#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) #endif /* ACMACROS_H */ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 364a1303fb8..24eb9eac951 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -1,4 +1,3 @@ - /****************************************************************************** * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) @@ -179,7 +178,7 @@ struct acpi_object_method { union acpi_operand_object *mutex; u8 *aml_start; union { - ACPI_INTERNAL_METHOD implementation; + acpi_internal_method implementation; union acpi_operand_object *handler; } dispatch; @@ -198,7 +197,7 @@ struct acpi_object_method { /****************************************************************************** * - * Objects that can be notified. All share a common notify_info area. + * Objects that can be notified. All share a common notify_info area. * *****************************************************************************/ @@ -235,7 +234,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO}; /****************************************************************************** * - * Fields. All share a common header/info field. + * Fields. All share a common header/info field. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 9440d053fbb..d786a5128b7 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -54,7 +54,7 @@ #define _UNK 0x6B /* - * Reserved ASCII characters. Do not use any of these for + * Reserved ASCII characters. Do not use any of these for * internal opcodes, since they are used to differentiate * name strings from AML opcodes */ @@ -63,7 +63,7 @@ #define _PFX 0x6D /* - * All AML opcodes and the parse-time arguments for each. Used by the AML + * All AML opcodes and the parse-time arguments for each. Used by the AML * parser Each list is compressed into a 32-bit number and stored in the * master opcode table (in psopcode.c). */ @@ -193,7 +193,7 @@ #define ARGP_ZERO_OP ARG_NONE /* - * All AML opcodes and the runtime arguments for each. Used by the AML + * All AML opcodes and the runtime arguments for each. Used by the AML * interpreter Each list is compressed into a 32-bit number and stored * in the master opcode table (in psopcode.c). * diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index b725d780d34..eefcf47a61a 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -150,8 +150,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state); void acpi_ps_pop_scope(struct acpi_parse_state *parser_state, - union acpi_parse_object **op, - u32 * arg_list, u32 * arg_count); + union acpi_parse_object **op, u32 *arg_list, u32 *arg_count); acpi_status acpi_ps_push_scope(struct acpi_parse_state *parser_state, diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 3080c017f5b..9dfa1c83bd4 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -150,8 +150,7 @@ enum acpi_return_package_types { * is saved here (rather than in a separate table) in order to minimize the * overall size of the stored data. 
*/ -static const union acpi_predefined_info predefined_names[] = -{ +static const union acpi_predefined_info predefined_names[] = { {{"_AC0", 0, ACPI_RTYPE_INTEGER}}, {{"_AC1", 0, ACPI_RTYPE_INTEGER}}, {{"_AC2", 0, ACPI_RTYPE_INTEGER}}, @@ -538,7 +537,8 @@ static const union acpi_predefined_info predefined_names[] = /* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */ - {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, + {{"_WAK", 1, + ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}}, {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */ /* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */ @@ -551,11 +551,12 @@ static const union acpi_predefined_info predefined_names[] = }; #if 0 + /* This is an internally implemented control method, no need to check */ - {{"_OSI", 1, ACPI_RTYPE_INTEGER}}, +{ { +"_OSI", 1, ACPI_RTYPE_INTEGER}}, /* TBD: */ - _PRT - currently ignore reversed entries. attempt to fix here? think about possibly fixing package elements like _BIF, etc. #endif diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index f196e2c9a71..937e66c65d1 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -53,7 +53,7 @@ ****************************************************************************/ /* - * Walk state - current state of a parse tree walk. Used for both a leisurely + * Walk state - current state of a parse tree walk. Used for both a leisurely * stroll through the tree (for whatever reason), and for control method * execution. */ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 5035327ebcc..b0f5f92b674 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -69,6 +69,22 @@ extern const char *acpi_gbl_siz_decode[]; extern const char *acpi_gbl_trs_decode[]; extern const char *acpi_gbl_ttp_decode[]; extern const char *acpi_gbl_typ_decode[]; +extern const char *acpi_gbl_ppc_decode[]; +extern const char *acpi_gbl_ior_decode[]; +extern const char *acpi_gbl_dts_decode[]; +extern const char *acpi_gbl_ct_decode[]; +extern const char *acpi_gbl_sbt_decode[]; +extern const char *acpi_gbl_am_decode[]; +extern const char *acpi_gbl_sm_decode[]; +extern const char *acpi_gbl_wm_decode[]; +extern const char *acpi_gbl_cph_decode[]; +extern const char *acpi_gbl_cpo_decode[]; +extern const char *acpi_gbl_dp_decode[]; +extern const char *acpi_gbl_ed_decode[]; +extern const char *acpi_gbl_bpb_decode[]; +extern const char *acpi_gbl_sb_decode[]; +extern const char *acpi_gbl_fc_decode[]; +extern const char *acpi_gbl_pt_decode[]; #endif /* Types for Resource descriptor entries */ @@ -79,14 +95,14 @@ extern const char *acpi_gbl_typ_decode[]; #define ACPI_SMALL_VARIABLE_LENGTH 3 typedef -acpi_status(*acpi_walk_aml_callback) (u8 * aml, +acpi_status(*acpi_walk_aml_callback) (u8 *aml, u32 length, u32 offset, u8 resource_index, void **context); typedef acpi_status(*acpi_pkg_callback) (u8 object_type, - union acpi_operand_object * source_object, + union acpi_operand_object *source_object, union acpi_generic_state * state, void *context); @@ -202,7 +218,9 @@ extern const u8 _acpi_ctype[]; #define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU)) #define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP)) -#endif /* ACPI_USE_SYSTEM_CLIBRARY */ +#endif /* !ACPI_USE_SYSTEM_CLIBRARY */ + 
+#define ACPI_IS_ASCII(c) ((c) < 0x80) /* * utcopy - Object construction and conversion interfaces @@ -210,11 +228,11 @@ extern const u8 _acpi_ctype[]; acpi_status acpi_ut_build_simple_object(union acpi_operand_object *obj, union acpi_object *user_obj, - u8 * data_space, u32 * buffer_space_used); + u8 *data_space, u32 *buffer_space_used); acpi_status acpi_ut_build_package_object(union acpi_operand_object *obj, - u8 * buffer, u32 * space_used); + u8 *buffer, u32 *space_used); acpi_status acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj, @@ -287,9 +305,10 @@ acpi_ut_ptr_exit(u32 line_number, const char *function_name, const char *module_name, u32 component_id, u8 *ptr); -void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id); +void +acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id); -void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display); +void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset); void acpi_ut_report_error(char *module_name, u32 line_number); @@ -337,15 +356,19 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node, */ acpi_status acpi_ut_execute_HID(struct acpi_namespace_node *device_node, - struct acpica_device_id **return_id); + struct acpi_pnp_device_id ** return_id); acpi_status acpi_ut_execute_UID(struct acpi_namespace_node *device_node, - struct acpica_device_id **return_id); + struct acpi_pnp_device_id ** return_id); + +acpi_status +acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, + struct acpi_pnp_device_id **return_id); acpi_status acpi_ut_execute_CID(struct acpi_namespace_node *device_node, - struct acpica_device_id_list **return_cid_list); + struct acpi_pnp_device_id_list ** return_cid_list); /* * utlock - reader/writer locks @@ -479,15 +502,19 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object, void acpi_ut_strupr(char *src_string); +void acpi_ut_strlwr(char *src_string); + +int acpi_ut_stricmp(char *string1, char *string2); + void acpi_ut_print_string(char *string, u8 max_length); u8 acpi_ut_valid_acpi_name(u32 name); -acpi_name acpi_ut_repair_name(char *name); +void acpi_ut_repair_name(char *name); u8 acpi_ut_valid_acpi_char(char character, u32 position); -acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer); +acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer); /* Values for Base above (16=Hex, 10=Decimal) */ @@ -508,12 +535,12 @@ acpi_ut_display_init_pathname(u8 type, * utresrc */ acpi_status -acpi_ut_walk_aml_resources(u8 * aml, +acpi_ut_walk_aml_resources(u8 *aml, acpi_size aml_length, acpi_walk_aml_callback user_function, void **context); -acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index); +acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index); u32 acpi_ut_get_descriptor_length(void *aml); @@ -524,8 +551,7 @@ u8 acpi_ut_get_resource_header_length(void *aml); u8 acpi_ut_get_resource_type(void *aml); acpi_status -acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, - u8 ** end_tag); +acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag); /* * utmutex - mutex support diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index af4947956ec..968449685e0 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: amlresrc.h - AML resource descriptors diff 
--git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 465f02134b8..57895db3231 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -280,7 +280,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state, /* * Get the return value and save as the last result - * value. This is the only place where walk_state->return_desc + * value. This is the only place where walk_state->return_desc * is set to anything other than zero! */ walk_state->return_desc = walk_state->operands[0]; diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 3da6fd8530c..b5b904ee815 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -277,7 +277,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op, * * RETURN: Status * - * DESCRIPTION: Process all named fields in a field declaration. Names are + * DESCRIPTION: Process all named fields in a field declaration. Names are * entered into the namespace. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index aa9a5d4e405..52eb4e01622 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -170,7 +170,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) * * RETURN: Status * - * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, + * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, * increments the thread count, and waits at the method semaphore * for clearance to execute. * @@ -444,7 +444,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, * RETURN: Status * * DESCRIPTION: Restart a method that was preempted by another (nested) method - * invocation. Handle the return value (if any) from the callee. + * invocation. Handle the return value (if any) from the callee. * ******************************************************************************/ @@ -530,7 +530,7 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state, * * RETURN: None * - * DESCRIPTION: Terminate a control method. Delete everything that the method + * DESCRIPTION: Terminate a control method. Delete everything that the method * created, delete all locals and arguments, and delete the parse * tree if requested. * diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 8d55cebaa65..9a83b7e0f3b 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -76,7 +76,7 @@ acpi_ds_method_data_get_type(u16 opcode, * RETURN: Status * * DESCRIPTION: Initialize the data structures that hold the method's arguments - * and locals. The data struct is an array of namespace nodes for + * and locals. The data struct is an array of namespace nodes for * each - this allows ref_of and de_ref_of to work properly for these * special data types. * @@ -129,7 +129,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state) * * RETURN: None * - * DESCRIPTION: Delete method locals and arguments. Arguments are only + * DESCRIPTION: Delete method locals and arguments. Arguments are only * deleted if this method was called from another method. * ******************************************************************************/ @@ -183,7 +183,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state) * * RETURN: Status * - * DESCRIPTION: Initialize arguments for a method. 
The parameter list is a list + * DESCRIPTION: Initialize arguments for a method. The parameter list is a list * of ACPI operand objects, either null terminated or whose length * is defined by max_param_count. * @@ -401,7 +401,7 @@ acpi_ds_method_data_get_value(u8 type, * This means that either 1) The expected argument was * not passed to the method, or 2) A local variable * was referenced by the method (via the ASL) - * before it was initialized. Either case is an error. + * before it was initialized. Either case is an error. */ /* If slack enabled, init the local_x/arg_x to an Integer of value zero */ @@ -465,7 +465,7 @@ acpi_ds_method_data_get_value(u8 type, * * RETURN: None * - * DESCRIPTION: Delete the entry at Opcode:Index. Inserts + * DESCRIPTION: Delete the entry at Opcode:Index. Inserts * a null into the stack slot after the object is deleted. * ******************************************************************************/ @@ -523,7 +523,7 @@ acpi_ds_method_data_delete_value(u8 type, * * RETURN: Status * - * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed + * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed * as the new value for the Arg or Local and the reference count * for obj_desc is incremented. * @@ -566,7 +566,7 @@ acpi_ds_store_object_to_local(u8 type, /* * If the reference count on the object is more than one, we must - * take a copy of the object before we store. A reference count + * take a copy of the object before we store. A reference count * of exactly 1 means that the object was just created during the * evaluation of an expression, and we can safely use it since it * is not used anywhere else. diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 68592dd3496..c9f15d3a368 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -293,7 +293,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state, /* * Second arg is the buffer data (optional) byte_list can be either - * individual bytes or a string initializer. In either case, a + * individual bytes or a string initializer. In either case, a * byte_list appears in the AML. */ arg = op->common.value.arg; /* skip first arg */ @@ -568,7 +568,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, /* * Because of the execution pass through the non-control-method - * parts of the table, we can arrive here twice. Only init + * parts of the table, we can arrive here twice. Only init * the named object node the first time through */ if (acpi_ns_get_attached_object(node)) { @@ -618,7 +618,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state, * RETURN: Status * * DESCRIPTION: Initialize a namespace object from a parser Op and its - * associated arguments. The namespace object is a more compact + * associated arguments. The namespace object is a more compact * representation of the Op and its arguments. 
* ******************************************************************************/ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index aa34d8984d3..0df024e5fb6 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -649,7 +649,8 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state, ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && (op->common.parent->common.aml_opcode != AML_VAR_PACKAGE_OP) - && (op->common.parent->common.aml_opcode != AML_NAME_OP))) { + && (op->common.parent->common.aml_opcode != + AML_NAME_OP))) { walk_state->result_obj = obj_desc; } } diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 73a5447475f..afeb99f4948 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -61,7 +61,7 @@ ACPI_MODULE_NAME("dsutils") * * RETURN: None. * - * DESCRIPTION: Clear and remove a reference on an implicit return value. Used + * DESCRIPTION: Clear and remove a reference on an implicit return value. Used * to delete "stale" return values (if enabled, the return value * from every operator is saved at least momentarily, in case the * parent method exits.) @@ -107,7 +107,7 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state) * * DESCRIPTION: Implements the optional "implicit return". We save the result * of every ASL operator and control method invocation in case the - * parent method exit. Before storing a new return value, we + * parent method exit. Before storing a new return value, we * delete the previous return value. * ******************************************************************************/ @@ -198,7 +198,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, * * If there is no parent, or the parent is a scope_op, we are executing * at the method level. An executing method typically has no parent, - * since each method is parsed separately. A method invoked externally + * since each method is parsed separately. A method invoked externally * via execute_control_method has a scope_op as the parent. */ if ((!op->common.parent) || @@ -223,7 +223,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, } /* - * Decide what to do with the result based on the parent. If + * Decide what to do with the result based on the parent. If * the parent opcode will not use the result, delete the object. * Otherwise leave it as is, it will be deleted when it is used * as an operand later. @@ -266,7 +266,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, /* * These opcodes allow term_arg(s) as operands and therefore - * the operands can be method calls. The result is used. + * the operands can be method calls. The result is used. */ goto result_used; @@ -284,7 +284,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op, AML_BANK_FIELD_OP)) { /* * These opcodes allow term_arg(s) as operands and therefore - * the operands can be method calls. The result is used. + * the operands can be method calls. The result is used. */ goto result_used; } @@ -329,9 +329,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op, * * RETURN: Status * - * DESCRIPTION: Used after interpretation of an opcode. If there is an internal + * DESCRIPTION: Used after interpretation of an opcode. If there is an internal * result descriptor, check if the parent opcode will actually use - * this result. If not, delete the result now so that it will + * this result. If not, delete the result now so that it will * not become orphaned. 
* ******************************************************************************/ @@ -376,7 +376,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op, * * RETURN: Status * - * DESCRIPTION: Resolve all operands to their values. Used to prepare + * DESCRIPTION: Resolve all operands to their values. Used to prepare * arguments to a control method invocation (a call from one * method to another.) * @@ -391,7 +391,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state) /* * Attempt to resolve each of the valid operands - * Method arguments are passed by reference, not by value. This means + * Method arguments are passed by reference, not by value. This means * that the actual objects are passed, not copies of the objects. */ for (i = 0; i < walk_state->num_operands; i++) { @@ -451,7 +451,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state) * RETURN: Status * * DESCRIPTION: Translate a parse tree object that is an argument to an AML - * opcode to the equivalent interpreter object. This may include + * opcode to the equivalent interpreter object. This may include * looking up a name or entering a new name into the internal * namespace. * @@ -496,9 +496,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, /* * Special handling for buffer_field declarations. This is a deferred * opcode that unfortunately defines the field name as the last - * parameter instead of the first. We get here when we are performing + * parameter instead of the first. We get here when we are performing * the deferred execution, so the actual name of the field is already - * in the namespace. We don't want to attempt to look it up again + * in the namespace. We don't want to attempt to look it up again * because we may be executing in a different scope than where the * actual opcode exists. */ @@ -560,7 +560,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, * indicate this to the interpreter, set the * object to the root */ - obj_desc = ACPI_CAST_PTR(union + obj_desc = + ACPI_CAST_PTR(union acpi_operand_object, acpi_gbl_root_node); status = AE_OK; @@ -604,8 +605,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state, /* * If the name is null, this means that this is an * optional result parameter that was not specified - * in the original ASL. Create a Zero Constant for a - * placeholder. (Store to a constant is a Noop.) + * in the original ASL. Create a Zero Constant for a + * placeholder. (Store to a constant is a Noop.) */ opcode = AML_ZERO_OP; /* Has no arguments! */ diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 642f3c053e8..58593931be9 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -57,7 +57,7 @@ ACPI_MODULE_NAME("dswexec") /* * Dispatch table for opcode classes */ -static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = { +static acpi_execute_op acpi_gbl_op_type_dispatch[] = { acpi_ex_opcode_0A_0T_1R, acpi_ex_opcode_1A_0T_0R, acpi_ex_opcode_1A_0T_1R, @@ -204,7 +204,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state, * RETURN: Status * * DESCRIPTION: Descending callback used during the execution of control - * methods. This is where most operators and operands are + * methods. This is where most operators and operands are * dispatched to the interpreter. 
* ****************************************************************************/ @@ -297,7 +297,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, if (walk_state->walk_type & ACPI_WALK_METHOD) { /* * Found a named object declaration during method execution; - * we must enter this object into the namespace. The created + * we must enter this object into the namespace. The created * object is temporary and will be deleted upon completion of * the execution of this method. * @@ -348,7 +348,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, * RETURN: Status * * DESCRIPTION: Ascending callback used during the execution of control - * methods. The only thing we really need to do here is to + * methods. The only thing we really need to do here is to * notice the beginning of IF, ELSE, and WHILE blocks. * ****************************************************************************/ @@ -432,7 +432,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) if (ACPI_SUCCESS(status)) { /* * Dispatch the request to the appropriate interpreter handler - * routine. There is one routine per opcode "type" based upon the + * routine. There is one routine per opcode "type" based upon the * number of opcode arguments and return type. */ status = diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 89c0114210c..37983574835 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -254,7 +254,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, acpi_ut_get_type_name(node->type), acpi_ut_get_node_name(node))); - return (AE_AML_OPERAND_TYPE); + return_ACPI_STATUS(AE_AML_OPERAND_TYPE); } break; @@ -602,7 +602,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) region_space, walk_state); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } acpi_ex_exit_interpreter(); diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index d0e6555061e..3e65a15a735 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -51,8 +51,9 @@ ACPI_MODULE_NAME("dswstate") /* Local prototypes */ -static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws); -static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws); +static acpi_status +acpi_ds_result_stack_push(struct acpi_walk_state *walk_state); +static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state); /******************************************************************************* * @@ -347,7 +348,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state) * * RETURN: Status * - * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT + * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT * deleted by this routine. * ******************************************************************************/ @@ -491,7 +492,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state, * RETURN: A walk_state object popped from the thread's stack * * DESCRIPTION: Remove and return the walkstate object that is at the head of - * the walk stack for the given walk list. NULL indicates that + * the walk stack for the given walk list. NULL indicates that * the list is empty. * ******************************************************************************/ @@ -531,14 +532,17 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread) * * RETURN: Pointer to the new walk state. 
* - * DESCRIPTION: Allocate and initialize a new walk state. The current walk + * DESCRIPTION: Allocate and initialize a new walk state. The current walk * state is set to this new state. * ******************************************************************************/ -struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object - *origin, union acpi_operand_object - *method_desc, struct acpi_thread_state +struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, + union acpi_parse_object + *origin, + union acpi_operand_object + *method_desc, + struct acpi_thread_state *thread) { struct acpi_walk_state *walk_state; @@ -653,7 +657,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state, /* * Setup the current scope. * Find a Named Op that has a namespace node associated with it. - * search upwards from this Op. Current scope is the first + * search upwards from this Op. Current scope is the first * Op with a namespace node. */ extra_op = parser_state->start_op; @@ -704,13 +708,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state) ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state); if (!walk_state) { - return; + return_VOID; } if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) { ACPI_ERROR((AE_INFO, "%p is not a valid walk state", walk_state)); - return; + return_VOID; } /* There should not be any open scopes */ diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index ef0193d74b5..36d12057442 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -89,7 +89,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) /* Set the mask bit only if there are references to this GPE */ if (gpe_event_info->runtime_count) { - ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit); + ACPI_SET_BIT(gpe_register_info->enable_for_run, + (u8)register_bit); } return_ACPI_STATUS(AE_OK); @@ -106,8 +107,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) * DESCRIPTION: Clear a GPE of stale events and enable it. 
* ******************************************************************************/ -acpi_status -acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) +acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status; @@ -131,8 +131,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) } /* Enable the requested GPE */ - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); return_ACPI_STATUS(status); } @@ -150,7 +150,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) * ******************************************************************************/ -acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) +acpi_status +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; @@ -191,7 +192,8 @@ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info * ******************************************************************************/ -acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) +acpi_status +acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) { acpi_status status = AE_OK; @@ -208,7 +210,8 @@ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_i status = acpi_ev_update_gpe_enable_mask(gpe_event_info); if (ACPI_SUCCESS(status)) { - status = acpi_hw_low_set_gpe(gpe_event_info, + status = + acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); } @@ -306,7 +309,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, /* A Non-NULL gpe_device means this is a GPE Block Device */ - obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *) + obj_desc = + acpi_ns_get_attached_object((struct acpi_namespace_node *) gpe_device); if (!obj_desc || !obj_desc->device.gpe_block) { return (NULL); diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 8cf4c104c7b..1571a61a783 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -486,7 +486,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not enable GPE 0x%02X", - gpe_index + gpe_block->block_base_number)); + gpe_index + + gpe_block->block_base_number)); continue; } diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index cb50dd91bc1..228a0c3b1d4 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -374,7 +374,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info, gpe_event_info->dispatch.handler = NULL; gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; - } else if ((gpe_event_info-> + } else + if ((gpe_event_info-> flags & ACPI_GPE_DISPATCH_MASK) == ACPI_GPE_DISPATCH_NOTIFY) { diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 4c1c8261166..1474241bfc7 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -227,8 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, /* Install a handler for this PCI root bridge */ - status = - acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); + status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); if 
(ACPI_FAILURE(status)) { if (status == AE_SAME_HANDLER) { /* @@ -350,8 +349,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node) { acpi_status status; - struct acpica_device_id *hid; - struct acpica_device_id_list *cid; + struct acpi_pnp_device_id *hid; + struct acpi_pnp_device_id_list *cid; u32 i; u8 match; diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index 7587eb6c958..ae668f32cf1 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -398,7 +398,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) * ******************************************************************************/ acpi_status -acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) +acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) { acpi_status status; diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 87c5f233226..3f30e753b65 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -221,7 +221,8 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, if (wake_device == ACPI_ROOT_OBJECT) { device_node = acpi_gbl_root_node; } else { - device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); + device_node = + ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); } /* Validate WakeDevice is of type Device */ @@ -324,7 +325,8 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) * ******************************************************************************/ -acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) +acpi_status +acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) { acpi_status status = AE_OK; struct acpi_gpe_event_info *gpe_event_info; @@ -567,7 +569,7 @@ acpi_install_gpe_block(acpi_handle gpe_device, status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } node = acpi_ns_validate_handle(gpe_device); @@ -650,7 +652,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } node = acpi_ns_validate_handle(gpe_device); @@ -694,8 +696,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) * the FADT-defined gpe blocks. Otherwise, the GPE block device. 
* ******************************************************************************/ -acpi_status -acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) +acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device) { struct acpi_gpe_device_info info; acpi_status status; diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index bfb062e4c4b..4492a4e0302 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -516,8 +516,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, string_length--; } - return_desc = acpi_ut_create_string_object((acpi_size) - string_length); + return_desc = + acpi_ut_create_string_object((acpi_size) string_length); if (!return_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 691d4763102..66554bc6f9a 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -78,7 +78,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) (target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) { /* * Dereference an existing alias so that we don't create a chain - * of aliases. With this code, we guarantee that an alias is + * of aliases. With this code, we guarantee that an alias is * always exactly one level of indirection away from the * actual aliased name. */ @@ -90,7 +90,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) /* * For objects that can never change (i.e., the NS node will * permanently point to the same object), we can simply attach - * the object to the new NS node. For other objects (such as + * the object to the new NS node. For other objects (such as * Integers, buffers, etc.), we have to point the Alias node * to the original Node. */ @@ -139,7 +139,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state) /* * The new alias assumes the type of the target, and it points - * to the same object. The reference count of the object has an + * to the same object. The reference count of the object has an * additional reference to prevent deletion out from under either the * target node or the alias Node */ @@ -243,8 +243,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) /* Init object and attach to NS node */ - obj_desc->mutex.sync_level = - (u8) walk_state->operands[1]->integer.value; + obj_desc->mutex.sync_level = (u8)walk_state->operands[1]->integer.value; obj_desc->mutex.node = (struct acpi_namespace_node *)walk_state->operands[0]; diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index bc5b9a6a131..d7c9f51608a 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -145,10 +145,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, case ACPI_TYPE_BUFFER: acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length); - acpi_ut_dump_buffer2(source_desc->buffer.pointer, - (source_desc->buffer.length < 256) ? - source_desc->buffer.length : 256, - DB_BYTE_DISPLAY); + acpi_ut_dump_buffer(source_desc->buffer.pointer, + (source_desc->buffer.length < 256) ? 
+ source_desc->buffer.length : 256, + DB_BYTE_DISPLAY, 0); break; case ACPI_TYPE_STRING: @@ -190,7 +190,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc, acpi_os_printf("Table Index 0x%X\n", source_desc->reference.value); - return; + return_VOID; default: break; diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 213c081776f..858b43a7dcf 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -464,7 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) ACPI_FUNCTION_NAME(ex_dump_operand) - if (!((ACPI_LV_EXEC & acpi_dbg_level) + if (! + ((ACPI_LV_EXEC & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return; } @@ -777,7 +778,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands, * PARAMETERS: title - Descriptive text * value - Value to be displayed * - * DESCRIPTION: Object dump output formatting functions. These functions + * DESCRIPTION: Object dump output formatting functions. These functions * reduce the number of format strings required and keeps them * all in one place for easy modification. * @@ -810,7 +811,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags) ACPI_FUNCTION_ENTRY(); if (!flags) { - if (!((ACPI_LV_OBJECTS & acpi_dbg_level) + if (! + ((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return; } @@ -940,10 +942,11 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc, acpi_os_printf("[Buffer] Length %.2X = ", obj_desc->buffer.length); if (obj_desc->buffer.length) { - acpi_ut_dump_buffer(ACPI_CAST_PTR - (u8, obj_desc->buffer.pointer), - obj_desc->buffer.length, - DB_DWORD_DISPLAY, _COMPONENT); + acpi_ut_debug_dump_buffer(ACPI_CAST_PTR + (u8, + obj_desc->buffer.pointer), + obj_desc->buffer.length, + DB_DWORD_DISPLAY, _COMPONENT); } else { acpi_os_printf("\n"); } @@ -996,7 +999,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags) } if (!flags) { - if (!((ACPI_LV_OBJECTS & acpi_dbg_level) + if (! + ((ACPI_LV_OBJECTS & acpi_dbg_level) && (_COMPONENT & acpi_dbg_layer))) { return_VOID; } diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index dc092f5b35d..ebc55fbf3ff 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -59,7 +59,7 @@ ACPI_MODULE_NAME("exfield") * * RETURN: Status * - * DESCRIPTION: Read from a named field. Returns either an Integer or a + * DESCRIPTION: Read from a named field. Returns either an Integer or a * Buffer, depending on the size of the field. * ******************************************************************************/ @@ -149,7 +149,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, * Allocate a buffer for the contents of the field. * * If the field is larger than the current integer width, create - * a BUFFER to hold it. Otherwise, use an INTEGER. This allows + * a BUFFER to hold it. Otherwise, use an INTEGER. This allows * the use of arithmetic operators on the returned value if the * field size is equal or smaller than an Integer. 
* diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index a7784152ed3..aa2ccfb7cb6 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -54,8 +54,7 @@ ACPI_MODULE_NAME("exfldio") /* Local prototypes */ static acpi_status acpi_ex_field_datum_io(union acpi_operand_object *obj_desc, - u32 field_datum_byte_offset, - u64 *value, u32 read_write); + u32 field_datum_byte_offset, u64 *value, u32 read_write); static u8 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value); @@ -155,7 +154,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, #endif /* - * Validate the request. The entire request from the byte offset for a + * Validate the request. The entire request from the byte offset for a * length of one field datum (access width) must fit within the region. * (Region length is specified in bytes) */ @@ -183,7 +182,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc, obj_desc->common_field.access_byte_width) { /* * This is the case where the access_type (acc_word, etc.) is wider - * than the region itself. For example, a region of length one + * than the region itself. For example, a region of length one * byte, and a field with Dword access specified. */ ACPI_ERROR((AE_INFO, @@ -321,7 +320,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, * * DESCRIPTION: Check if a value is out of range of the field being written. * Used to check if the values written to Index and Bank registers - * are out of range. Normally, the value is simply truncated + * are out of range. Normally, the value is simply truncated * to fit the field, but this case is most likely a serious * coding error in the ASL. * @@ -370,7 +369,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value) * * RETURN: Status * - * DESCRIPTION: Read or Write a single datum of a field. The field_type is + * DESCRIPTION: Read or Write a single datum of a field. The field_type is * demultiplexed here to handle the different types of fields * (buffer_field, region_field, index_field, bank_field) * @@ -860,7 +859,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length); /* * We must have a buffer that is at least as long as the field - * we are writing to. This is because individual fields are + * we are writing to. This is because individual fields are * indivisible and partial writes are not supported -- as per * the ACPI specification. */ @@ -875,7 +874,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, /* * Copy the original data to the new buffer, starting - * at Byte zero. All unused (upper) bytes of the + * at Byte zero. All unused (upper) bytes of the * buffer will be 0. */ ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length); diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index 271c0c57ea1..84058705ed1 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes @@ -254,7 +253,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0, ACPI_FUNCTION_TRACE(ex_do_concatenate); /* - * Convert the second operand if necessary. The first operand + * Convert the second operand if necessary. 
The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand @@ -573,7 +572,7 @@ acpi_ex_do_logical_op(u16 opcode, ACPI_FUNCTION_TRACE(ex_do_logical_op); /* - * Convert the second operand if necessary. The first operand + * Convert the second operand if necessary. The first operand * determines the type of the second operand, (See the Data Types * section of the ACPI 3.0+ specification.) Both object types are * guaranteed to be either Integer/String/Buffer by the operand diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index bcceda5be9e..d1f449d93dc 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exmutex - ASL Mutex Acquire/Release functions @@ -305,7 +304,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc) ACPI_FUNCTION_TRACE(ex_release_mutex_object); if (obj_desc->mutex.acquisition_depth == 0) { - return (AE_NOT_ACQUIRED); + return_ACPI_STATUS(AE_NOT_ACQUIRED); } /* Match multiple Acquires with multiple Releases */ @@ -462,7 +461,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) union acpi_operand_object *next = thread->acquired_mutex_list; union acpi_operand_object *obj_desc; - ACPI_FUNCTION_ENTRY(); + ACPI_FUNCTION_NAME(ex_release_all_mutexes); /* Traverse the list of owned mutexes, releasing each one */ @@ -474,6 +473,10 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) obj_desc->mutex.next = NULL; obj_desc->mutex.acquisition_depth = 0; + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Force-releasing held mutex: %p\n", + obj_desc)); + /* Release the mutex, special case for Global Lock */ if (obj_desc == acpi_gbl_global_lock_mutex) { diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index fcc75fa27d3..2ff578a16ad 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exnames - interpreter/scanner name load/execute @@ -53,8 +52,7 @@ ACPI_MODULE_NAME("exnames") /* Local prototypes */ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs); -static acpi_status -acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); +static acpi_status acpi_ex_name_segment(u8 **in_aml_address, char *name_string); /******************************************************************************* * @@ -64,7 +62,7 @@ acpi_ex_name_segment(u8 ** in_aml_address, char *name_string); * (-1)==root, 0==none * num_name_segs - count of 4-character name segments * - * RETURN: A pointer to the allocated string segment. This segment must + * RETURN: A pointer to the allocated string segment. This segment must * be deleted by the caller. * * DESCRIPTION: Allocate a buffer for a name string. 
Ensure allocated name @@ -178,7 +176,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string) ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); - for (index = 0; (index < ACPI_NAME_SIZE) + for (index = 0; + (index < ACPI_NAME_SIZE) && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { char_buf[index] = *aml_address++; ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index 9ba8c73cea1..bbf01e9bf05 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exoparg1 - AML execution - opcodes with 1 argument @@ -606,7 +605,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) } /* - * Set result to ONES (TRUE) if Value == 0. Note: + * Set result to ONES (TRUE) if Value == 0. Note: * return_desc->Integer.Value is initially == 0 (FALSE) from above. */ if (!operand[0]->integer.value) { @@ -618,7 +617,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) case AML_INCREMENT_OP: /* Increment (Operand) */ /* - * Create a new integer. Can't just get the base integer and + * Create a new integer. Can't just get the base integer and * increment it because it may be an Arg or Field. */ return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); @@ -686,7 +685,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) /* * Note: The operand is not resolved at this point because we want to - * get the associated object, not its value. For example, we don't + * get the associated object, not its value. For example, we don't * want to resolve a field_unit to its value, we want the actual * field_unit object. */ @@ -727,7 +726,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) /* * The type of the base object must be integer, buffer, string, or - * package. All others are not supported. + * package. All others are not supported. * * NOTE: Integer is not specifically supported by the ACPI spec, * but is supported implicitly via implicit operand conversion. @@ -965,7 +964,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) case ACPI_TYPE_PACKAGE: /* - * Return the referenced element of the package. We must + * Return the referenced element of the package. We must * add another reference to the referenced object, however. */ return_desc = diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 879e8a277b9..ee5634a074c 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -123,7 +123,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state) /* * Dispatch the notify to the appropriate handler * NOTE: the request is queued for execution after this method - * completes. The notify handlers are NOT invoked synchronously + * completes. The notify handlers are NOT invoked synchronously * from this thread -- because handlers may in turn run other * control methods. 
*/ diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 71fcc65c9ff..2c89b4651f0 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments @@ -158,7 +157,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state) case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */ /* - * Create the return object. The Source operand is guaranteed to be + * Create the return object. The Source operand is guaranteed to be * either a String or a Buffer, so just use its type. */ return_desc = acpi_ut_create_internal_object((operand[0])-> diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 0786b865906..3e08695c3b3 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments @@ -198,7 +197,7 @@ acpi_ex_do_match(u32 match_op, return (FALSE); } - return logical_result; + return (logical_result); } /******************************************************************************* @@ -269,7 +268,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state) * and the next should be examined. * * Upon finding a match, the loop will terminate via "break" at - * the bottom. If it terminates "normally", match_value will be + * the bottom. If it terminates "normally", match_value will be * ACPI_UINT64_MAX (Ones) (its initial value) indicating that no * match was found. */ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 81eca60d274..ba9db4de7c8 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exprep - ACPI AML (p-code) execution - field prep utilities @@ -78,8 +77,8 @@ acpi_ex_generate_access(u32 field_bit_offset, * any_acc keyword. * * NOTE: Need to have the region_length in order to check for boundary - * conditions (end-of-region). However, the region_length is a deferred - * operation. Therefore, to complete this implementation, the generation + * conditions (end-of-region). However, the region_length is a deferred + * operation. Therefore, to complete this implementation, the generation * of this access width must be deferred until the region length has * been evaluated. * @@ -308,7 +307,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc, * RETURN: Status * * DESCRIPTION: Initialize the areas of the field object that are common - * to the various types of fields. Note: This is very "sensitive" + * to the various types of fields. Note: This is very "sensitive" * code because we are solving the general case for field * alignment. * @@ -336,13 +335,13 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, obj_desc->common_field.bit_length = field_bit_length; /* - * Decode the access type so we can compute offsets. The access type gives + * Decode the access type so we can compute offsets. The access type gives * two pieces of information - the width of each field access and the * necessary byte_alignment (address granularity) of the access. 
* * For any_acc, the access_bit_width is the largest width that is both * necessary and possible in an attempt to access the whole field in one - * I/O operation. However, for any_acc, the byte_alignment is always one + * I/O operation. However, for any_acc, the byte_alignment is always one * byte. * * For all Buffer Fields, the byte_alignment is always one byte. @@ -363,7 +362,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc, /* * base_byte_offset is the address of the start of the field within the - * region. It is the byte address of the first *datum* (field-width data + * region. It is the byte address of the first *datum* (field-width data * unit) of the field. (i.e., the first datum that contains at least the * first *bit* of the field.) * diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 1f1ce0c3d2f..1db2c0bfde0 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exregion - ACPI default op_region (address space) handlers @@ -202,7 +201,7 @@ acpi_ex_system_memory_space_handler(u32 function, * Perform the memory read or write * * Note: For machines that do not support non-aligned transfers, the target - * address was checked for alignment above. We do not attempt to break the + * address was checked for alignment above. We do not attempt to break the * transfer up into smaller (byte-size) chunks because the AML specifically * asked for a transfer width that the hardware may require. */ diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index fa50e77e64a..6239956786e 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exresnte - AML Interpreter object resolution @@ -58,8 +57,8 @@ ACPI_MODULE_NAME("exresnte") * PARAMETERS: object_ptr - Pointer to a location that contains * a pointer to a NS node, and will receive a * pointer to the resolved object. - * walk_state - Current state. Valid only if executing AML - * code. NULL if simply resolving an object + * walk_state - Current state. Valid only if executing AML + * code. NULL if simply resolving an object * * RETURN: Status * @@ -67,7 +66,7 @@ ACPI_MODULE_NAME("exresnte") * * Note: for some of the data types, the pointer attached to the Node * can be either a pointer to an actual internal object or a pointer into the - * AML stream itself. These types are currently: + * AML stream itself. These types are currently: * * ACPI_TYPE_INTEGER * ACPI_TYPE_STRING @@ -89,7 +88,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, ACPI_FUNCTION_TRACE(ex_resolve_node_to_value); /* - * The stack pointer points to a struct acpi_namespace_node (Node). Get the + * The stack pointer points to a struct acpi_namespace_node (Node). Get the * object that is attached to the Node. 
*/ node = *object_ptr; diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index bbf40ac2758..cc176b245e2 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exresolv - AML Interpreter object resolution @@ -327,7 +326,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, * * RETURN: Status * - * DESCRIPTION: Return the base object and type. Traverse a reference list if + * DESCRIPTION: Return the base object and type. Traverse a reference list if * necessary to get to the base object. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index f232fbabdea..b9ebff2f6a0 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exresop - AML Interpreter operand/object resolution @@ -87,7 +86,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed, if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) { /* * Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference - * objects and thus allow them to be targets. (As per the ACPI + * objects and thus allow them to be targets. (As per the ACPI * specification, a store to a constant is a noop.) */ if ((this_type == ACPI_TYPE_INTEGER) && @@ -337,7 +336,8 @@ acpi_ex_resolve_operands(u16 opcode, if ((opcode == AML_STORE_OP) && ((*stack_ptr)->common.type == ACPI_TYPE_LOCAL_REFERENCE) - && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) { + && ((*stack_ptr)->reference.class == + ACPI_REFCLASS_INDEX)) { goto next_operand; } break; @@ -638,7 +638,7 @@ acpi_ex_resolve_operands(u16 opcode, if (acpi_gbl_enable_interpreter_slack) { /* * Enable original behavior of Store(), allowing any and all - * objects as the source operand. The ACPI spec does not + * objects as the source operand. The ACPI spec does not * allow this, however. */ break; diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 5fffe7ab5ec..90431f12f83 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -374,7 +374,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc, * with the input value. * * When storing into an object the data is converted to the - * target object type then stored in the object. This means + * target object type then stored in the object. This means * that the target object type (for an initialized target) will * not be changed by a store operation. * @@ -491,7 +491,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc, acpi_ut_get_object_type_name(source_desc), source_desc, node)); - /* No conversions for all other types. Just attach the source object */ + /* No conversions for all other types. 
Just attach the source object */ status = acpi_ns_attach_object(node, source_desc, source_desc->common.type); diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index b35bed52e06..87153bbc4b4 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exstoren - AML Interpreter object store support, @@ -61,7 +60,7 @@ ACPI_MODULE_NAME("exstoren") * * RETURN: Status, resolved object in source_desc_ptr. * - * DESCRIPTION: Resolve an object. If the object is a reference, dereference + * DESCRIPTION: Resolve an object. If the object is a reference, dereference * it and return the actual object in the source_desc_ptr. * ******************************************************************************/ @@ -93,7 +92,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, /* * Stores into a Field/Region or into a Integer/Buffer/String - * are all essentially the same. This case handles the + * are all essentially the same. This case handles the * "interchangeable" types Integer, String, and Buffer. */ if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) { @@ -167,7 +166,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, * * RETURN: Status * - * DESCRIPTION: "Store" an object to another object. This may include + * DESCRIPTION: "Store" an object to another object. This may include * converting the source type to the target type (implicit * conversion), and a copy of the value of the source to * the target. @@ -178,14 +177,14 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr, * with the input value. * * When storing into an object the data is converted to the - * target object type then stored in the object. This means + * target object type then stored in the object. This means * that the target object type (for an initialized target) will * not be changed by a store operation. * * This module allows destination types of Number, String, * Buffer, and Package. * - * Assumes parameters are already validated. NOTE: source_desc + * Assumes parameters are already validated. NOTE: source_desc * resolution (from a reference object) must be performed by * the caller if necessary. * diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 53c24847354..b5f339cb130 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exstorob - AML Interpreter object store support, store to object @@ -108,7 +107,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, #ifdef ACPI_OBSOLETE_BEHAVIOR /* * NOTE: ACPI versions up to 3.0 specified that the buffer must be - * truncated if the string is smaller than the buffer. However, "other" + * truncated if the string is smaller than the buffer. However, "other" * implementations of ACPI never did this and thus became the defacto * standard. ACPI 3.0A changes this behavior such that the buffer * is no longer truncated. @@ -117,7 +116,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, /* * OBSOLETE BEHAVIOR: * If the original source was a string, we must truncate the buffer, - * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer + * according to the ACPI spec. 
Integer-to-Buffer and Buffer-to-Buffer * copy must not truncate the original buffer. */ if (original_src_type == ACPI_TYPE_STRING) { diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index b760641e2fc..c8a0ad5c1f5 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exsystem - Interface to OS services @@ -59,7 +58,7 @@ ACPI_MODULE_NAME("exsystem") * RETURN: Status * * DESCRIPTION: Implements a semaphore wait with a check to see if the - * semaphore is available immediately. If it is not, the + * semaphore is available immediately. If it is not, the * interpreter is released before waiting. * ******************************************************************************/ @@ -104,7 +103,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) * RETURN: Status * * DESCRIPTION: Implements a mutex wait with a check to see if the - * mutex is available immediately. If it is not, the + * mutex is available immediately. If it is not, the * interpreter is released before waiting. * ******************************************************************************/ @@ -152,7 +151,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) * DESCRIPTION: Suspend running thread for specified amount of time. * Note: ACPI specification requires that Stall() does not * relinquish the processor, and delays longer than 100 usec - * should use Sleep() instead. We allow stalls up to 255 usec + * should use Sleep() instead. We allow stalls up to 255 usec * for compatibility with other interpreters and existing BIOSs. * ******************************************************************************/ @@ -254,7 +253,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc) * RETURN: Status * * DESCRIPTION: Provides an access point to perform synchronization operations - * within the AML. This operation is a request to wait for an + * within the AML. This operation is a request to wait for an * event. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index d1ab7917eed..264d22d8018 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: exutils - interpreter/scanner utilities @@ -45,12 +44,12 @@ /* * DEFINE_AML_GLOBALS is tested in amlcode.h * to determine whether certain global names should be "defined" or only - * "declared" in the current compilation. This enhances maintainability + * "declared" in the current compilation. This enhances maintainability * by enabling a single header file to embody all knowledge of the names * in question. * * Exactly one module of any executable should #define DEFINE_GLOBALS - * before #including the header files which use this convention. The + * before #including the header files which use this convention. The * names in question will be defined and initialized in that module, * and declared as extern in all other modules which #include those * header files. 
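
(Editorial aside: the DEFINE_AML_GLOBALS comment in the exutils.c hunk above describes the standard single-definition-point idiom for C globals: one header carries the declarations, and exactly one translation unit turns them into definitions by setting a macro before including it. A minimal sketch of that idiom follows; the names my_globals.h, MYLIB_DEFINE_GLOBALS, MYLIB_EXTERN, MYLIB_INIT and g_op_count are hypothetical, illustrative only, and are not ACPICA's actual identifiers.)

/* my_globals.h -- hypothetical header following the same convention */
#ifndef MY_GLOBALS_H
#define MY_GLOBALS_H

#ifdef MYLIB_DEFINE_GLOBALS
#define MYLIB_EXTERN                    /* expands to nothing: real definition */
#define MYLIB_INIT(value)       = value /* supply the initializer here only */
#else
#define MYLIB_EXTERN            extern  /* everywhere else: declaration only */
#define MYLIB_INIT(value)
#endif

MYLIB_EXTERN unsigned int g_op_count MYLIB_INIT(0);

#endif /* MY_GLOBALS_H */

/* globals.c -- exactly one translation unit defines and initializes the globals */
#define MYLIB_DEFINE_GLOBALS
#include "my_globals.h"

/* any other .c file -- sees only "extern unsigned int g_op_count;" */
#include "my_globals.h"

(The point of the convention, as the comment notes, is maintainability: the header remains the single authoritative list of globals, so declarations and definitions cannot drift apart across modules.)
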
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index a1e71d0ef57..90a9aea1cee 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface @@ -136,7 +135,7 @@ acpi_status acpi_hw_set_mode(u32 mode) * * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY * - * DESCRIPTION: Return current operating state of system. Determined by + * DESCRIPTION: Return current operating state of system. Determined by * querying the SCI_EN bit. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index db4076580e2..64560045052 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: hwgpe - Low level GPE enable/disable/clear functions @@ -339,7 +338,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, acpi_status acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, - struct acpi_gpe_block_info *gpe_block, void *context) + struct acpi_gpe_block_info * gpe_block, + void *context) { u32 i; acpi_status status; diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 1455ddcdc32..65bc3453a29 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c @@ -259,7 +259,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, status = acpi_hw_get_pci_device_info(pci_id, info->device, &bus_number, &is_bridge); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + return (status); } info = info->next; @@ -271,7 +271,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id, pci_id->segment, pci_id->bus, pci_id->device, pci_id->function, status, bus_number, is_bridge)); - return_ACPI_STATUS(AE_OK); + return (AE_OK); } /******************************************************************************* diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 4af6d20ef07..f4e57503576 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -1,4 +1,3 @@ - /******************************************************************************* * * Module Name: hwregs - Read/write access functions for the various ACPI diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index b6411f16832..bfdce22f379 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Name: hwtimer.c - ACPI Power Management Timer Interface @@ -101,8 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks) return_ACPI_STATUS(AE_BAD_PARAMETER); } - status = - acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); + status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block); return_ACPI_STATUS(status); } @@ -129,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer) * a versatile and accurate timer. * * Note that this function accommodates only a single timer - * rollover. Thus for 24-bit timers, this function should only + * rollover. 
Thus for 24-bit timers, this function should only * be used for calculating durations less than ~4.6 seconds * (~20 minutes for 32-bit timers) -- calculations below: * diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index c99d546b217..b6aae58299d 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: hwvalid - I/O request validation diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 7bfd649d199..05a154c3c9a 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -1,4 +1,3 @@ - /****************************************************************************** * * Module Name: hwxface - Public ACPICA hardware interfaces diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 0ff1ecea5c3..ae443fe2ebf 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -49,8 +49,7 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ -static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); +static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); /* * Dispatch table used to efficiently branch to the various sleep @@ -234,8 +233,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) * function. * ******************************************************************************/ -static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) +static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) { acpi_status status; struct acpi_sleep_functions *sleep_functions = @@ -369,8 +367,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } - status = - acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); + status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -396,8 +393,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); status = - acpi_hw_sleep_dispatch(sleep_state, - ACPI_WAKE_PREP_FUNCTION_ID); + acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 23db53ce229..d70eaf39dfd 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -110,11 +110,11 @@ acpi_status acpi_ns_root_initialize(void) status = acpi_ns_lookup(NULL, init_val->name, init_val->type, ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH, NULL, &new_node); - - if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */ + if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not create predefined name %s", init_val->name)); + continue; } /* @@ -179,8 +179,7 @@ acpi_status acpi_ns_root_initialize(void) /* Build an object around the static string */ - obj_desc->string.length = - (u32) ACPI_STRLEN(val); + obj_desc->string.length = (u32)ACPI_STRLEN(val); obj_desc->string.pointer = val; obj_desc->common.flags |= AOPOBJ_STATIC_POINTER; break; diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index ac389e5bb59..15143c44f5e 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -332,7 +332,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node) * * RETURN: None. 
* - * DESCRIPTION: Delete a subtree of the namespace. This includes all objects + * DESCRIPTION: Delete a subtree of the namespace. This includes all objects * stored within the subtree. * ******************************************************************************/ @@ -418,7 +418,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node) * RETURN: Status * * DESCRIPTION: Delete entries within the namespace that are owned by a - * specific ID. Used to delete entire ACPI tables. All + * specific ID. Used to delete entire ACPI tables. All * reference counts are updated. * * MUTEX: Locks namespace during deletion walk. diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 2526aaf945e..924b3c71473 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -209,14 +209,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, "Invalid ACPI Object Type 0x%08X", type)); } - if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { - this_node->name.integer = - acpi_ut_repair_name(this_node->name.ascii); - - ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", - this_node->name.integer)); - } - acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node)); } @@ -700,7 +692,7 @@ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level) * * PARAMETERS: search_base - Root of subtree to be dumped, or * NS_ALL to dump the entire namespace - * max_depth - Maximum depth of dump. Use INT_MAX + * max_depth - Maximum depth of dump. Use INT_MAX * for an effectively unlimited depth. * * RETURN: None diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 95ffe8dfa1f..4328e2adfeb 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -96,8 +96,8 @@ acpi_status acpi_ns_initialize_objects(void) /* Walk entire namespace from the supplied root */ status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL, - &info, NULL); + ACPI_UINT32_MAX, acpi_ns_init_one_object, + NULL, &info, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); } diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 76935ff2928..911f99127b9 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -80,8 +80,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) /* * Parse the table and load the namespace with all named - * objects found within. Control methods are NOT parsed - * at this time. In fact, the control methods cannot be + * objects found within. Control methods are NOT parsed + * at this time. In fact, the control methods cannot be * parsed until the entire namespace is loaded, because * if a control method makes a forward reference (call) * to another control method, we can't continue parsing @@ -122,7 +122,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) } /* - * Now we can parse the control methods. We always parse + * Now we can parse the control methods. We always parse * them here for a sanity check, and if configured for * just-in-time parsing, we delete the control method * parse trees. @@ -166,7 +166,7 @@ acpi_status acpi_ns_load_namespace(void) } /* - * Load the namespace. The DSDT is required, + * Load the namespace. The DSDT is required, * but the SSDT and PSDT tables are optional. 
*/ status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT); @@ -283,7 +283,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle) * RETURN: Status * * DESCRIPTION: Shrinks the namespace, typically in response to an undocking - * event. Deletes an entire subtree starting from (and + * event. Deletes an entire subtree starting from (and * including) the given handle. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 96e0eb609bb..55a175eadcc 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -195,7 +195,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) ACPI_ERROR((AE_INFO, "Invalid Namespace Node (%p) while traversing namespace", next_node)); - return 0; + return (0); } size += ACPI_PATH_SEGMENT_LENGTH; next_node = next_node->parent; diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index d6c9a3cc671..e69f7fa2579 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -61,7 +61,7 @@ ACPI_MODULE_NAME("nsobject") * RETURN: Status * * DESCRIPTION: Record the given object as the value associated with the - * name whose acpi_handle is passed. If Object is NULL + * name whose acpi_handle is passed. If Object is NULL * and Type is ACPI_TYPE_ANY, set the name as having no value. * Note: Future may require that the Node->Flags field be passed * as a parameter. @@ -133,7 +133,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node, ((struct acpi_namespace_node *)object)->object) { /* * Value passed is a name handle and that name has a - * non-null value. Use that name's value and type. + * non-null value. Use that name's value and type. */ obj_desc = ((struct acpi_namespace_node *)object)->object; object_type = ((struct acpi_namespace_node *)object)->type; @@ -321,7 +321,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union * * RETURN: Status * - * DESCRIPTION: Low-level attach data. Create and attach a Data object. + * DESCRIPTION: Low-level attach data. Create and attach a Data object. * ******************************************************************************/ @@ -377,7 +377,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node, * * RETURN: Status * - * DESCRIPTION: Low-level detach data. Delete the data node, but the caller + * DESCRIPTION: Low-level detach data. Delete the data node, but the caller * is responsible for the actual data. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index ec7ba2d3463..233f756d5cf 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -168,11 +168,11 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) /* * AML Parse, pass 1 * - * In this pass, we load most of the namespace. Control methods - * are not parsed until later. A parse tree is not created. Instead, - * each Parser Op subtree is deleted when it is finished. This saves + * In this pass, we load most of the namespace. Control methods + * are not parsed until later. A parse tree is not created. Instead, + * each Parser Op subtree is deleted when it is finished. This saves * a great deal of memory, and allows a small cache of parse objects - * to service the entire parse. The second pass of the parse then + * to service the entire parse. 
The second pass of the parse then * performs another complete parse of the AML. */ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index 456cc859f86..1d2d8ffc1bc 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c @@ -314,22 +314,7 @@ acpi_ns_search_and_enter(u32 target_name, * this problem, and we want to be able to enable ACPI support for them, * even though there are a few bad names. */ - if (!acpi_ut_valid_acpi_name(target_name)) { - target_name = - acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); - - /* Report warning only if in strict mode or debug mode */ - - if (!acpi_gbl_enable_interpreter_slack) { - ACPI_WARNING((AE_INFO, - "Found bad character(s) in name, repaired: [%4.4s]\n", - ACPI_CAST_PTR(char, &target_name))); - } else { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Found bad character(s) in name, repaired: [%4.4s]\n", - ACPI_CAST_PTR(char, &target_name))); - } - } + acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name)); /* Try to find the name in the namespace level specified by the caller */ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index ef753a41e08..b5b4cb72a8a 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -530,7 +530,7 @@ acpi_ns_externalize_name(u32 internal_name_length, ((num_segments > 0) ? (num_segments - 1) : 0) + 1; /* - * Check to see if we're still in bounds. If not, there's a problem + * Check to see if we're still in bounds. If not, there's a problem * with internal_name (invalid format). */ if (required_length > internal_name_length) { @@ -557,10 +557,14 @@ acpi_ns_externalize_name(u32 internal_name_length, (*converted_name)[j++] = '.'; } - (*converted_name)[j++] = internal_name[names_index++]; - (*converted_name)[j++] = internal_name[names_index++]; - (*converted_name)[j++] = internal_name[names_index++]; - (*converted_name)[j++] = internal_name[names_index++]; + /* Copy and validate the 4-char name segment */ + + ACPI_MOVE_NAME(&(*converted_name)[j], + &internal_name[names_index]); + acpi_ut_repair_name(&(*converted_name)[j]); + + j += ACPI_NAME_SIZE; + names_index += ACPI_NAME_SIZE; } } @@ -681,7 +685,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) * \ (backslash) and ^ (carat) prefixes, and the * . (period) to separate segments are supported. * prefix_node - Root of subtree to be searched, or NS_ALL for the - * root of the name space. If Name is fully + * root of the name space. If Name is fully * qualified (first s8 is '\'), the passed value * of Scope will not be accessed. * flags - Used to indicate whether to perform upsearch or @@ -689,7 +693,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type) * return_node - Where the Node is returned * * DESCRIPTION: Look up a name relative to a given scope and return the - * corresponding Node. NOTE: Scope can be null. + * corresponding Node. NOTE: Scope can be null. * * MUTEX: Locks namespace * diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 730bccc5e7f..0483877f26b 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -60,8 +60,8 @@ ACPI_MODULE_NAME("nswalk") * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if * none is found. * - * DESCRIPTION: Return the next peer node within the namespace. If Handle - * is valid, Scope is ignored. Otherwise, the first node + * DESCRIPTION: Return the next peer node within the namespace. 
If Handle + * is valid, Scope is ignored. Otherwise, the first node * within Scope is returned. * ******************************************************************************/ @@ -97,8 +97,8 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node * RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if * none is found. * - * DESCRIPTION: Return the next peer node within the namespace. If Handle - * is valid, Scope is ignored. Otherwise, the first node + * DESCRIPTION: Return the next peer node within the namespace. If Handle + * is valid, Scope is ignored. Otherwise, the first node * within Scope is returned. * ******************************************************************************/ @@ -305,7 +305,7 @@ acpi_ns_walk_namespace(acpi_object_type type, /* * Depth first search: Attempt to go down another level in the - * namespace if we are allowed to. Don't go any further if we have + * namespace if we are allowed to. Don't go any further if we have * reached the caller specified maximum depth or if the user * function has specified that the maximum depth has been reached. */ diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 9692e670233..d6a9f77972b 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -61,16 +61,16 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info); * PARAMETERS: handle - Object handle (optional) * pathname - Object pathname (optional) * external_params - List of parameters to pass to method, - * terminated by NULL. May be NULL + * terminated by NULL. May be NULL * if no parameters are being passed. * return_buffer - Where to put method's return value (if - * any). If NULL, no value is returned. + * any). If NULL, no value is returned. * return_type - Expected type of return object * * RETURN: Status * * DESCRIPTION: Find and evaluate the given object, passing the given - * parameters if necessary. One of "Handle" or "Pathname" must + * parameters if necessary. One of "Handle" or "Pathname" must * be valid (non-null) * ******************************************************************************/ @@ -155,15 +155,15 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) * PARAMETERS: handle - Object handle (optional) * pathname - Object pathname (optional) * external_params - List of parameters to pass to method, - * terminated by NULL. May be NULL + * terminated by NULL. May be NULL * if no parameters are being passed. * return_buffer - Where to put method's return value (if - * any). If NULL, no value is returned. + * any). If NULL, no value is returned. * * RETURN: Status * * DESCRIPTION: Find and evaluate the given object, passing the given - * parameters if necessary. One of "Handle" or "Pathname" must + * parameters if necessary. 
One of "Handle" or "Pathname" must * be valid (non-null) * ******************************************************************************/ @@ -542,15 +542,15 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, acpi_status status; struct acpi_namespace_node *node; u32 flags; - struct acpica_device_id *hid; - struct acpica_device_id_list *cid; + struct acpi_pnp_device_id *hid; + struct acpi_pnp_device_id_list *cid; u32 i; u8 found; int no_match; status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { - return (status); + return_ACPI_STATUS(status); } node = acpi_ns_validate_handle(obj_handle); @@ -656,7 +656,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle, * DESCRIPTION: Performs a modified depth-first walk of the namespace tree, * starting (and ending) at the object specified by start_handle. * The user_function is called whenever an object of type - * Device is found. If the user function returns + * Device is found. If the user function returns * a non-zero value, the search is terminated immediately and this * value is returned to the caller. * diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 08e9610b34c..811c6f13f47 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -53,8 +53,8 @@ ACPI_MODULE_NAME("nsxfname") /* Local prototypes */ -static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, - struct acpica_device_id *source, +static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, + struct acpi_pnp_device_id *source, char *string_area); /****************************************************************************** @@ -69,8 +69,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, * RETURN: Status * * DESCRIPTION: This routine will search for a caller specified name in the - * name space. The caller can restrict the search region by - * specifying a non NULL parent. The parent value is itself a + * name space. The caller can restrict the search region by + * specifying a non NULL parent. The parent value is itself a * namespace handle. * ******************************************************************************/ @@ -149,7 +149,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle) * RETURN: Pointer to a string containing the fully qualified Name. * * DESCRIPTION: This routine returns the fully qualified name associated with - * the Handle parameter. This and the acpi_pathname_to_handle are + * the Handle parameter. This and the acpi_pathname_to_handle are * complementary functions. * ******************************************************************************/ @@ -202,8 +202,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer) /* Just copy the ACPI name from the Node and zero terminate it */ - ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node), - ACPI_NAME_SIZE); + ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node)); ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0; status = AE_OK; @@ -219,20 +218,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_name) * * FUNCTION: acpi_ns_copy_device_id * - * PARAMETERS: dest - Pointer to the destination DEVICE_ID - * source - Pointer to the source DEVICE_ID + * PARAMETERS: dest - Pointer to the destination PNP_DEVICE_ID + * source - Pointer to the source PNP_DEVICE_ID * string_area - Pointer to where to copy the dest string * * RETURN: Pointer to the next string area * - * DESCRIPTION: Copy a single DEVICE_ID, including the string data. 
+ * DESCRIPTION: Copy a single PNP_DEVICE_ID, including the string data. * ******************************************************************************/ -static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, - struct acpica_device_id *source, +static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest, + struct acpi_pnp_device_id *source, char *string_area) { - /* Create the destination DEVICE_ID */ + + /* Create the destination PNP_DEVICE_ID */ dest->string = string_area; dest->length = source->length; @@ -256,8 +256,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest, * namespace node and possibly by running several standard * control methods (Such as in the case of a device.) * - * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA, - * _ADR, _sx_w, and _sx_d methods. + * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB, + * _STA, _ADR, _sx_w, and _sx_d methods. * * Note: Allocates the return buffer, must be freed by the caller. * @@ -269,9 +269,10 @@ acpi_get_object_info(acpi_handle handle, { struct acpi_namespace_node *node; struct acpi_device_info *info; - struct acpica_device_id_list *cid_list = NULL; - struct acpica_device_id *hid = NULL; - struct acpica_device_id *uid = NULL; + struct acpi_pnp_device_id_list *cid_list = NULL; + struct acpi_pnp_device_id *hid = NULL; + struct acpi_pnp_device_id *uid = NULL; + struct acpi_pnp_device_id *sub = NULL; char *next_id_string; acpi_object_type type; acpi_name name; @@ -316,7 +317,7 @@ acpi_get_object_info(acpi_handle handle, if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) { /* * Get extra info for ACPI Device/Processor objects only: - * Run the Device _HID, _UID, and _CID methods. + * Run the Device _HID, _UID, _SUB, and _CID methods. * * Note: none of these methods are required, so they may or may * not be present for this device. The Info->Valid bitfield is used @@ -339,6 +340,14 @@ acpi_get_object_info(acpi_handle handle, valid |= ACPI_VALID_UID; } + /* Execute the Device._SUB method */ + + status = acpi_ut_execute_SUB(node, &sub); + if (ACPI_SUCCESS(status)) { + info_size += sub->length; + valid |= ACPI_VALID_SUB; + } + /* Execute the Device._CID method */ status = acpi_ut_execute_CID(node, &cid_list); @@ -348,7 +357,7 @@ acpi_get_object_info(acpi_handle handle, info_size += (cid_list->list_size - - sizeof(struct acpica_device_id_list)); + sizeof(struct acpi_pnp_device_id_list)); valid |= ACPI_VALID_CID; } } @@ -418,16 +427,17 @@ acpi_get_object_info(acpi_handle handle, next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids); if (cid_list) { - /* Point past the CID DEVICE_ID array */ + /* Point past the CID PNP_DEVICE_ID array */ next_id_string += ((acpi_size) cid_list->count * - sizeof(struct acpica_device_id)); + sizeof(struct acpi_pnp_device_id)); } /* - * Copy the HID, UID, and CIDs to the return buffer. The variable-length - * strings are copied to the reserved area at the end of the buffer. + * Copy the HID, UID, SUB, and CIDs to the return buffer. + * The variable-length strings are copied to the reserved area + * at the end of the buffer. * * For HID and CID, check if the ID is a PCI Root Bridge. 
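As a usage sketch of the acpi_get_object_info() path extended above: a caller can now test ACPI_VALID_SUB and read the new subsystem_id field alongside _HID and _CID. The function and field names come from the hunk itself; the surrounding driver code (handle source, pr_info logging, error handling) is illustrative only and assumes the usual Linux calling conventions.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/acpi.h>

/* Hypothetical helper: dump the IDs reported for one device handle. */
static void example_dump_device_ids(acpi_handle handle)
{
	struct acpi_device_info *info;
	acpi_status status;
	u32 i;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status))
		return;

	if (info->valid & ACPI_VALID_HID)
		pr_info("HID: %s\n", info->hardware_id.string);
	if (info->valid & ACPI_VALID_SUB)	/* new with this patch */
		pr_info("SUB: %s\n", info->subsystem_id.string);
	if (info->valid & ACPI_VALID_CID)
		for (i = 0; i < info->compatible_id_list.count; i++)
			pr_info("CID: %s\n",
				info->compatible_id_list.ids[i].string);

	kfree(info);	/* buffer is allocated by ACPICA, freed by the caller */
}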
*/ @@ -445,6 +455,11 @@ acpi_get_object_info(acpi_handle handle, uid, next_id_string); } + if (sub) { + next_id_string = acpi_ns_copy_device_id(&info->subsystem_id, + sub, next_id_string); + } + if (cid_list) { info->compatible_id_list.count = cid_list->count; info->compatible_id_list.list_size = cid_list->list_size; @@ -481,6 +496,9 @@ acpi_get_object_info(acpi_handle handle, if (uid) { ACPI_FREE(uid); } + if (sub) { + ACPI_FREE(sub); + } if (cid_list) { ACPI_FREE(cid_list); } diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 6766fc4f088..9d029dac6b6 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c @@ -220,8 +220,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent) * * RETURN: Status * - * DESCRIPTION: Return the next peer object within the namespace. If Handle is - * valid, Scope is ignored. Otherwise, the first object within + * DESCRIPTION: Return the next peer object within the namespace. If Handle is + * valid, Scope is ignored. Otherwise, the first object within * Scope is returned. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 844464c4f90..cb79e2d4d74 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -120,7 +120,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) * RETURN: Pointer to end-of-package +1 * * DESCRIPTION: Get next package length and return a pointer past the end of - * the package. Consumes the package length field + * the package. Consumes the package length field * ******************************************************************************/ @@ -147,8 +147,8 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state) * RETURN: Pointer to the start of the name string (pointer points into * the AML. * - * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name - * prefix characters. Set parser state to point past the string. + * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name + * prefix characters. Set parser state to point past the string. * (Name is consumed from the AML.) * ******************************************************************************/ @@ -220,7 +220,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) * * DESCRIPTION: Get next name (if method call, return # of required args). * Names are looked up in the internal namespace to determine - * if the name represents a control method. If a method + * if the name represents a control method. If a method * is found, the number of arguments to the method is returned. * This information is critical for parsing to continue correctly. * diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index 799162c1b6d..5607805aab2 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -133,18 +133,46 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) case AML_CLASS_UNKNOWN: - /* The opcode is unrecognized. Just skip unknown opcodes */ + /* The opcode is unrecognized. 
Complain and skip unknown opcodes */ - ACPI_ERROR((AE_INFO, - "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring", - walk_state->opcode, walk_state->parser_state.aml, - walk_state->aml_offset)); + if (walk_state->pass_number == 2) { + ACPI_ERROR((AE_INFO, + "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring", + walk_state->opcode, + (u32)(walk_state->aml_offset + + sizeof(struct acpi_table_header)))); - ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128); + ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48); - /* Assume one-byte bad opcode */ +#ifdef ACPI_ASL_COMPILER + /* + * This is executed for the disassembler only. Output goes + * to the disassembled ASL output file. + */ + acpi_os_printf + ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n", + walk_state->opcode, + (u32)(walk_state->aml_offset + + sizeof(struct acpi_table_header))); + + /* Dump the context surrounding the invalid opcode */ + + acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. + aml - 16), 48, DB_BYTE_DISPLAY, + walk_state->aml_offset + + sizeof(struct acpi_table_header) - + 16); + acpi_os_printf(" */\n"); +#endif + } + + /* Increment past one-byte or two-byte opcode */ walk_state->parser_state.aml++; + if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */ + walk_state->parser_state.aml++; + } + return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); default: @@ -519,11 +547,18 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, if ((op_info->class == AML_CLASS_EXECUTE) && (!arg)) { ACPI_WARNING((AE_INFO, - "Detected an unsupported executable opcode " - "at module-level: [0x%.4X] at table offset 0x%.4X", - op->common.aml_opcode, - (u32)((aml_op_start - walk_state->parser_state.aml_start) - + sizeof(struct acpi_table_header)))); + "Unsupported module-level executable opcode " + "0x%.2X at table offset 0x%.4X", + op->common. + aml_opcode, + (u32) + (ACPI_PTR_DIFF + (aml_op_start, + walk_state-> + parser_state. + aml_start) + + sizeof(struct + acpi_table_header)))); } } break; @@ -843,8 +878,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state, *op = NULL; } - ACPI_PREEMPTION_POINT(); - return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index ed1d457bd5c..1793d934aa3 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -59,7 +59,7 @@ static const u8 acpi_gbl_argument_count[] = * * DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands> * The name is a simple ascii string, the operand specifier is an - * ascii string with one letter per operand. The letter specifies + * ascii string with one letter per operand. The letter specifies * the operand type. * ******************************************************************************/ @@ -183,7 +183,7 @@ static const u8 acpi_gbl_argument_count[] = ******************************************************************************/ /* - * Master Opcode information table. A summary of everything we know about each + * Master Opcode information table. A summary of everything we know about each * opcode, all in one place. 
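For readers following the new unknown-opcode path in psloop.c above, the skip logic compresses to the sketch below (illustrative, not the kernel code): reported offsets add sizeof(struct acpi_table_header) because the AML stream begins right after the 36-byte table header, and an opcode value above 0xFF can only come from the 0x5B extended-opcode prefix, so at most two bytes are consumed.

#include <linux/types.h>

/* Sketch only: step the AML cursor past an opcode the parser cannot decode. */
static u8 *example_skip_unknown_opcode(u8 *aml, u16 opcode)
{
	aml++;			/* first (or only) opcode byte */
	if (opcode > 0xFF)	/* 0x5B-prefixed, two-byte opcode */
		aml++;
	return aml;
}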
*/ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { @@ -392,10 +392,12 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), /* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | + AML_CONSTANT), /* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, - AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT), + AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | + AML_CONSTANT), /* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), @@ -495,7 +497,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { AML_NSNODE | AML_NAMED | AML_DEFER), /* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_FIELD), /* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, @@ -519,12 +522,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD), + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_FIELD), /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, - ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT, - AML_TYPE_NAMED_FIELD, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD | - AML_DEFER), + ACPI_TYPE_LOCAL_BANK_FIELD, + AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_FIELD | AML_DEFER), /* Internal opcodes that map to invalid AML opcodes */ @@ -632,7 +636,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = { /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, - AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE), + AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | + AML_NSNODE), /* ACPI 3.0 opcodes */ @@ -695,7 +700,7 @@ static const u8 acpi_gbl_short_op_index[256] = { /* * This table is indexed by the second opcode of the extended opcode - * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) + * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info) */ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = { /* 0 1 2 3 4 5 6 7 */ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 01985703bb9..2494caf4775 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -43,9 +43,9 @@ /* * Parse the AML and build an operation tree as most interpreters, - * like Perl, do. Parsing is done by hand rather than with a YACC + * like Perl, do. Parsing is done by hand rather than with a YACC * generated parser to tightly constrain stack and dynamic memory - * usage. At the same time, parsing is kept flexible and the code + * usage. 
At the same time, parsing is kept flexible and the code * fairly compact by parsing based on a list of AML opcode * templates in aml_op_info[] */ @@ -379,7 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state, case AE_CTRL_FALSE: /* * Either an IF/WHILE Predicate was false or we encountered a BREAK - * opcode. In both cases, we do not execute the rest of the + * opcode. In both cases, we do not execute the rest of the * package; We simply close out the parent (finishing the walk of * this branch of the tree) and continue execution at the parent * level. @@ -459,8 +459,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) /* Executing a control method - additional cleanup */ - acpi_ds_terminate_control_method( - walk_state->method_desc, walk_state); + acpi_ds_terminate_control_method(walk_state-> + method_desc, + walk_state); } acpi_ds_delete_walk_state(walk_state); @@ -487,7 +488,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) acpi_gbl_current_walk_list = thread; /* - * Execute the walk loop as long as there is a valid Walk State. This + * Execute the walk loop as long as there is a valid Walk State. This * handles nested control method invocations without recursion. */ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state)); diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index 8736ad5f04d..4137dcb352d 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -108,7 +108,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode) * RETURN: Pointer to the new Op, null on failure * * DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on - * opcode. A cache of opcodes is available for the pure + * opcode. A cache of opcodes is available for the pure * GENERIC_OP, since this is by far the most commonly used. * ******************************************************************************/ @@ -164,7 +164,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode) * * RETURN: None. * - * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list + * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list * or actually free it. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index de12469d1c9..147feb6aa2a 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -457,6 +457,15 @@ acpi_rs_get_list_length(u8 * aml_buffer, * Get the number of vendor data bytes */ extra_struct_bytes = resource_length; + + /* + * There is already one byte included in the minimum + * descriptor size. If there are extra struct bytes, + * subtract one from the count. + */ + if (extra_struct_bytes) { + extra_struct_bytes--; + } break; case ACPI_RESOURCE_NAME_END_TAG: @@ -601,7 +610,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, /* * Calculate the size of the return buffer. * The base size is the number of elements * the sizes of the - * structures. Additional space for the strings is added below. + * structures. Additional space for the strings is added below. * The minus one is to subtract the size of the u8 Source[1] * member because it is added below. * @@ -664,8 +673,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, (*sub_object_list)->string. 
length + 1); } else { - temp_size_needed += - acpi_ns_get_pathname_length((*sub_object_list)->reference.node); + temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); } } else { /* diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 46b5324b22d..8b64db9a3fd 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -109,7 +109,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml, ACPI_ERROR((AE_INFO, "Invalid/unsupported resource descriptor: Type 0x%2.2X", resource_index)); - return (AE_AML_INVALID_RESOURCE_TYPE); + return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); } /* Convert the AML byte stream resource to a local resource struct */ @@ -200,7 +200,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, ACPI_ERROR((AE_INFO, "Invalid/unsupported resource descriptor: Type 0x%2.2X", resource->type)); - return (AE_AML_INVALID_RESOURCE_TYPE); + return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE); } status = acpi_rs_convert_resource_to_aml(resource, diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index 57deae16657..77d1db29a72 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -77,7 +77,7 @@ acpi_tb_find_table(char *signature, /* Normalize the input strings */ ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header)); - ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE); + ACPI_MOVE_NAME(header.signature, signature); ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE); ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE); diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 70f9d787c82..f540ae46292 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -526,6 +526,8 @@ void acpi_tb_terminate(void) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); + + return_VOID; } /******************************************************************************* diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index b6cea30da63..285e24b9738 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -354,7 +354,7 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length) sum = (u8) (sum + *(buffer++)); } - return sum; + return (sum); } /******************************************************************************* diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 21101262e47..f5632780421 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -236,7 +236,7 @@ acpi_get_table_header(char *signature, sizeof(struct acpi_table_header)); if (!header) { - return AE_NO_MEMORY; + return (AE_NO_MEMORY); } ACPI_MEMCPY(out_table_header, header, sizeof(struct acpi_table_header)); @@ -244,7 +244,7 @@ acpi_get_table_header(char *signature, sizeof(struct acpi_table_header)); } else { - return AE_NOT_FOUND; + return (AE_NOT_FOUND); } } else { ACPI_MEMCPY(out_table_header, diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index f87cc63e69a..a5e1e4e4709 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -211,7 +211,7 @@ static acpi_status acpi_tb_load_namespace(void) * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must * be a valid ACPI table with a valid ACPI table header. * Note1: Mainly intended to support hotplug addition of SSDTs. - * Note2: Does not copy the incoming table. 
User is reponsible + * Note2: Does not copy the incoming table. User is responsible * to ensure that the table is not deleted or unmapped. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 74e72080003..28f330230f9 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -67,7 +67,6 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) { - ACPI_FUNCTION_ENTRY(); /* * The signature and checksum must both be correct @@ -108,7 +107,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * RETURN: Status, RSDP physical address * * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor - * pointer structure. If it is found, set *RSDP to point to it. + * pointer structure. If it is found, set *RSDP to point to it. * * NOTE1: The RSDP must be either in the first 1K of the Extended * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.) diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c new file mode 100644 index 00000000000..e1d40ed2639 --- /dev/null +++ b/drivers/acpi/acpica/utcache.c @@ -0,0 +1,323 @@ +/****************************************************************************** + * + * Module Name: utcache - local cache allocation routines + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
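Stepping back briefly to the RSDP validation touched in tbxfroot.c above: the test amounts to a signature compare plus the byte-sum rule that acpi_tb_checksum() in tbutils.c implements (a valid table sums to zero). The sketch below is illustrative and assumes the 20-byte ACPI 1.0 RSDP layout from the specification; revision 2+ tables additionally carry an extended checksum over the full structure, checked the same way.

#include <linux/string.h>
#include <acpi/acpi.h>

/* Illustrative check: signature plus zero byte-sum over the v1.0 RSDP. */
static int example_rsdp_looks_valid(struct acpi_table_rsdp *rsdp)
{
	u8 *bytes = (u8 *)rsdp;
	u8 sum = 0;
	u32 i;

	if (memcmp(rsdp->signature, "RSD PTR ", 8))
		return 0;

	for (i = 0; i < 20; i++)	/* ACPI 1.0 portion of the RSDP */
		sum = (u8)(sum + bytes[i]);

	return (sum == 0);
}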
+ */ + +#include <acpi/acpi.h> +#include "accommon.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utcache") + +#ifdef ACPI_USE_LOCAL_CACHE +/******************************************************************************* + * + * FUNCTION: acpi_os_create_cache + * + * PARAMETERS: cache_name - Ascii name for the cache + * object_size - Size of each cached object + * max_depth - Maximum depth of the cache (in objects) + * return_cache - Where the new cache object is returned + * + * RETURN: Status + * + * DESCRIPTION: Create a cache object + * + ******************************************************************************/ +acpi_status +acpi_os_create_cache(char *cache_name, + u16 object_size, + u16 max_depth, struct acpi_memory_list ** return_cache) +{ + struct acpi_memory_list *cache; + + ACPI_FUNCTION_ENTRY(); + + if (!cache_name || !return_cache || (object_size < 16)) { + return (AE_BAD_PARAMETER); + } + + /* Create the cache object */ + + cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); + if (!cache) { + return (AE_NO_MEMORY); + } + + /* Populate the cache object and return it */ + + ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); + cache->link_offset = 8; + cache->list_name = cache_name; + cache->object_size = object_size; + cache->max_depth = max_depth; + + *return_cache = cache; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_purge_cache + * + * PARAMETERS: cache - Handle to cache object + * + * RETURN: Status + * + * DESCRIPTION: Free all objects within the requested cache. + * + ******************************************************************************/ + +acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) +{ + char *next; + acpi_status status; + + ACPI_FUNCTION_ENTRY(); + + if (!cache) { + return (AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Walk the list of objects in this cache */ + + while (cache->list_head) { + + /* Delete and unlink one cached state object */ + + next = *(ACPI_CAST_INDIRECT_PTR(char, + &(((char *)cache-> + list_head)[cache-> + link_offset]))); + ACPI_FREE(cache->list_head); + + cache->list_head = next; + cache->current_depth--; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_delete_cache + * + * PARAMETERS: cache - Handle to cache object + * + * RETURN: Status + * + * DESCRIPTION: Free all objects within the requested cache and delete the + * cache object. + * + ******************************************************************************/ + +acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache) +{ + acpi_status status; + + ACPI_FUNCTION_ENTRY(); + + /* Purge all objects in the cache */ + + status = acpi_os_purge_cache(cache); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Now we can delete the cache object */ + + acpi_os_free(cache); + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_release_object + * + * PARAMETERS: cache - Handle to cache object + * object - The object to be released + * + * RETURN: None + * + * DESCRIPTION: Release an object to the specified cache. If cache is full, + * the object is deleted. 
+ * + ******************************************************************************/ + +acpi_status +acpi_os_release_object(struct acpi_memory_list * cache, void *object) +{ + acpi_status status; + + ACPI_FUNCTION_ENTRY(); + + if (!cache || !object) { + return (AE_BAD_PARAMETER); + } + + /* If cache is full, just free this object */ + + if (cache->current_depth >= cache->max_depth) { + ACPI_FREE(object); + ACPI_MEM_TRACKING(cache->total_freed++); + } + + /* Otherwise put this object back into the cache */ + + else { + status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); + if (ACPI_FAILURE(status)) { + return (status); + } + + /* Mark the object as cached */ + + ACPI_MEMSET(object, 0xCA, cache->object_size); + ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED); + + /* Put the object at the head of the cache list */ + + *(ACPI_CAST_INDIRECT_PTR(char, + &(((char *)object)[cache-> + link_offset]))) = + cache->list_head; + cache->list_head = object; + cache->current_depth++; + + (void)acpi_ut_release_mutex(ACPI_MTX_CACHES); + } + + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_os_acquire_object + * + * PARAMETERS: cache - Handle to cache object + * + * RETURN: the acquired object. NULL on error + * + * DESCRIPTION: Get an object from the specified cache. If cache is empty, + * the object is allocated. + * + ******************************************************************************/ + +void *acpi_os_acquire_object(struct acpi_memory_list *cache) +{ + acpi_status status; + void *object; + + ACPI_FUNCTION_NAME(os_acquire_object); + + if (!cache) { + return (NULL); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES); + if (ACPI_FAILURE(status)) { + return (NULL); + } + + ACPI_MEM_TRACKING(cache->requests++); + + /* Check the cache first */ + + if (cache->list_head) { + + /* There is an object available, use it */ + + object = cache->list_head; + cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char, + &(((char *) + object)[cache-> + link_offset]))); + + cache->current_depth--; + + ACPI_MEM_TRACKING(cache->hits++); + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "Object %p from %s cache\n", object, + cache->list_name)); + + status = acpi_ut_release_mutex(ACPI_MTX_CACHES); + if (ACPI_FAILURE(status)) { + return (NULL); + } + + /* Clear (zero) the previously used Object */ + + ACPI_MEMSET(object, 0, cache->object_size); + } else { + /* The cache is empty, create a new object */ + + ACPI_MEM_TRACKING(cache->total_allocated++); + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + if ((cache->total_allocated - cache->total_freed) > + cache->max_occupied) { + cache->max_occupied = + cache->total_allocated - cache->total_freed; + } +#endif + + /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ + + status = acpi_ut_release_mutex(ACPI_MTX_CACHES); + if (ACPI_FAILURE(status)) { + return (NULL); + } + + object = ACPI_ALLOCATE_ZEROED(cache->object_size); + if (!object) { + return (NULL); + } + } + + return (object); +} +#endif /* ACPI_USE_LOCAL_CACHE */ diff --git a/drivers/acpi/acpica/utclib.c b/drivers/acpi/acpica/utclib.c new file mode 100644 index 00000000000..19ea4755aa7 --- /dev/null +++ b/drivers/acpi/acpica/utclib.c @@ -0,0 +1,749 @@ +/****************************************************************************** + * + * Module Name: cmclib - Local implementation of C library functions + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. 
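With the whole of the new utcache.c now in view, here is a minimal usage sketch of the interface it defines (compiled only when ACPI_USE_LOCAL_CACHE is set). The object type, cache name, and depth below are invented for illustration; note that object_size must be at least 16 bytes, since part of each cached object is reused as the free-list link while it sits in the cache.

#include <acpi/acpi.h>

/* Hypothetical pool of small state objects built on the cache interface above. */
struct example_state {
	u32 value;
	void *cursor;
	char payload[24];
};

static struct acpi_memory_list *example_cache;

static acpi_status example_cache_demo(void)
{
	struct example_state *state;
	acpi_status status;

	status = acpi_os_create_cache("ExampleState",
				      (u16)sizeof(struct example_state),
				      16, &example_cache);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	state = acpi_os_acquire_object(example_cache);	/* zeroed, from list or heap */
	if (!state) {
		(void)acpi_os_delete_cache(example_cache);
		return (AE_NO_MEMORY);
	}

	/* ... use the object ... */

	(void)acpi_os_release_object(example_cache, state);	/* recycled or freed */
	return (acpi_os_delete_cache(example_cache));	/* purge, then free the cache */
}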
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <acpi/acpi.h> +#include "accommon.h" + +/* + * These implementations of standard C Library routines can optionally be + * used if a C library is not available. In general, they are less efficient + * than an inline or assembly implementation + */ + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("cmclib") + +#ifndef ACPI_USE_SYSTEM_CLIBRARY +#define NEGATIVE 1 +#define POSITIVE 0 +/******************************************************************************* + * + * FUNCTION: acpi_ut_memcmp (memcmp) + * + * PARAMETERS: buffer1 - First Buffer + * buffer2 - Second Buffer + * count - Maximum # of bytes to compare + * + * RETURN: Index where Buffers mismatched, or 0 if Buffers matched + * + * DESCRIPTION: Compare two Buffers, with a maximum length + * + ******************************************************************************/ +int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count) +{ + + return ((count == ACPI_SIZE_MAX) ? 
0 : ((unsigned char)*buffer1 - + (unsigned char)*buffer2)); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_memcpy (memcpy) + * + * PARAMETERS: dest - Target of the copy + * src - Source buffer to copy + * count - Number of bytes to copy + * + * RETURN: Dest + * + * DESCRIPTION: Copy arbitrary bytes of memory + * + ******************************************************************************/ + +void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count) +{ + char *new = (char *)dest; + char *old = (char *)src; + + while (count) { + *new = *old; + new++; + old++; + count--; + } + + return (dest); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_memset (memset) + * + * PARAMETERS: dest - Buffer to set + * value - Value to set each byte of memory + * count - Number of bytes to set + * + * RETURN: Dest + * + * DESCRIPTION: Initialize a buffer to a known value. + * + ******************************************************************************/ + +void *acpi_ut_memset(void *dest, u8 value, acpi_size count) +{ + char *new = (char *)dest; + + while (count) { + *new = (char)value; + new++; + count--; + } + + return (dest); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strlen (strlen) + * + * PARAMETERS: string - Null terminated string + * + * RETURN: Length + * + * DESCRIPTION: Returns the length of the input string + * + ******************************************************************************/ + +acpi_size acpi_ut_strlen(const char *string) +{ + u32 length = 0; + + /* Count the string until a null is encountered */ + + while (*string) { + length++; + string++; + } + + return (length); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strcpy (strcpy) + * + * PARAMETERS: dst_string - Target of the copy + * src_string - The source string to copy + * + * RETURN: dst_string + * + * DESCRIPTION: Copy a null terminated string + * + ******************************************************************************/ + +char *acpi_ut_strcpy(char *dst_string, const char *src_string) +{ + char *string = dst_string; + + /* Move bytes brute force */ + + while (*src_string) { + *string = *src_string; + + string++; + src_string++; + } + + /* Null terminate */ + + *string = 0; + return (dst_string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strncpy (strncpy) + * + * PARAMETERS: dst_string - Target of the copy + * src_string - The source string to copy + * count - Maximum # of bytes to copy + * + * RETURN: dst_string + * + * DESCRIPTION: Copy a null terminated string, with a maximum length + * + ******************************************************************************/ + +char *acpi_ut_strncpy(char *dst_string, const char *src_string, acpi_size count) +{ + char *string = dst_string; + + /* Copy the string */ + + for (string = dst_string; + count && (count--, (*string++ = *src_string++));) {; + } + + /* Pad with nulls if necessary */ + + while (count--) { + *string = 0; + string++; + } + + /* Return original pointer */ + + return (dst_string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strcmp (strcmp) + * + * PARAMETERS: string1 - First string + * string2 - Second string + * + * RETURN: Index where strings 
mismatched, or 0 if strings matched + * + * DESCRIPTION: Compare two null terminated strings + * + ******************************************************************************/ + +int acpi_ut_strcmp(const char *string1, const char *string2) +{ + + for (; (*string1 == *string2); string2++) { + if (!*string1++) { + return (0); + } + } + + return ((unsigned char)*string1 - (unsigned char)*string2); +} + +#ifdef ACPI_FUTURE_IMPLEMENTATION +/* Not used at this time */ +/******************************************************************************* + * + * FUNCTION: acpi_ut_strchr (strchr) + * + * PARAMETERS: string - Search string + * ch - character to search for + * + * RETURN: Ptr to char or NULL if not found + * + * DESCRIPTION: Search a string for a character + * + ******************************************************************************/ + +char *acpi_ut_strchr(const char *string, int ch) +{ + + for (; (*string); string++) { + if ((*string) == (char)ch) { + return ((char *)string); + } + } + + return (NULL); +} +#endif + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strncmp (strncmp) + * + * PARAMETERS: string1 - First string + * string2 - Second string + * count - Maximum # of bytes to compare + * + * RETURN: Index where strings mismatched, or 0 if strings matched + * + * DESCRIPTION: Compare two null terminated strings, with a maximum length + * + ******************************************************************************/ + +int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count) +{ + + for (; count-- && (*string1 == *string2); string2++) { + if (!*string1++) { + return (0); + } + } + + return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*string1 - + (unsigned char)*string2)); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strcat (Strcat) + * + * PARAMETERS: dst_string - Target of the copy + * src_string - The source string to copy + * + * RETURN: dst_string + * + * DESCRIPTION: Append a null terminated string to a null terminated string + * + ******************************************************************************/ + +char *acpi_ut_strcat(char *dst_string, const char *src_string) +{ + char *string; + + /* Find end of the destination string */ + + for (string = dst_string; *string++;) {; + } + + /* Concatenate the string */ + + for (--string; (*string++ = *src_string++);) {; + } + + return (dst_string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strncat (strncat) + * + * PARAMETERS: dst_string - Target of the copy + * src_string - The source string to copy + * count - Maximum # of bytes to copy + * + * RETURN: dst_string + * + * DESCRIPTION: Append a null terminated string to a null terminated string, + * with a maximum count. 
+ * + ******************************************************************************/ + +char *acpi_ut_strncat(char *dst_string, const char *src_string, acpi_size count) +{ + char *string; + + if (count) { + + /* Find end of the destination string */ + + for (string = dst_string; *string++;) {; + } + + /* Concatenate the string */ + + for (--string; (*string++ = *src_string++) && --count;) {; + } + + /* Null terminate if necessary */ + + if (!count) { + *string = 0; + } + } + + return (dst_string); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strstr (strstr) + * + * PARAMETERS: string1 - Target string + * string2 - Substring to search for + * + * RETURN: Where substring match starts, Null if no match found + * + * DESCRIPTION: Checks if String2 occurs in String1. This is not really a + * full implementation of strstr, only sufficient for command + * matching + * + ******************************************************************************/ + +char *acpi_ut_strstr(char *string1, char *string2) +{ + char *string; + + if (acpi_ut_strlen(string2) > acpi_ut_strlen(string1)) { + return (NULL); + } + + /* Walk entire string, comparing the letters */ + + for (string = string1; *string2;) { + if (*string2 != *string) { + return (NULL); + } + + string2++; + string++; + } + + return (string1); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_strtoul (strtoul) + * + * PARAMETERS: string - Null terminated string + * terminater - Where a pointer to the terminating byte is + * returned + * base - Radix of the string + * + * RETURN: Converted value + * + * DESCRIPTION: Convert a string into a 32-bit unsigned value. + * Note: use acpi_ut_strtoul64 for 64-bit integers. + * + ******************************************************************************/ + +u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base) +{ + u32 converted = 0; + u32 index; + u32 sign; + const char *string_start; + u32 return_value = 0; + acpi_status status = AE_OK; + + /* + * Save the value of the pointer to the buffer's first + * character, save the current errno value, and then + * skip over any white space in the buffer: + */ + string_start = string; + while (ACPI_IS_SPACE(*string) || *string == '\t') { + ++string; + } + + /* + * The buffer may contain an optional plus or minus sign. + * If it does, then skip over it but remember what is was: + */ + if (*string == '-') { + sign = NEGATIVE; + ++string; + } else if (*string == '+') { + ++string; + sign = POSITIVE; + } else { + sign = POSITIVE; + } + + /* + * If the input parameter Base is zero, then we need to + * determine if it is octal, decimal, or hexadecimal: + */ + if (base == 0) { + if (*string == '0') { + if (acpi_ut_to_lower(*(++string)) == 'x') { + base = 16; + ++string; + } else { + base = 8; + } + } else { + base = 10; + } + } else if (base < 2 || base > 36) { + /* + * The specified Base parameter is not in the domain of + * this function: + */ + goto done; + } + + /* + * For octal and hexadecimal bases, skip over the leading + * 0 or 0x, if they are present. 
+ */ + if (base == 8 && *string == '0') { + string++; + } + + if (base == 16 && + *string == '0' && acpi_ut_to_lower(*(++string)) == 'x') { + string++; + } + + /* + * Main loop: convert the string to an unsigned long: + */ + while (*string) { + if (ACPI_IS_DIGIT(*string)) { + index = (u32)((u8)*string - '0'); + } else { + index = (u32)acpi_ut_to_upper(*string); + if (ACPI_IS_UPPER(index)) { + index = index - 'A' + 10; + } else { + goto done; + } + } + + if (index >= base) { + goto done; + } + + /* + * Check to see if value is out of range: + */ + + if (return_value > ((ACPI_UINT32_MAX - (u32)index) / (u32)base)) { + status = AE_ERROR; + return_value = 0; /* reset */ + } else { + return_value *= base; + return_value += index; + converted = 1; + } + + ++string; + } + + done: + /* + * If appropriate, update the caller's pointer to the next + * unconverted character in the buffer. + */ + if (terminator) { + if (converted == 0 && return_value == 0 && string != NULL) { + *terminator = (char *)string_start; + } else { + *terminator = (char *)string; + } + } + + if (status == AE_ERROR) { + return_value = ACPI_UINT32_MAX; + } + + /* + * If a minus sign was present, then "the conversion is negated": + */ + if (sign == NEGATIVE) { + return_value = (ACPI_UINT32_MAX - return_value) + 1; + } + + return (return_value); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_to_upper (TOUPPER) + * + * PARAMETERS: c - Character to convert + * + * RETURN: Converted character as an int + * + * DESCRIPTION: Convert character to uppercase + * + ******************************************************************************/ + +int acpi_ut_to_upper(int c) +{ + + return (ACPI_IS_LOWER(c) ? ((c) - 0x20) : (c)); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_to_lower (TOLOWER) + * + * PARAMETERS: c - Character to convert + * + * RETURN: Converted character as an int + * + * DESCRIPTION: Convert character to lowercase + * + ******************************************************************************/ + +int acpi_ut_to_lower(int c) +{ + + return (ACPI_IS_UPPER(c) ? 
((c) + 0x20) : (c)); +} + +/******************************************************************************* + * + * FUNCTION: is* functions + * + * DESCRIPTION: is* functions use the ctype table below + * + ******************************************************************************/ + +const u8 _acpi_ctype[257] = { + _ACPI_CN, /* 0x00 0 NUL */ + _ACPI_CN, /* 0x01 1 SOH */ + _ACPI_CN, /* 0x02 2 STX */ + _ACPI_CN, /* 0x03 3 ETX */ + _ACPI_CN, /* 0x04 4 EOT */ + _ACPI_CN, /* 0x05 5 ENQ */ + _ACPI_CN, /* 0x06 6 ACK */ + _ACPI_CN, /* 0x07 7 BEL */ + _ACPI_CN, /* 0x08 8 BS */ + _ACPI_CN | _ACPI_SP, /* 0x09 9 TAB */ + _ACPI_CN | _ACPI_SP, /* 0x0A 10 LF */ + _ACPI_CN | _ACPI_SP, /* 0x0B 11 VT */ + _ACPI_CN | _ACPI_SP, /* 0x0C 12 FF */ + _ACPI_CN | _ACPI_SP, /* 0x0D 13 CR */ + _ACPI_CN, /* 0x0E 14 SO */ + _ACPI_CN, /* 0x0F 15 SI */ + _ACPI_CN, /* 0x10 16 DLE */ + _ACPI_CN, /* 0x11 17 DC1 */ + _ACPI_CN, /* 0x12 18 DC2 */ + _ACPI_CN, /* 0x13 19 DC3 */ + _ACPI_CN, /* 0x14 20 DC4 */ + _ACPI_CN, /* 0x15 21 NAK */ + _ACPI_CN, /* 0x16 22 SYN */ + _ACPI_CN, /* 0x17 23 ETB */ + _ACPI_CN, /* 0x18 24 CAN */ + _ACPI_CN, /* 0x19 25 EM */ + _ACPI_CN, /* 0x1A 26 SUB */ + _ACPI_CN, /* 0x1B 27 ESC */ + _ACPI_CN, /* 0x1C 28 FS */ + _ACPI_CN, /* 0x1D 29 GS */ + _ACPI_CN, /* 0x1E 30 RS */ + _ACPI_CN, /* 0x1F 31 US */ + _ACPI_XS | _ACPI_SP, /* 0x20 32 ' ' */ + _ACPI_PU, /* 0x21 33 '!' */ + _ACPI_PU, /* 0x22 34 '"' */ + _ACPI_PU, /* 0x23 35 '#' */ + _ACPI_PU, /* 0x24 36 '$' */ + _ACPI_PU, /* 0x25 37 '%' */ + _ACPI_PU, /* 0x26 38 '&' */ + _ACPI_PU, /* 0x27 39 ''' */ + _ACPI_PU, /* 0x28 40 '(' */ + _ACPI_PU, /* 0x29 41 ')' */ + _ACPI_PU, /* 0x2A 42 '*' */ + _ACPI_PU, /* 0x2B 43 '+' */ + _ACPI_PU, /* 0x2C 44 ',' */ + _ACPI_PU, /* 0x2D 45 '-' */ + _ACPI_PU, /* 0x2E 46 '.' */ + _ACPI_PU, /* 0x2F 47 '/' */ + _ACPI_XD | _ACPI_DI, /* 0x30 48 '0' */ + _ACPI_XD | _ACPI_DI, /* 0x31 49 '1' */ + _ACPI_XD | _ACPI_DI, /* 0x32 50 '2' */ + _ACPI_XD | _ACPI_DI, /* 0x33 51 '3' */ + _ACPI_XD | _ACPI_DI, /* 0x34 52 '4' */ + _ACPI_XD | _ACPI_DI, /* 0x35 53 '5' */ + _ACPI_XD | _ACPI_DI, /* 0x36 54 '6' */ + _ACPI_XD | _ACPI_DI, /* 0x37 55 '7' */ + _ACPI_XD | _ACPI_DI, /* 0x38 56 '8' */ + _ACPI_XD | _ACPI_DI, /* 0x39 57 '9' */ + _ACPI_PU, /* 0x3A 58 ':' */ + _ACPI_PU, /* 0x3B 59 ';' */ + _ACPI_PU, /* 0x3C 60 '<' */ + _ACPI_PU, /* 0x3D 61 '=' */ + _ACPI_PU, /* 0x3E 62 '>' */ + _ACPI_PU, /* 0x3F 63 '?' 
*/ + _ACPI_PU, /* 0x40 64 '@' */ + _ACPI_XD | _ACPI_UP, /* 0x41 65 'A' */ + _ACPI_XD | _ACPI_UP, /* 0x42 66 'B' */ + _ACPI_XD | _ACPI_UP, /* 0x43 67 'C' */ + _ACPI_XD | _ACPI_UP, /* 0x44 68 'D' */ + _ACPI_XD | _ACPI_UP, /* 0x45 69 'E' */ + _ACPI_XD | _ACPI_UP, /* 0x46 70 'F' */ + _ACPI_UP, /* 0x47 71 'G' */ + _ACPI_UP, /* 0x48 72 'H' */ + _ACPI_UP, /* 0x49 73 'I' */ + _ACPI_UP, /* 0x4A 74 'J' */ + _ACPI_UP, /* 0x4B 75 'K' */ + _ACPI_UP, /* 0x4C 76 'L' */ + _ACPI_UP, /* 0x4D 77 'M' */ + _ACPI_UP, /* 0x4E 78 'N' */ + _ACPI_UP, /* 0x4F 79 'O' */ + _ACPI_UP, /* 0x50 80 'P' */ + _ACPI_UP, /* 0x51 81 'Q' */ + _ACPI_UP, /* 0x52 82 'R' */ + _ACPI_UP, /* 0x53 83 'S' */ + _ACPI_UP, /* 0x54 84 'T' */ + _ACPI_UP, /* 0x55 85 'U' */ + _ACPI_UP, /* 0x56 86 'V' */ + _ACPI_UP, /* 0x57 87 'W' */ + _ACPI_UP, /* 0x58 88 'X' */ + _ACPI_UP, /* 0x59 89 'Y' */ + _ACPI_UP, /* 0x5A 90 'Z' */ + _ACPI_PU, /* 0x5B 91 '[' */ + _ACPI_PU, /* 0x5C 92 '\' */ + _ACPI_PU, /* 0x5D 93 ']' */ + _ACPI_PU, /* 0x5E 94 '^' */ + _ACPI_PU, /* 0x5F 95 '_' */ + _ACPI_PU, /* 0x60 96 '`' */ + _ACPI_XD | _ACPI_LO, /* 0x61 97 'a' */ + _ACPI_XD | _ACPI_LO, /* 0x62 98 'b' */ + _ACPI_XD | _ACPI_LO, /* 0x63 99 'c' */ + _ACPI_XD | _ACPI_LO, /* 0x64 100 'd' */ + _ACPI_XD | _ACPI_LO, /* 0x65 101 'e' */ + _ACPI_XD | _ACPI_LO, /* 0x66 102 'f' */ + _ACPI_LO, /* 0x67 103 'g' */ + _ACPI_LO, /* 0x68 104 'h' */ + _ACPI_LO, /* 0x69 105 'i' */ + _ACPI_LO, /* 0x6A 106 'j' */ + _ACPI_LO, /* 0x6B 107 'k' */ + _ACPI_LO, /* 0x6C 108 'l' */ + _ACPI_LO, /* 0x6D 109 'm' */ + _ACPI_LO, /* 0x6E 110 'n' */ + _ACPI_LO, /* 0x6F 111 'o' */ + _ACPI_LO, /* 0x70 112 'p' */ + _ACPI_LO, /* 0x71 113 'q' */ + _ACPI_LO, /* 0x72 114 'r' */ + _ACPI_LO, /* 0x73 115 's' */ + _ACPI_LO, /* 0x74 116 't' */ + _ACPI_LO, /* 0x75 117 'u' */ + _ACPI_LO, /* 0x76 118 'v' */ + _ACPI_LO, /* 0x77 119 'w' */ + _ACPI_LO, /* 0x78 120 'x' */ + _ACPI_LO, /* 0x79 121 'y' */ + _ACPI_LO, /* 0x7A 122 'z' */ + _ACPI_PU, /* 0x7B 123 '{' */ + _ACPI_PU, /* 0x7C 124 '|' */ + _ACPI_PU, /* 0x7D 125 '}' */ + _ACPI_PU, /* 0x7E 126 '~' */ + _ACPI_CN, /* 0x7F 127 DEL */ + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x80 to 0x8F */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x90 to 0x9F */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xA0 to 0xAF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xB0 to 0xBF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xC0 to 0xCF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xD0 to 0xDF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xE0 to 0xEF */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xF0 to 0xFF */ + 0 /* 0x100 */ +}; + +#endif /* ACPI_USE_SYSTEM_CLIBRARY */ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index e810894149a..5d95166245a 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -47,8 +47,9 @@ #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utdebug") + #ifdef ACPI_DEBUG_OUTPUT -static acpi_thread_id acpi_gbl_prev_thread_id; +static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF; static char *acpi_gbl_fn_entry_str = "----Entry"; static char *acpi_gbl_fn_exit_str = "----Exit-"; @@ -109,7 +110,7 @@ void acpi_ut_track_stack_ptr(void) * RETURN: Updated pointer to the function name * * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. 
- * This allows compiler macros such as __func__ to be used + * This allows compiler macros such as __FUNCTION__ to be used * with no change to the debug output. * ******************************************************************************/ @@ -222,7 +223,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print) * * RETURN: None * - * DESCRIPTION: Print message with no headers. Has same interface as + * DESCRIPTION: Print message with no headers. Has same interface as * debug_print so that the same macros can be used. * ******************************************************************************/ @@ -258,7 +259,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print_raw) * * RETURN: None * - * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level * ******************************************************************************/ @@ -290,7 +291,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace) * * RETURN: None * - * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level * ******************************************************************************/ @@ -299,6 +300,7 @@ acpi_ut_trace_ptr(u32 line_number, const char *function_name, const char *module_name, u32 component_id, void *pointer) { + acpi_gbl_nesting_level++; acpi_ut_track_stack_ptr(); @@ -319,7 +321,7 @@ acpi_ut_trace_ptr(u32 line_number, * * RETURN: None * - * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level * ******************************************************************************/ @@ -350,7 +352,7 @@ acpi_ut_trace_str(u32 line_number, * * RETURN: None * - * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level * ******************************************************************************/ @@ -380,7 +382,7 @@ acpi_ut_trace_u32(u32 line_number, * * RETURN: None * - * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level * ******************************************************************************/ @@ -412,7 +414,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit) * * RETURN: None * - * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level. Prints exit status also. * ******************************************************************************/ @@ -453,7 +455,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit) * * RETURN: None * - * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level. Prints exit value also. * ******************************************************************************/ @@ -485,7 +487,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit) * * RETURN: None * - * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is + * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is * set in debug_level. Prints exit value also. 
* ******************************************************************************/ @@ -511,7 +513,7 @@ acpi_ut_ptr_exit(u32 line_number, * PARAMETERS: buffer - Buffer to dump * count - Amount to dump, in bytes * display - BYTE, WORD, DWORD, or QWORD display - * component_ID - Caller's component ID + * offset - Beginning buffer offset (display only) * * RETURN: None * @@ -519,7 +521,7 @@ acpi_ut_ptr_exit(u32 line_number, * ******************************************************************************/ -void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) +void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset) { u32 i = 0; u32 j; @@ -541,7 +543,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /* Print current offset */ - acpi_os_printf("%6.4X: ", i); + acpi_os_printf("%6.4X: ", (base_offset + i)); /* Print 16 hex chars */ @@ -623,7 +625,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) /******************************************************************************* * - * FUNCTION: acpi_ut_dump_buffer + * FUNCTION: acpi_ut_debug_dump_buffer * * PARAMETERS: buffer - Buffer to dump * count - Amount to dump, in bytes @@ -636,7 +638,8 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display) * ******************************************************************************/ -void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) +void +acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id) { /* Only dump the buffer if tracing is enabled */ @@ -646,5 +649,5 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id) return; } - acpi_ut_dump_buffer2(buffer, count, display); + acpi_ut_dump_buffer(buffer, count, display, 0); } diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 5d84e195457..774c3aefbf5 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -67,10 +67,10 @@ ACPI_MODULE_NAME("utids") ******************************************************************************/ acpi_status acpi_ut_execute_HID(struct acpi_namespace_node *device_node, - struct acpica_device_id **return_id) + struct acpi_pnp_device_id **return_id) { union acpi_operand_object *obj_desc; - struct acpica_device_id *hid; + struct acpi_pnp_device_id *hid; u32 length; acpi_status status; @@ -94,16 +94,17 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node, /* Allocate a buffer for the HID */ hid = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + (acpi_size) length); if (!hid) { status = AE_NO_MEMORY; goto cleanup; } - /* Area for the string starts after DEVICE_ID struct */ + /* Area for the string starts after PNP_DEVICE_ID struct */ - hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id)); + hid->string = + ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id)); /* Convert EISAID to a string or simply copy existing string */ @@ -126,6 +127,73 @@ cleanup: /******************************************************************************* * + * FUNCTION: acpi_ut_execute_SUB + * + * PARAMETERS: device_node - Node for the device + * return_id - Where the _SUB is returned + * + * RETURN: Status + * + * DESCRIPTION: Executes the _SUB control method that returns the subsystem + * ID of the device. The _SUB value is always a string containing + * either a valid PNP or ACPI ID. 
+ * + * NOTE: Internal function, no parameter validation + * + ******************************************************************************/ + +acpi_status +acpi_ut_execute_SUB(struct acpi_namespace_node *device_node, + struct acpi_pnp_device_id **return_id) +{ + union acpi_operand_object *obj_desc; + struct acpi_pnp_device_id *sub; + u32 length; + acpi_status status; + + ACPI_FUNCTION_TRACE(ut_execute_SUB); + + status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB, + ACPI_BTYPE_STRING, &obj_desc); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Get the size of the String to be returned, includes null terminator */ + + length = obj_desc->string.length + 1; + + /* Allocate a buffer for the SUB */ + + sub = + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + + (acpi_size) length); + if (!sub) { + status = AE_NO_MEMORY; + goto cleanup; + } + + /* Area for the string starts after PNP_DEVICE_ID struct */ + + sub->string = + ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id)); + + /* Simply copy existing string */ + + ACPI_STRCPY(sub->string, obj_desc->string.pointer); + sub->length = length; + *return_id = sub; + + cleanup: + + /* On exit, we must delete the return object */ + + acpi_ut_remove_reference(obj_desc); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * * FUNCTION: acpi_ut_execute_UID * * PARAMETERS: device_node - Node for the device @@ -144,10 +212,10 @@ cleanup: acpi_status acpi_ut_execute_UID(struct acpi_namespace_node *device_node, - struct acpica_device_id **return_id) + struct acpi_pnp_device_id **return_id) { union acpi_operand_object *obj_desc; - struct acpica_device_id *uid; + struct acpi_pnp_device_id *uid; u32 length; acpi_status status; @@ -171,16 +239,17 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node, /* Allocate a buffer for the UID */ uid = - ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) + + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) + (acpi_size) length); if (!uid) { status = AE_NO_MEMORY; goto cleanup; } - /* Area for the string starts after DEVICE_ID struct */ + /* Area for the string starts after PNP_DEVICE_ID struct */ - uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id)); + uid->string = + ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id)); /* Convert an Integer to string, or just copy an existing string */ @@ -226,11 +295,11 @@ cleanup: acpi_status acpi_ut_execute_CID(struct acpi_namespace_node *device_node, - struct acpica_device_id_list **return_cid_list) + struct acpi_pnp_device_id_list **return_cid_list) { union acpi_operand_object **cid_objects; union acpi_operand_object *obj_desc; - struct acpica_device_id_list *cid_list; + struct acpi_pnp_device_id_list *cid_list; char *next_id_string; u32 string_area_size; u32 length; @@ -288,11 +357,12 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node, /* * Now that we know the length of the CIDs, allocate return buffer: * 1) Size of the base structure + - * 2) Size of the CID DEVICE_ID array + + * 2) Size of the CID PNP_DEVICE_ID array + * 3) Size of the actual CID strings */ - cid_list_size = sizeof(struct acpica_device_id_list) + - ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size; + cid_list_size = sizeof(struct acpi_pnp_device_id_list) + + ((count - 1) * sizeof(struct acpi_pnp_device_id)) + + string_area_size; cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size); if (!cid_list) { @@ -300,10 +370,10 @@ 
acpi_ut_execute_CID(struct acpi_namespace_node *device_node, goto cleanup; } - /* Area for CID strings starts after the CID DEVICE_ID array */ + /* Area for CID strings starts after the CID PNP_DEVICE_ID array */ next_id_string = ACPI_CAST_PTR(char, cid_list->ids) + - ((acpi_size) count * sizeof(struct acpica_device_id)); + ((acpi_size) count * sizeof(struct acpi_pnp_device_id)); /* Copy/convert the CIDs to the return buffer */ diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index d88a8aaab2a..49563674833 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c @@ -81,7 +81,7 @@ typedef union uint64_overlay { * RETURN: Status (Checks for divide-by-zero) * * DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits) - * divide and modulo. The result is a 64-bit quotient and a + * divide and modulo. The result is a 64-bit quotient and a * 32-bit remainder. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 33c6cf7ff46..9286a69eb9a 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -41,8 +41,6 @@ * POSSIBILITY OF SUCH DAMAGES. */ -#include <linux/module.h> - #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" @@ -201,8 +199,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) */ acpi_gbl_owner_id_mask[j] |= (1 << k); - acpi_gbl_last_owner_id_index = (u8) j; - acpi_gbl_next_owner_id_offset = (u8) (k + 1); + acpi_gbl_last_owner_id_index = (u8)j; + acpi_gbl_next_owner_id_offset = (u8)(k + 1); /* * Construct encoded ID from the index and bit position @@ -252,7 +250,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id) * control method or unloading a table. Either way, we would * ignore any error anyway. * - * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 + * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255 * ******************************************************************************/ @@ -339,6 +337,73 @@ void acpi_ut_strupr(char *src_string) return; } +#ifdef ACPI_ASL_COMPILER +/******************************************************************************* + * + * FUNCTION: acpi_ut_strlwr (strlwr) + * + * PARAMETERS: src_string - The source string to convert + * + * RETURN: None + * + * DESCRIPTION: Convert string to lowercase + * + * NOTE: This is not a POSIX function, so it appears here, not in utclib.c + * + ******************************************************************************/ + +void acpi_ut_strlwr(char *src_string) +{ + char *string; + + ACPI_FUNCTION_ENTRY(); + + if (!src_string) { + return; + } + + /* Walk entire string, lowercasing the letters */ + + for (string = src_string; *string; string++) { + *string = (char)ACPI_TOLOWER(*string); + } + + return; +} + +/****************************************************************************** + * + * FUNCTION: acpi_ut_stricmp + * + * PARAMETERS: string1 - first string to compare + * string2 - second string to compare + * + * RETURN: int that signifies string relationship. Zero means strings + * are equal. 
+ * + * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare + * strings with no case sensitivity) + * + ******************************************************************************/ + +int acpi_ut_stricmp(char *string1, char *string2) +{ + int c1; + int c2; + + do { + c1 = tolower((int)*string1); + c2 = tolower((int)*string2); + + string1++; + string2++; + } + while ((c1 == c2) && (c1)); + + return (c1 - c2); +} +#endif + /******************************************************************************* * * FUNCTION: acpi_ut_print_string @@ -469,8 +534,8 @@ u32 acpi_ut_dword_byte_swap(u32 value) * RETURN: None * * DESCRIPTION: Set the global integer bit width based upon the revision - * of the DSDT. For Revision 1 and 0, Integers are 32 bits. - * For Revision 2 and above, Integers are 64 bits. Yes, this + * of the DSDT. For Revision 1 and 0, Integers are 32 bits. + * For Revision 2 and above, Integers are 64 bits. Yes, this * makes a difference. * ******************************************************************************/ @@ -606,7 +671,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position) * * RETURN: TRUE if the name is valid, FALSE otherwise * - * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: + * DESCRIPTION: Check for a valid ACPI name. Each character must be one of: * 1) Upper case alpha * 2) numeric * 3) underscore @@ -638,29 +703,59 @@ u8 acpi_ut_valid_acpi_name(u32 name) * RETURN: Repaired version of the name * * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and - * return the new name. + * return the new name. NOTE: the Name parameter must reside in + * read/write memory, cannot be a const. + * + * An ACPI Name must consist of valid ACPI characters. We will repair the name + * if necessary because we don't want to abort because of this, but we want + * all namespace names to be printable. A warning message is appropriate. + * + * This issue came up because there are in fact machines that exhibit + * this problem, and we want to be able to enable ACPI support for them, + * even though there are a few bad names. * ******************************************************************************/ -acpi_name acpi_ut_repair_name(char *name) +void acpi_ut_repair_name(char *name) { - u32 i; - char new_name[ACPI_NAME_SIZE]; + u32 i; + u8 found_bad_char = FALSE; + u32 original_name; + + ACPI_FUNCTION_NAME(ut_repair_name); + + ACPI_MOVE_NAME(&original_name, name); + + /* Check each character in the name */ for (i = 0; i < ACPI_NAME_SIZE; i++) { - new_name[i] = name[i]; + if (acpi_ut_valid_acpi_char(name[i], i)) { + continue; + } /* * Replace a bad character with something printable, yet technically * still invalid. This prevents any collisions with existing "good" * names in the namespace. 
*/ - if (!acpi_ut_valid_acpi_char(name[i], i)) { - new_name[i] = '*'; - } + name[i] = '*'; + found_bad_char = TRUE; } - return (*(u32 *) new_name); + if (found_bad_char) { + + /* Report warning only if in strict mode or debug mode */ + + if (!acpi_gbl_enable_interpreter_slack) { + ACPI_WARNING((AE_INFO, + "Found bad character(s) in name, repaired: [%4.4s]\n", + name)); + } else { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Found bad character(s) in name, repaired: [%4.4s]\n", + name)); + } + } } /******************************************************************************* @@ -681,7 +776,7 @@ acpi_name acpi_ut_repair_name(char *name) * ******************************************************************************/ -acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) +acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer) { u32 this_digit = 0; u64 return_value = 0; @@ -754,14 +849,14 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) /* Convert ASCII 0-9 to Decimal value */ - this_digit = ((u8) * string) - '0'; + this_digit = ((u8)*string) - '0'; } else if (base == 10) { /* Digit is out of range; possible in to_integer case only */ term = 1; } else { - this_digit = (u8) ACPI_TOUPPER(*string); + this_digit = (u8)ACPI_TOUPPER(*string); if (ACPI_IS_XDIGIT((char)this_digit)) { /* Convert ASCII Hex char to value */ @@ -788,8 +883,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) valid_digits++; - if (sign_of0x && ((valid_digits > 16) - || ((valid_digits > 8) && mode32))) { + if (sign_of0x + && ((valid_digits > 16) + || ((valid_digits > 8) && mode32))) { /* * This is to_integer operation case. * No any restrictions for string-to-integer conversion, @@ -800,7 +896,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer) /* Divide the digit into the correct position */ - (void)acpi_ut_short_divide((dividend - (u64) this_digit), + (void)acpi_ut_short_divide((dividend - (u64)this_digit), base, "ient, NULL); if (return_value > quotient) { @@ -890,7 +986,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object, ******************************************************************************/ acpi_status -acpi_ut_walk_package_tree(union acpi_operand_object * source_object, +acpi_ut_walk_package_tree(union acpi_operand_object *source_object, void *target_object, acpi_pkg_callback walk_callback, void *context) { @@ -917,10 +1013,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, /* * Check for: - * 1) An uninitialized package element. It is completely + * 1) An uninitialized package element. It is completely * legal to declare a package and leave it uninitialized * 2) Not an internal object - can be a namespace node instead - * 3) Any type other than a package. Packages are handled in else + * 3) Any type other than a package. Packages are handled in else * case below. */ if ((!this_source_obj) || @@ -939,7 +1035,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object, state->pkg.source_object->package.count) { /* * We've handled all of the objects at this level, This means - * that we have just completed a package. That package may + * that we have just completed a package. That package may * have contained one or more packages itself. * * Delete this state and pop the previous state (package). 
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 296baa676bc..5ccf57c0d87 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -193,6 +193,8 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) acpi_gbl_mutex_info[mutex_id].mutex = NULL; acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; + + return_VOID; } /******************************************************************************* @@ -226,9 +228,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) /* * Mutex debug code, for internal debugging only. * - * Deadlock prevention. Check if this thread owns any mutexes of value - * greater than or equal to this one. If so, the thread has violated - * the mutex ordering rule. This indicates a coding error somewhere in + * Deadlock prevention. Check if this thread owns any mutexes of value + * greater than or equal to this one. If so, the thread has violated + * the mutex ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. */ for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { @@ -319,9 +321,9 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) /* * Mutex debug code, for internal debugging only. * - * Deadlock prevention. Check if this thread owns any mutexes of value - * greater than this one. If so, the thread has violated the mutex - * ordering rule. This indicates a coding error somewhere in + * Deadlock prevention. Check if this thread owns any mutexes of value + * greater than this one. If so, the thread has violated the mutex + * ordering rule. This indicates a coding error somewhere in * the ACPI subsystem code. */ for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) { diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index 655f0799a39..5c52ca78f6f 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -77,7 +77,7 @@ acpi_ut_get_element_length(u8 object_type, * * NOTE: We always allocate the worst-case object descriptor because * these objects are cached, and we want them to be - * one-size-satisifies-any-request. This in itself may not be + * one-size-satisifies-any-request. This in itself may not be * the most memory efficient, but the efficiency of the object * cache should more than make up for this! * @@ -370,9 +370,9 @@ u8 acpi_ut_valid_internal_object(void *object) * line_number - Caller's line number (for error output) * component_id - Caller's component ID (for error output) * - * RETURN: Pointer to newly allocated object descriptor. Null on error + * RETURN: Pointer to newly allocated object descriptor. Null on error * - * DESCRIPTION: Allocate a new object descriptor. Gracefully handle + * DESCRIPTION: Allocate a new object descriptor. Gracefully handle * error conditions. * ******************************************************************************/ @@ -554,7 +554,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, /* * Account for the space required by the object rounded up to the next - * multiple of the machine word size. This keeps each object aligned + * multiple of the machine word size. This keeps each object aligned * on a machine word boundary. (preventing alignment faults on some * machines.) 
*/ diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index a1c98826007..cee0473ba81 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c @@ -147,7 +147,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state * * RETURN: The new state object. NULL on failure. * - * DESCRIPTION: Create a generic state object. Attempt to obtain one from + * DESCRIPTION: Create a generic state object. Attempt to obtain one from * the global state cache; If none available, create a new one. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c new file mode 100644 index 00000000000..a424a9e3fea --- /dev/null +++ b/drivers/acpi/acpica/uttrack.c @@ -0,0 +1,692 @@ +/****************************************************************************** + * + * Module Name: uttrack - Memory allocation tracking routines (debug only) + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +/* + * These procedures are used for tracking memory leaks in the subsystem, and + * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set. + * + * Each memory allocation is tracked via a doubly linked list. Each + * element contains the caller's component, module name, function name, and + * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call + * acpi_ut_track_allocation to add an element to the list; deletion + * occurs in the body of acpi_ut_free. 
+ */ + +#include <acpi/acpi.h> +#include "accommon.h" + +#ifdef ACPI_DBG_TRACK_ALLOCATIONS + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("uttrack") + +/* Local prototypes */ +static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct + acpi_debug_mem_block + *allocation); + +static acpi_status +acpi_ut_track_allocation(struct acpi_debug_mem_block *address, + acpi_size size, + u8 alloc_type, + u32 component, const char *module, u32 line); + +static acpi_status +acpi_ut_remove_allocation(struct acpi_debug_mem_block *address, + u32 component, const char *module, u32 line); + +/******************************************************************************* + * + * FUNCTION: acpi_ut_create_list + * + * PARAMETERS: cache_name - Ascii name for the cache + * object_size - Size of each cached object + * return_cache - Where the new cache object is returned + * + * RETURN: Status + * + * DESCRIPTION: Create a local memory list for tracking purposed + * + ******************************************************************************/ + +acpi_status +acpi_ut_create_list(char *list_name, + u16 object_size, struct acpi_memory_list **return_cache) +{ + struct acpi_memory_list *cache; + + cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); + if (!cache) { + return (AE_NO_MEMORY); + } + + ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); + + cache->list_name = list_name; + cache->object_size = object_size; + + *return_cache = cache; + return (AE_OK); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_allocate_and_track + * + * PARAMETERS: size - Size of the allocation + * component - Component type of caller + * module - Source file name of caller + * line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: The subsystem's equivalent of malloc. + * + ******************************************************************************/ + +void *acpi_ut_allocate_and_track(acpi_size size, + u32 component, const char *module, u32 line) +{ + struct acpi_debug_mem_block *allocation; + acpi_status status; + + allocation = + acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header), + component, module, line); + if (!allocation) { + return (NULL); + } + + status = acpi_ut_track_allocation(allocation, size, + ACPI_MEM_MALLOC, component, module, + line); + if (ACPI_FAILURE(status)) { + acpi_os_free(allocation); + return (NULL); + } + + acpi_gbl_global_list->total_allocated++; + acpi_gbl_global_list->total_size += (u32)size; + acpi_gbl_global_list->current_total_size += (u32)size; + if (acpi_gbl_global_list->current_total_size > + acpi_gbl_global_list->max_occupied) { + acpi_gbl_global_list->max_occupied = + acpi_gbl_global_list->current_total_size; + } + + return ((void *)&allocation->user_space); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_allocate_zeroed_and_track + * + * PARAMETERS: size - Size of the allocation + * component - Component type of caller + * module - Source file name of caller + * line - Line number of caller + * + * RETURN: Address of the allocated memory on success, NULL on failure. + * + * DESCRIPTION: Subsystem equivalent of calloc. 
+ * + ******************************************************************************/ + +void *acpi_ut_allocate_zeroed_and_track(acpi_size size, + u32 component, + const char *module, u32 line) +{ + struct acpi_debug_mem_block *allocation; + acpi_status status; + + allocation = + acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header), + component, module, line); + if (!allocation) { + + /* Report allocation error */ + + ACPI_ERROR((module, line, + "Could not allocate size %u", (u32)size)); + return (NULL); + } + + status = acpi_ut_track_allocation(allocation, size, + ACPI_MEM_CALLOC, component, module, + line); + if (ACPI_FAILURE(status)) { + acpi_os_free(allocation); + return (NULL); + } + + acpi_gbl_global_list->total_allocated++; + acpi_gbl_global_list->total_size += (u32)size; + acpi_gbl_global_list->current_total_size += (u32)size; + if (acpi_gbl_global_list->current_total_size > + acpi_gbl_global_list->max_occupied) { + acpi_gbl_global_list->max_occupied = + acpi_gbl_global_list->current_total_size; + } + + return ((void *)&allocation->user_space); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_free_and_track + * + * PARAMETERS: allocation - Address of the memory to deallocate + * component - Component type of caller + * module - Source file name of caller + * line - Line number of caller + * + * RETURN: None + * + * DESCRIPTION: Frees the memory at Allocation + * + ******************************************************************************/ + +void +acpi_ut_free_and_track(void *allocation, + u32 component, const char *module, u32 line) +{ + struct acpi_debug_mem_block *debug_block; + acpi_status status; + + ACPI_FUNCTION_TRACE_PTR(ut_free, allocation); + + if (NULL == allocation) { + ACPI_ERROR((module, line, "Attempt to delete a NULL address")); + + return_VOID; + } + + debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block, + (((char *)allocation) - + sizeof(struct acpi_debug_mem_header))); + + acpi_gbl_global_list->total_freed++; + acpi_gbl_global_list->current_total_size -= debug_block->size; + + status = acpi_ut_remove_allocation(debug_block, + component, module, line); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, "Could not free memory")); + } + + acpi_os_free(debug_block); + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation)); + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_find_allocation + * + * PARAMETERS: allocation - Address of allocated memory + * + * RETURN: Three cases: + * 1) List is empty, NULL is returned. + * 2) Element was found. Returns Allocation parameter. + * 3) Element was not found. Returns position where it should be + * inserted into the list. + * + * DESCRIPTION: Searches for an element in the global allocation tracking list. + * If the element is not found, returns the location within the + * list where the element should be inserted. + * + * Note: The list is ordered by larger-to-smaller addresses. + * + * This global list is used to detect memory leaks in ACPICA as + * well as other issues such as an attempt to release the same + * internal object more than once. Although expensive as far + * as cpu time, this list is much more helpful for finding these + * types of issues than using memory leak detectors outside of + * the ACPICA code. 
+ * + ******************************************************************************/ + +static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct + acpi_debug_mem_block + *allocation) +{ + struct acpi_debug_mem_block *element; + + element = acpi_gbl_global_list->list_head; + if (!element) { + return (NULL); + } + + /* + * Search for the address. + * + * Note: List is ordered by larger-to-smaller addresses, on the + * assumption that a new allocation usually has a larger address + * than previous allocations. + */ + while (element > allocation) { + + /* Check for end-of-list */ + + if (!element->next) { + return (element); + } + + element = element->next; + } + + if (element == allocation) { + return (element); + } + + return (element->previous); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_track_allocation + * + * PARAMETERS: allocation - Address of allocated memory + * size - Size of the allocation + * alloc_type - MEM_MALLOC or MEM_CALLOC + * component - Component type of caller + * module - Source file name of caller + * line - Line number of caller + * + * RETURN: Status + * + * DESCRIPTION: Inserts an element into the global allocation tracking list. + * + ******************************************************************************/ + +static acpi_status +acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation, + acpi_size size, + u8 alloc_type, + u32 component, const char *module, u32 line) +{ + struct acpi_memory_list *mem_list; + struct acpi_debug_mem_block *element; + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation); + + if (acpi_gbl_disable_mem_tracking) { + return_ACPI_STATUS(AE_OK); + } + + mem_list = acpi_gbl_global_list; + status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* + * Search the global list for this address to make sure it is not + * already present. This will catch several kinds of problems. 
+ */ + element = acpi_ut_find_allocation(allocation); + if (element == allocation) { + ACPI_ERROR((AE_INFO, + "UtTrackAllocation: Allocation (%p) already present in global list!", + allocation)); + goto unlock_and_exit; + } + + /* Fill in the instance data */ + + allocation->size = (u32)size; + allocation->alloc_type = alloc_type; + allocation->component = component; + allocation->line = line; + + ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME); + allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0; + + if (!element) { + + /* Insert at list head */ + + if (mem_list->list_head) { + ((struct acpi_debug_mem_block *)(mem_list->list_head))-> + previous = allocation; + } + + allocation->next = mem_list->list_head; + allocation->previous = NULL; + + mem_list->list_head = allocation; + } else { + /* Insert after element */ + + allocation->next = element->next; + allocation->previous = element; + + if (element->next) { + (element->next)->previous = allocation; + } + + element->next = allocation; + } + + unlock_and_exit: + status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_remove_allocation + * + * PARAMETERS: allocation - Address of allocated memory + * component - Component type of caller + * module - Source file name of caller + * line - Line number of caller + * + * RETURN: Status + * + * DESCRIPTION: Deletes an element from the global allocation tracking list. + * + ******************************************************************************/ + +static acpi_status +acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation, + u32 component, const char *module, u32 line) +{ + struct acpi_memory_list *mem_list; + acpi_status status; + + ACPI_FUNCTION_TRACE(ut_remove_allocation); + + if (acpi_gbl_disable_mem_tracking) { + return_ACPI_STATUS(AE_OK); + } + + mem_list = acpi_gbl_global_list; + if (NULL == mem_list->list_head) { + + /* No allocations! */ + + ACPI_ERROR((module, line, + "Empty allocation list, nothing to free!")); + + return_ACPI_STATUS(AE_OK); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Unlink */ + + if (allocation->previous) { + (allocation->previous)->next = allocation->next; + } else { + mem_list->list_head = allocation->next; + } + + if (allocation->next) { + (allocation->next)->previous = allocation->previous; + } + + /* Mark the segment as deleted */ + + ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size); + + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n", + allocation->size)); + + status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); + return_ACPI_STATUS(status); +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_dump_allocation_info + * + * PARAMETERS: None + * + * RETURN: None + * + * DESCRIPTION: Print some info about the outstanding allocations. 
+ * + ******************************************************************************/ + +void acpi_ut_dump_allocation_info(void) +{ +/* + struct acpi_memory_list *mem_list; +*/ + + ACPI_FUNCTION_TRACE(ut_dump_allocation_info); + +/* + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Current allocations", + mem_list->current_count, + ROUND_UP_TO_1K (mem_list->current_size))); + + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", + mem_list->max_concurrent_count, + ROUND_UP_TO_1K (mem_list->max_concurrent_size))); + + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", + running_object_count, + ROUND_UP_TO_1K (running_object_size))); + + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", + running_alloc_count, + ROUND_UP_TO_1K (running_alloc_size))); + + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Current Nodes", + acpi_gbl_current_node_count, + ROUND_UP_TO_1K (acpi_gbl_current_node_size))); + + ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, + ("%30s: %4d (%3d Kb)\n", "Max Nodes", + acpi_gbl_max_concurrent_node_count, + ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * + sizeof (struct acpi_namespace_node))))); +*/ + return_VOID; +} + +/******************************************************************************* + * + * FUNCTION: acpi_ut_dump_allocations + * + * PARAMETERS: component - Component(s) to dump info for. + * module - Module to dump info for. NULL means all. + * + * RETURN: None + * + * DESCRIPTION: Print a list of all outstanding allocations. + * + ******************************************************************************/ + +void acpi_ut_dump_allocations(u32 component, const char *module) +{ + struct acpi_debug_mem_block *element; + union acpi_descriptor *descriptor; + u32 num_outstanding = 0; + u8 descriptor_type; + + ACPI_FUNCTION_TRACE(ut_dump_allocations); + + if (acpi_gbl_disable_mem_tracking) { + return_VOID; + } + + /* + * Walk the allocation list. 
+ */ + if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) { + return_VOID; + } + + element = acpi_gbl_global_list->list_head; + while (element) { + if ((element->component & component) && + ((module == NULL) + || (0 == ACPI_STRCMP(module, element->module)))) { + descriptor = + ACPI_CAST_PTR(union acpi_descriptor, + &element->user_space); + + if (element->size < + sizeof(struct acpi_common_descriptor)) { + acpi_os_printf("%p Length 0x%04X %9.9s-%u " + "[Not a Descriptor - too small]\n", + descriptor, element->size, + element->module, element->line); + } else { + /* Ignore allocated objects that are in a cache */ + + if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) != + ACPI_DESC_TYPE_CACHED) { + acpi_os_printf + ("%p Length 0x%04X %9.9s-%u [%s] ", + descriptor, element->size, + element->module, element->line, + acpi_ut_get_descriptor_name + (descriptor)); + + /* Validate the descriptor type using Type field and length */ + + descriptor_type = 0; /* Not a valid descriptor type */ + + switch (ACPI_GET_DESCRIPTOR_TYPE + (descriptor)) { + case ACPI_DESC_TYPE_OPERAND: + if (element->size == + sizeof(union + acpi_operand_object)) + { + descriptor_type = + ACPI_DESC_TYPE_OPERAND; + } + break; + + case ACPI_DESC_TYPE_PARSER: + if (element->size == + sizeof(union + acpi_parse_object)) { + descriptor_type = + ACPI_DESC_TYPE_PARSER; + } + break; + + case ACPI_DESC_TYPE_NAMED: + if (element->size == + sizeof(struct + acpi_namespace_node)) + { + descriptor_type = + ACPI_DESC_TYPE_NAMED; + } + break; + + default: + break; + } + + /* Display additional info for the major descriptor types */ + + switch (descriptor_type) { + case ACPI_DESC_TYPE_OPERAND: + acpi_os_printf + ("%12.12s RefCount 0x%04X\n", + acpi_ut_get_type_name + (descriptor->object.common. + type), + descriptor->object.common. + reference_count); + break; + + case ACPI_DESC_TYPE_PARSER: + acpi_os_printf + ("AmlOpcode 0x%04hX\n", + descriptor->op.asl. + aml_opcode); + break; + + case ACPI_DESC_TYPE_NAMED: + acpi_os_printf("%4.4s\n", + acpi_ut_get_node_name + (&descriptor-> + node)); + break; + + default: + acpi_os_printf("\n"); + break; + } + } + } + + num_outstanding++; + } + + element = element->next; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY); + + /* Print summary */ + + if (!num_outstanding) { + ACPI_INFO((AE_INFO, "No outstanding allocations")); + } else { + ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations", + num_outstanding, num_outstanding)); + } + + return_VOID; +} + +#endif /* ACPI_DBG_TRACK_ALLOCATIONS */ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index b09632b4f5b..390db0ca5e2 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -147,7 +147,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status) * RETURN: status - the status of the call * * DESCRIPTION: This function is called to get information about the current - * state of the ACPI subsystem. It will return system information + * state of the ACPI subsystem. It will return system information * in the out_buffer. 
* * If the function fails an appropriate status will be returned @@ -238,7 +238,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function) } acpi_gbl_init_handler = handler; - return AE_OK; + return (AE_OK); } ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) @@ -263,6 +263,7 @@ acpi_status acpi_purge_cached_objects(void) (void)acpi_os_purge_cache(acpi_gbl_operand_cache); (void)acpi_os_purge_cache(acpi_gbl_ps_node_cache); (void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache); + return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 6d63cc39b9a..d4d3826140d 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -408,7 +408,7 @@ acpi_ut_namespace_error(const char *module_name, ACPI_MOVE_32_TO_32(&bad_name, ACPI_CAST_PTR(u32, internal_name)); - acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name); + acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name); } else { /* Convert path to external format */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 1599566ed1f..da93c003e95 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -994,7 +994,7 @@ err: return rc; } -static int __devexit ghes_remove(struct platform_device *ghes_dev) +static int ghes_remove(struct platform_device *ghes_dev) { struct ghes *ghes; struct acpi_hest_generic *generic; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 45e3e1759fb..7efaeaa53b8 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include <linux/dmi.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <asm/unaligned.h> #ifdef CONFIG_ACPI_PROCFS_POWER #include <linux/proc_fs.h> @@ -95,6 +96,18 @@ enum { ACPI_BATTERY_ALARM_PRESENT, ACPI_BATTERY_XINFO_PRESENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, + /* On Lenovo Thinkpad models from 2010 and 2011, the power unit + switches between mWh and mAh depending on whether the system + is running on battery or not. When mAh is the unit, most + reported values are incorrect and need to be adjusted by + 10000/design_voltage. Verified on x201, t410, t410s, and x220. + Pre-2010 and 2012 models appear to always report in mWh and + are thus unaffected (tested with t42, t61, t500, x200, x300, + and x230). Also, in mid-2012 Lenovo issued a BIOS update for + the 2011 models that fixes the issue (tested on x220 with a + post-1.29 BIOS), but as of Nov. 2012, no such update is + available for the 2010 models. */ + ACPI_BATTERY_QUIRK_THINKPAD_MAH, }; struct acpi_battery { @@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) kfree(buffer.pointer); if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) battery->full_charge_capacity = battery->design_capacity; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->design_capacity = battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + /* Curiously, design_capacity_low, unlike the rest of them, + is correct. */ + /* capacity_granularity_* equal 1 on the systems tested, so + it's impossible to tell if they would need an adjustment + or not if their values were higher. 
*/ + } return result; } @@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } return result; } @@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery) mutex_unlock(&battery->sysfs_lock); } +static void find_battery(const struct dmi_header *dm, void *private) +{ + struct acpi_battery *battery = (struct acpi_battery *)private; + /* Note: the hardcoded offsets below have been extracted from + the source code of dmidecode. */ + if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { + const u8 *dmi_data = (const u8 *)(dm + 1); + int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); + if (dm->length >= 18) + dmi_capacity *= dmi_data[17]; + if (battery->design_capacity * battery->design_voltage / 1000 + != dmi_capacity && + battery->design_capacity * 10 == dmi_capacity) + set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags); + } +} + /* * According to the ACPI spec, some kinds of primary batteries can * report percentage battery remaining capacity directly to OS. @@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; } + + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) + return ; + + if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { + const char *s; + s = dmi_get_system_info(DMI_PRODUCT_VERSION); + if (s && !strnicmp(s, "ThinkPad", 8)) { + dmi_walk(find_battery, battery); + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags) && + battery->design_voltage) { + battery->design_capacity = + battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = + battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } + } + } } static int acpi_battery_update(struct acpi_battery *battery) diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c index 1f9f7d7d7bc..811910b50b7 100644 --- a/drivers/acpi/container.c +++ b/drivers/acpi/container.c @@ -92,17 +92,24 @@ static int is_device_present(acpi_handle handle) return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT); } +static bool is_container_device(const char *hid) +{ + const struct acpi_device_id *container_id; + + for (container_id = container_device_ids; + container_id->id[0]; container_id++) { + if (!strcmp((char *)container_id->id, hid)) + return true; + } + + return false; +} + /*******************************************************************/ static int acpi_container_add(struct acpi_device *device) { struct acpi_container *container; - - if (!device) { - printk(KERN_ERR PREFIX "device is NULL\n"); - return -EINVAL; - } - container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL); if (!container) return -ENOMEM; @@ -164,7 +171,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) case ACPI_NOTIFY_BUS_CHECK: /* Fall through */ case ACPI_NOTIFY_DEVICE_CHECK: - 
printk(KERN_WARNING "Container driver received %s event\n", + pr_debug("Container driver received %s event\n", (type == ACPI_NOTIFY_BUS_CHECK) ? "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); @@ -185,7 +192,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context) result = container_device_add(&device, handle); if (result) { - printk(KERN_WARNING "Failed to add container\n"); + acpi_handle_warn(handle, "Failed to add container\n"); break; } @@ -232,10 +239,8 @@ container_walk_namespace_cb(acpi_handle handle, goto end; } - if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") && - strcmp(hid, "PNP0A06")) { + if (!is_container_device(hid)) goto end; - } switch (*action) { case INSTALL_NOTIFY_HANDLER: diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 88eb1430466..f32bd47b35e 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -31,6 +31,7 @@ #include <linux/platform_device.h> #include <linux/jiffies.h> #include <linux/stddef.h> +#include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -460,12 +461,8 @@ static void handle_dock(struct dock_station *ds, int dock) struct acpi_object_list arg_list; union acpi_object arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer); - - printk(KERN_INFO PREFIX "%s - %s\n", - (char *)name_buffer.pointer, dock ? "docking" : "undocking"); + acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking"); /* _DCK method has one argument */ arg_list.count = 1; @@ -474,11 +471,10 @@ static void handle_dock(struct dock_station *ds, int dock) arg.integer.value = dock; status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) - ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute" - " _DCK\n", (char *)name_buffer.pointer)); + acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n", + status); kfree(buffer.pointer); - kfree(name_buffer.pointer); } static inline void dock(struct dock_station *ds) @@ -525,9 +521,11 @@ static void dock_lock(struct dock_station *ds, int lock) status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { if (lock) - printk(KERN_WARNING PREFIX "Locking device failed\n"); + acpi_handle_warn(ds->handle, + "Locking device failed (0x%x)\n", status); else - printk(KERN_WARNING PREFIX "Unlocking device failed\n"); + acpi_handle_warn(ds->handle, + "Unlocking device failed (0x%x)\n", status); } } @@ -667,7 +665,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event) dock_lock(ds, 0); eject_dock(ds); if (dock_present(ds)) { - printk(KERN_ERR PREFIX "Unable to undock!\n"); + acpi_handle_err(ds->handle, "Unable to undock!\n"); return -EBUSY; } complete_undock(ds); @@ -715,7 +713,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) begin_dock(ds); dock(ds); if (!dock_present(ds)) { - printk(KERN_ERR PREFIX "Unable to dock!\n"); + acpi_handle_err(handle, "Unable to dock!\n"); complete_dock(ds); break; } @@ -743,7 +741,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data) dock_event(ds, event, UNDOCK_EVENT); break; default: - printk(KERN_ERR PREFIX "Unknown dock event %d\n", event); + acpi_handle_err(handle, "Unknown dock event %d\n", event); } } @@ -987,7 +985,7 @@ err_rmgroup: sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group); 
err_unregister: platform_device_unregister(dd); - printk(KERN_ERR "%s encountered error %d\n", __func__, ret); + acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret); return ret; } @@ -1016,51 +1014,39 @@ static int dock_remove(struct dock_station *ds) } /** - * find_dock - look for a dock station + * find_dock_and_bay - look for dock stations and bays * @handle: acpi handle of a device * @lvl: unused - * @context: counter of dock stations found + * @context: unused * @rv: unused * - * This is called by acpi_walk_namespace to look for dock stations. + * This is called by acpi_walk_namespace to look for dock stations and bays. */ static __init acpi_status -find_dock(acpi_handle handle, u32 lvl, void *context, void **rv) +find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv) { - if (is_dock(handle)) + if (is_dock(handle) || is_ejectable_bay(handle)) dock_add(handle); return AE_OK; } -static __init acpi_status -find_bay(acpi_handle handle, u32 lvl, void *context, void **rv) -{ - /* If bay is a dock, it's already handled */ - if (is_ejectable_bay(handle) && !is_dock(handle)) - dock_add(handle); - return AE_OK; -} - static int __init dock_init(void) { if (acpi_disabled) return 0; - /* look for a dock station */ + /* look for dock stations and bays */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL); + ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL); - /* look for bay */ - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL); if (!dock_station_count) { - printk(KERN_INFO PREFIX "No dock devices found.\n"); + pr_info(PREFIX "No dock devices found.\n"); return 0; } register_acpi_bus_notifier(&dock_acpi_notifier); - printk(KERN_INFO PREFIX "%s: %d docks/bays found\n", + pr_info(PREFIX "%s: %d docks/bays found\n", ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count); return 0; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a51df968131..354007d490d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -158,10 +158,10 @@ static int ec_transaction_done(struct acpi_ec *ec) { unsigned long flags; int ret = 0; - spin_lock_irqsave(&ec->curr_lock, flags); + spin_lock_irqsave(&ec->lock, flags); if (!ec->curr || ec->curr->done) ret = 1; - spin_unlock_irqrestore(&ec->curr_lock, flags); + spin_unlock_irqrestore(&ec->lock, flags); return ret; } @@ -175,32 +175,38 @@ static void start_transaction(struct acpi_ec *ec) static void advance_transaction(struct acpi_ec *ec, u8 status) { unsigned long flags; - spin_lock_irqsave(&ec->curr_lock, flags); - if (!ec->curr) + struct transaction *t = ec->curr; + + spin_lock_irqsave(&ec->lock, flags); + if (!t) goto unlock; - if (ec->curr->wlen > ec->curr->wi) { + if (t->wlen > t->wi) { if ((status & ACPI_EC_FLAG_IBF) == 0) acpi_ec_write_data(ec, - ec->curr->wdata[ec->curr->wi++]); + t->wdata[t->wi++]); else goto err; - } else if (ec->curr->rlen > ec->curr->ri) { + } else if (t->rlen > t->ri) { if ((status & ACPI_EC_FLAG_OBF) == 1) { - ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec); - if (ec->curr->rlen == ec->curr->ri) - ec->curr->done = true; + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) + t->done = true; } else goto err; - } else if (ec->curr->wlen == ec->curr->wi && + } else if (t->wlen == t->wi && (status & ACPI_EC_FLAG_IBF) == 0) - ec->curr->done = true; + t->done = true; goto unlock; err: - /* false interrupt, state didn't change */ - if (in_interrupt()) - ++ec->curr->irq_count; + /* + 
* If SCI bit is set, then don't think it's a false IRQ + * otherwise will take a not handled IRQ as a false one. + */ + if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) + ++t->irq_count; + unlock: - spin_unlock_irqrestore(&ec->curr_lock, flags); + spin_unlock_irqrestore(&ec->lock, flags); } static int acpi_ec_sync_query(struct acpi_ec *ec); @@ -238,9 +244,9 @@ static int ec_poll(struct acpi_ec *ec) if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) break; pr_debug(PREFIX "controller reset, restart transaction\n"); - spin_lock_irqsave(&ec->curr_lock, flags); + spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); - spin_unlock_irqrestore(&ec->curr_lock, flags); + spin_unlock_irqrestore(&ec->lock, flags); } return -ETIME; } @@ -253,17 +259,17 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, if (EC_FLAGS_MSI) udelay(ACPI_EC_MSI_UDELAY); /* start transaction */ - spin_lock_irqsave(&ec->curr_lock, tmp); + spin_lock_irqsave(&ec->lock, tmp); /* following two actions should be kept atomic */ ec->curr = t; start_transaction(ec); if (ec->curr->command == ACPI_EC_COMMAND_QUERY) clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - spin_unlock_irqrestore(&ec->curr_lock, tmp); + spin_unlock_irqrestore(&ec->lock, tmp); ret = ec_poll(ec); - spin_lock_irqsave(&ec->curr_lock, tmp); + spin_lock_irqsave(&ec->lock, tmp); ec->curr = NULL; - spin_unlock_irqrestore(&ec->curr_lock, tmp); + spin_unlock_irqrestore(&ec->lock, tmp); return ret; } @@ -292,7 +298,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) return -EINVAL; if (t->rdata) memset(t->rdata, 0, t->rlen); - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) { status = -EINVAL; goto unlock; @@ -310,7 +316,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) status = -ETIME; goto end; } - pr_debug(PREFIX "transaction start\n"); + pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n", + t->command, t->wdata ? t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { /* It has to be disabled, so that it doesn't trigger. */ @@ -326,8 +333,9 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) /* It is safe to enable the GPE outside of the transaction. 
*/ acpi_enable_gpe(NULL, ec->gpe); } else if (t->irq_count > ec_storm_threshold) { - pr_info(PREFIX "GPE storm detected, " - "transactions will use polling mode\n"); + pr_info(PREFIX "GPE storm detected(%d GPEs), " + "transactions will use polling mode\n", + t->irq_count); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } pr_debug(PREFIX "transaction end\n"); @@ -335,7 +343,7 @@ end: if (ec->global_lock) acpi_release_global_lock(glk); unlock: - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); return status; } @@ -403,7 +411,7 @@ int ec_burst_disable(void) EXPORT_SYMBOL(ec_burst_disable); -int ec_read(u8 addr, u8 * val) +int ec_read(u8 addr, u8 *val) { int err; u8 temp_data; @@ -468,10 +476,10 @@ void acpi_ec_block_transactions(void) if (!ec) return; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); /* Prevent transactions from being carried out */ set_bit(EC_FLAGS_BLOCKED, &ec->flags); - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); } void acpi_ec_unblock_transactions(void) @@ -481,10 +489,10 @@ void acpi_ec_unblock_transactions(void) if (!ec) return; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); /* Allow transactions to be carried out again */ clear_bit(EC_FLAGS_BLOCKED, &ec->flags); - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); } void acpi_ec_unblock_transactions_early(void) @@ -536,9 +544,9 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, handler->handle = handle; handler->func = func; handler->data = data; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_add(&handler->node, &ec->list); - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); return 0; } @@ -547,14 +555,14 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { struct acpi_ec_query_handler *handler, *tmp; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { if (query_bit == handler->query_bit) { list_del(&handler->node); kfree(handler); } } - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); @@ -601,9 +609,9 @@ static void acpi_ec_gpe_query(void *ec_cxt) struct acpi_ec *ec = ec_cxt; if (!ec) return; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); acpi_ec_sync_query(ec); - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); } static int ec_check_sci(struct acpi_ec *ec, u8 state) @@ -622,10 +630,11 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *data) { struct acpi_ec *ec = data; + u8 status = acpi_ec_read_status(ec); - pr_debug(PREFIX "~~~> interrupt\n"); + pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status); - advance_transaction(ec, acpi_ec_read_status(ec)); + advance_transaction(ec, status); if (ec_transaction_done(ec) && (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { wake_up(&ec->wait); @@ -691,10 +700,10 @@ static struct acpi_ec *make_acpi_ec(void) if (!ec) return NULL; ec->flags = 1 << EC_FLAGS_QUERY_PENDING; - mutex_init(&ec->lock); + mutex_init(&ec->mutex); init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); - spin_lock_init(&ec->curr_lock); + spin_lock_init(&ec->lock); return ec; } @@ -853,12 +862,12 @@ static int acpi_ec_remove(struct acpi_device *device, int type) ec = acpi_driver_data(device); ec_remove_handlers(ec); - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { list_del(&handler->node); kfree(handler); } - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); 
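The ec.c hunks above, together with the matching drivers/acpi/internal.h change below, rename the two EC locks so their roles are explicit: ec->mutex serializes whole transactions in process context, while the ec->lock spinlock protects ec->curr, which the GPE interrupt handler advances. A condensed sketch of that split, with a hypothetical wait_for_transaction() standing in for ec_poll():

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct transaction;			/* the EC driver's per-command state */

struct ec_like {			/* field names follow struct acpi_ec */
	struct mutex mutex;		/* one transaction at a time */
	spinlock_t lock;		/* guards ->curr against the GPE handler */
	struct transaction *curr;
};

int wait_for_transaction(struct ec_like *ec);	/* stand-in for ec_poll(), not a real API */

static int ec_like_transaction(struct ec_like *ec, struct transaction *t)
{
	unsigned long flags;
	int ret;

	mutex_lock(&ec->mutex);			/* process context only */

	spin_lock_irqsave(&ec->lock, flags);
	ec->curr = t;				/* the handler may advance it now */
	spin_unlock_irqrestore(&ec->lock, flags);

	ret = wait_for_transaction(ec);

	spin_lock_irqsave(&ec->lock, flags);
	ec->curr = NULL;
	spin_unlock_irqrestore(&ec->lock, flags);

	mutex_unlock(&ec->mutex);
	return ret;
}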
release_region(ec->data_addr, 1); release_region(ec->command_addr, 1); device->driver_data = NULL; diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index 20a0f2c3ca3..b514e81e8cf 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -70,7 +70,7 @@ static int __devinit acpi_hed_add(struct acpi_device *device) return 0; } -static int __devexit acpi_hed_remove(struct acpi_device *device, int type) +static int acpi_hed_remove(struct acpi_device *device, int type) { hed_handle = NULL; return 0; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 57d41f6e144..3c407cdc1ec 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -58,11 +58,11 @@ struct acpi_ec { unsigned long data_addr; unsigned long global_lock; unsigned long flags; - struct mutex lock; + struct mutex mutex; wait_queue_head_t wait; struct list_head list; struct transaction *curr; - spinlock_t curr_lock; + spinlock_t lock; }; extern struct acpi_ec *first_ec; diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 9eaf708f588..6dc4a2b1e95 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -932,7 +932,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, * having a static work_struct. */ - dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); + dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); if (!dpc) return AE_NO_MEMORY; @@ -944,17 +944,22 @@ static acpi_status __acpi_os_execute(acpi_execute_type type, * because the hotplug code may call driver .remove() functions, * which invoke flush_scheduled_work/acpi_os_wait_events_complete * to flush these workqueues. + * + * To prevent lockdep from complaining unnecessarily, make sure that + * there is a different static lockdep key for each workqueue by using + * INIT_WORK() for each of them separately. */ - queue = hp ? kacpi_hotplug_wq : - (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); - dpc->wait = hp ? 
1 : 0; - - if (queue == kacpi_hotplug_wq) + if (hp) { + queue = kacpi_hotplug_wq; + dpc->wait = 1; INIT_WORK(&dpc->work, acpi_os_execute_deferred); - else if (queue == kacpi_notify_wq) + } else if (type == OSL_NOTIFY_HANDLER) { + queue = kacpi_notify_wq; INIT_WORK(&dpc->work, acpi_os_execute_deferred); - else + } else { + queue = kacpid_wq; INIT_WORK(&dpc->work, acpi_os_execute_deferred); + } /* * On some machines, a software-initiated SMI causes corruption unless @@ -986,6 +991,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, { return __acpi_os_execute(0, function, context, 1); } +EXPORT_SYMBOL(acpi_os_hotplug_execute); void acpi_os_wait_events_complete(void) { diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 1be25a590dc..23a03249013 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -459,19 +459,19 @@ int acpi_pci_irq_enable(struct pci_dev *dev) */ if (gsi < 0) { u32 dev_gsi; - dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin)); /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { - printk(" - using ISA IRQ %d\n", dev->irq); + dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", + pin_name(pin), dev->irq); acpi_register_gsi(&dev->dev, dev_gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); - return 0; } else { - printk("\n"); - return 0; + dev_warn(&dev->dev, "PCI INT %c: no GSI\n", + pin_name(pin)); } + return 0; } rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 40e38a06ba8..7db61b8fa11 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -473,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle) return ret; no_power_resource: - printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!"); + printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!\n"); return -ENODEV; } EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 27adb090bb3..ef98796b382 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file, struct list_head *node, *next; char strbuf[5]; char str[5] = ""; - unsigned int len = count; - if (len > 4) - len = 4; - if (len < 0) - return -EFAULT; + if (count > 4) + count = 4; - if (copy_from_user(strbuf, buffer, len)) + if (copy_from_user(strbuf, buffer, count)) return -EFAULT; - strbuf[len] = '\0'; + strbuf[count] = '\0'; sscanf(strbuf, "%s", str); mutex_lock(&acpi_device_lock); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index bd4e5dca3ff..e83311bf1eb 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -44,6 +44,7 @@ #include <linux/moduleparam.h> #include <linux/cpuidle.h> #include <linux/slab.h> +#include <linux/acpi.h> #include <asm/io.h> #include <asm/cpu.h> @@ -282,7 +283,9 @@ static int acpi_processor_get_info(struct acpi_device *device) /* Declared with "Processor" statement; match ProcessorID */ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Evaluating processor object\n"); + dev_err(&device->dev, + "Failed to evaluate processor object (0x%x)\n", + status); return -ENODEV; } @@ -301,8 +304,9 @@ static int acpi_processor_get_info(struct acpi_device *device) status = acpi_evaluate_integer(pr->handle, 
METHOD_NAME__UID, NULL, &value); if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Evaluating processor _UID [%#x]\n", status); + dev_err(&device->dev, + "Failed to evaluate processor _UID (0x%x)\n", + status); return -ENODEV; } device_declaration = 1; @@ -345,7 +349,7 @@ static int acpi_processor_get_info(struct acpi_device *device) if (!object.processor.pblk_address) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); else if (object.processor.pblk_length != 6) - printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n", + dev_err(&device->dev, "Invalid PBLK length [%d]\n", object.processor.pblk_length); else { pr->throttling.address = object.processor.pblk_address; @@ -430,8 +434,8 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, * Initialize missing things */ if (pr->flags.need_hotplug_init) { - printk(KERN_INFO "Will online and init hotplugged " - "CPU: %d\n", pr->id); + pr_info("Will online and init hotplugged CPU: %d\n", + pr->id); WARN(acpi_processor_start(pr), "Failed to start CPU:" " %d\n", pr->id); pr->flags.need_hotplug_init = 0; @@ -492,14 +496,16 @@ static __ref int acpi_processor_start(struct acpi_processor *pr) &pr->cdev->device.kobj, "thermal_cooling"); if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); + dev_err(&device->dev, + "Failed to create sysfs link 'thermal_cooling'\n"); goto err_thermal_unregister; } result = sysfs_create_link(&pr->cdev->device.kobj, &device->dev.kobj, "device"); if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); + dev_err(&pr->cdev->device, + "Failed to create sysfs link 'device'\n"); goto err_remove_sysfs_thermal; } @@ -561,8 +567,9 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) */ if (per_cpu(processor_device_array, pr->id) != NULL && per_cpu(processor_device_array, pr->id) != device) { - printk(KERN_WARNING "BIOS reported wrong ACPI id " - "for the processor\n"); + dev_warn(&device->dev, + "BIOS reported wrong ACPI id %d for the processor\n", + pr->id); result = -ENODEV; goto err_free_cpumask; } @@ -695,8 +702,8 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) static void acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data) { - struct acpi_processor *pr; struct acpi_device *device = NULL; + struct acpi_eject_event *ej_event = NULL; u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ int result; @@ -716,7 +723,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle, result = acpi_processor_device_add(handle, &device); if (result) { - printk(KERN_ERR PREFIX "Unable to add the device\n"); + acpi_handle_err(handle, "Unable to add the device\n"); break; } @@ -728,20 +735,29 @@ static void acpi_processor_hotplug_notify(acpi_handle handle, "received ACPI_NOTIFY_EJECT_REQUEST\n")); if (acpi_bus_get_device(handle, &device)) { - printk(KERN_ERR PREFIX - "Device don't exist, dropping EJECT\n"); + acpi_handle_err(handle, + "Device don't exist, dropping EJECT\n"); break; } - pr = acpi_driver_data(device); - if (!pr) { - printk(KERN_ERR PREFIX - "Driver data is NULL, dropping EJECT\n"); + if (!acpi_driver_data(device)) { + acpi_handle_err(handle, + "Driver data is NULL, dropping EJECT\n"); break; } - /* REVISIT: update when eject is supported */ - ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; - break; + ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); + if (!ej_event) { + acpi_handle_err(handle, "No memory, dropping EJECT\n"); + break; + } + + ej_event->handle = handle; + ej_event->event = 
ACPI_NOTIFY_EJECT_REQUEST; + acpi_os_hotplug_execute(acpi_bus_hot_remove_device, + (void *)ej_event); + + /* eject is performed asynchronously */ + return; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, @@ -841,7 +857,7 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) * and do it when the CPU gets online the first time * TBD: Cleanup above functions and try to do this more elegant. */ - printk(KERN_INFO "CPU %d got hotplugged\n", pr->id); + pr_info("CPU %d got hotplugged\n", pr->id); pr->flags.need_hotplug_init = 1; return AE_OK; @@ -852,8 +868,22 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr) if (cpu_online(pr->id)) cpu_down(pr->id); + get_online_cpus(); + /* + * The cpu might become online again at this point. So we check whether + * the cpu has been onlined or not. If the cpu became online, it means + * that someone wants to use the cpu. So acpi_processor_handle_eject() + * returns -EAGAIN. + */ + if (unlikely(cpu_online(pr->id))) { + put_online_cpus(); + pr_warn("Failed to remove CPU %d, because other task " + "brought the CPU back online\n", pr->id); + return -EAGAIN; + } arch_unregister_cpu(pr->id); acpi_unmap_lsapic(pr->id); + put_online_cpus(); return (0); } #else diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e8086c72530..f1a5da44591 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) static int acpi_idle_enter_c1(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - ktime_t kt1, kt2; - s64 idle_time; struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; - local_irq_disable(); - - lapic_timer_state_broadcast(pr, cx, 1); - kt1 = ktime_get_real(); acpi_idle_do_entry(cx); - kt2 = ktime_get_real(); - idle_time = ktime_to_us(ktime_sub(kt2, kt1)); - - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; - local_irq_enable(); lapic_timer_state_broadcast(pr, cx, 0); return index; @@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); - ktime_t kt1, kt2; - s64 idle_time_ns; - s64 idle_time; pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; - local_irq_disable(); - - if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; - local_irq_enable(); return -EINVAL; } } @@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, if (cx->type == ACPI_STATE_C3) ACPI_FLUSH_CPU_CACHE(); - kt1 = ktime_get_real(); /* Tell the scheduler that we are going deep-idle: */ sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); - kt2 = ktime_get_real(); - idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); - idle_time = idle_time_ns; - do_div(idle_time, NSEC_PER_USEC); - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; + sched_clock_idle_wakeup_event(0); - /* Tell the scheduler how much we idled: */ - 
sched_clock_idle_wakeup_event(idle_time_ns); - - local_irq_enable(); if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; @@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct acpi_processor *pr; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); - ktime_t kt1, kt2; - s64 idle_time_ns; - s64 idle_time; - pr = __this_cpu_read(processors); - dev->last_residency = 0; if (unlikely(!pr)) return -EINVAL; @@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, return drv->states[drv->safe_state_index].enter(dev, drv, drv->safe_state_index); } else { - local_irq_disable(); acpi_safe_halt(); - local_irq_enable(); return -EBUSY; } } - local_irq_disable(); - - if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (unlikely(need_resched())) { current_thread_info()->status |= TS_POLLING; - local_irq_enable(); return -EINVAL; } } @@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, */ lapic_timer_state_broadcast(pr, cx, 1); - kt1 = ktime_get_real(); /* * disable bus master * bm_check implies we need ARB_DIS @@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, c3_cpu_count--; raw_spin_unlock(&c3_lock); } - kt2 = ktime_get_real(); - idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); - idle_time = idle_time_ns; - do_div(idle_time, NSEC_PER_USEC); - - /* Update device last_residency*/ - dev->last_residency = (int)idle_time; - /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(idle_time_ns); + sched_clock_idle_wakeup_event(0); - local_irq_enable(); if (cx->entry_method != ACPI_CSTATE_FFH) current_thread_info()->status |= TS_POLLING; @@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct cpuidle_driver acpi_idle_driver = { .name = "acpi_idle", .owner = THIS_MODULE, + .en_core_tk_irqen = 1, }; /** diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3db115acea5..53502d1bbf2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -118,6 +118,7 @@ void acpi_bus_hot_remove_device(void *context) struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context; struct acpi_device *device; acpi_handle handle = ej_event->handle; + acpi_handle temp; struct acpi_object_list arg_list; union acpi_object arg; acpi_status status = AE_OK; @@ -138,13 +139,16 @@ void acpi_bus_hot_remove_device(void *context) goto err_out; } + /* device has been freed */ + device = NULL; + /* power off device */ status = acpi_evaluate_object(handle, "_PS3", NULL, NULL); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) printk(KERN_WARNING PREFIX "Power-off device failed\n"); - if (device->flags.lockable) { + if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) { arg_list.count = 1; arg_list.pointer = &arg; arg.type = ACPI_TYPE_INTEGER; @@ -178,6 +182,7 @@ err_out: kfree(context); return; } +EXPORT_SYMBOL(acpi_bus_hot_remove_device); static ssize_t acpi_eject_store(struct device *d, struct device_attribute *attr, @@ -237,6 +242,25 @@ acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *bu } static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); +static ssize_t acpi_device_uid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); 
+ + return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id); +} +static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL); + +static ssize_t acpi_device_adr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct acpi_device *acpi_dev = to_acpi_device(dev); + + return sprintf(buf, "0x%08x\n", + (unsigned int)(acpi_dev->pnp.bus_address)); +} +static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL); + static ssize_t acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); @@ -280,11 +304,21 @@ static ssize_t description_show(struct device *dev, } static DEVICE_ATTR(description, 0444, description_show, NULL); +static ssize_t +acpi_device_sun_show(struct device *dev, struct device_attribute *attr, + char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + + return sprintf(buf, "%lu\n", acpi_dev->pnp.sun); +} +static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); + static int acpi_device_setup_files(struct acpi_device *dev) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; acpi_handle temp; + unsigned long long sun; int result = 0; /* @@ -321,6 +355,21 @@ static int acpi_device_setup_files(struct acpi_device *dev) goto end; } + if (dev->flags.bus_address) + result = device_create_file(&dev->dev, &dev_attr_adr); + if (dev->pnp.unique_id) + result = device_create_file(&dev->dev, &dev_attr_uid); + + status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun); + if (ACPI_SUCCESS(status)) { + dev->pnp.sun = (unsigned long)sun; + result = device_create_file(&dev->dev, &dev_attr_sun); + if (result) + goto end; + } else { + dev->pnp.sun = (unsigned long)-1; + } + /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. 
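The scan.c hunk above wires three new read-only attributes (uid, adr, sun) into each ACPI device's sysfs directory. For reference, a minimal sketch of the same DEVICE_ATTR pattern, using a made-up attribute named "example":

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* One value plus a newline, as the sysfs convention expects. */
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static int example_setup(struct device *dev)
{
	/* Mirrored by device_remove_file() in the teardown path. */
	return device_create_file(dev, &dev_attr_example);
}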
@@ -352,6 +401,14 @@ static void acpi_device_remove_files(struct acpi_device *dev) if (ACPI_SUCCESS(status)) device_remove_file(&dev->dev, &dev_attr_eject); + status = acpi_get_handle(dev->handle, "_SUN", &temp); + if (ACPI_SUCCESS(status)) + device_remove_file(&dev->dev, &dev_attr_sun); + + if (dev->pnp.unique_id) + device_remove_file(&dev->dev, &dev_attr_uid); + if (dev->flags.bus_address) + device_remove_file(&dev->dev, &dev_attr_adr); device_remove_file(&dev->dev, &dev_attr_modalias); device_remove_file(&dev->dev, &dev_attr_hid); if (dev->handle) @@ -428,6 +485,7 @@ static void acpi_device_release(struct device *dev) struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_free_ids(acpi_dev); + kfree(acpi_dev->pnp.unique_id); kfree(acpi_dev); } @@ -910,8 +968,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_bus_set_run_wake_flags(struct acpi_device *device) { struct acpi_device_id button_device_ids[] = { - {"PNP0C0D", 0}, {"PNP0C0C", 0}, + {"PNP0C0D", 0}, {"PNP0C0E", 0}, {"", 0}, }; @@ -923,6 +981,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) { device->wakeup.flags.run_wake = 1; + if (!acpi_match_device_ids(device, &button_device_ids[1])) { + /* Do not use Lid/sleep button for S5 wakeup */ + if (device->wakeup.sleep_state == ACPI_STATE_S5) + device->wakeup.sleep_state = ACPI_STATE_S4; + } device_set_wakeup_capable(&device->dev, true); return; } @@ -1071,11 +1134,6 @@ static int acpi_bus_get_flags(struct acpi_device *device) device->flags.ejectable = 1; } - /* Presence of _LCK indicates 'lockable' */ - status = acpi_get_handle(device->handle, "_LCK", &temp); - if (ACPI_SUCCESS(status)) - device->flags.lockable = 1; - /* Power resources cannot be power manageable. */ if (device->device_type == ACPI_BUS_TYPE_POWER) return 0; @@ -1243,7 +1301,7 @@ static void acpi_device_set_id(struct acpi_device *device) { acpi_status status; struct acpi_device_info *info; - struct acpica_device_id_list *cid_list; + struct acpi_pnp_device_id_list *cid_list; int i; switch (device->device_type) { @@ -1270,6 +1328,9 @@ static void acpi_device_set_id(struct acpi_device *device) device->pnp.bus_address = info->address; device->flags.bus_address = 1; } + if (info->valid & ACPI_VALID_UID) + device->pnp.unique_id = kstrdup(info->unique_id.string, + GFP_KERNEL); kfree(info); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 13a285dffac..2fcc67d34b1 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -103,6 +103,21 @@ void __init acpi_nvs_nosave(void) } /* + * The ACPI specification wants us to save NVS memory regions during hibernation + * but says nothing about saving NVS during S3. Not all versions of Windows + * save NVS on S3 suspend either, and it is clear that not all systems need + * NVS to be saved at S3 time. To improve suspend/resume time, allow the + * user to disable saving NVS on S3 if their system does not require it, but + * continue to save/restore NVS for S4 as specified. + */ +static bool nvs_nosave_s3; + +void __init acpi_nvs_nosave_s3(void) +{ + nvs_nosave_s3 = true; +} + +/* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * user to request that behavior by using the 'acpi_old_suspend_ordering' * kernel command line option that causes the following variable to be set. 
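The sleep.c hunk above introduces acpi_nvs_nosave_s3() so a platform can skip saving NVS memory for S3 only, while S4 keeps the spec-mandated save/restore. A hypothetical DMI quirk using it might look like the sketch below; it is not part of this patch, the vendor/model strings are placeholders, and the declaration is assumed to land in <linux/acpi.h> next to acpi_nvs_nosave():

#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/init.h>

static int __init init_nvs_nosave_s3(const struct dmi_system_id *d)
{
	acpi_nvs_nosave_s3();
	return 0;
}

static struct dmi_system_id __initdata example_sleep_dmi_table[] = {
	{
		.callback = init_nvs_nosave_s3,
		.ident = "Example laptop that resumes slowly when NVS is saved",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{},
};

With nvs_nosave_s3 set, acpi_suspend_begin() (changed later in this hunk to "error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();") simply skips allocating the NVS save area for S3.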
@@ -114,6 +129,180 @@ void __init acpi_old_suspend_ordering(void) old_suspend_ordering = true; } +static int __init init_old_suspend_ordering(const struct dmi_system_id *d) +{ + acpi_old_suspend_ordering(); + return 0; +} + +static int __init init_nvs_nosave(const struct dmi_system_id *d) +{ + acpi_nvs_nosave(); + return 0; +} + +static struct dmi_system_id __initdata acpisleep_dmi_table[] = { + { + .callback = init_old_suspend_ordering, + .ident = "Abit KN9 (nForce4 variant)", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), + DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "HP xw4600 Workstation", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Panasonic CF51-2L", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, + "Matsushita Electric Industrial Co.,Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW21E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB17FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR11M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Everex StepNote Series", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB1Z1E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-NW130D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCCW29FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Averatec AV1020-ED2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI DELUXE", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI Premium", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR26GN_P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCEB1S1E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, 
"VPCEB1S1E"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-FW520F", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54HR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), + }, + }, + {}, +}; + +static void acpi_sleep_dmi_check(void) +{ + dmi_check_system(acpisleep_dmi_table); +} + /** * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. */ @@ -229,6 +418,7 @@ static void acpi_pm_end(void) } #else /* !CONFIG_ACPI_SLEEP */ #define acpi_target_sleep_state ACPI_STATE_S0 +static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND @@ -248,7 +438,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state) u32 acpi_state = acpi_suspend_states[pm_state]; int error = 0; - error = nvs_nosave ? 0 : suspend_nvs_alloc(); + error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc(); if (error) return error; @@ -387,167 +577,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = { .end = acpi_pm_end, .recover = acpi_pm_finish, }; - -static int __init init_old_suspend_ordering(const struct dmi_system_id *d) -{ - old_suspend_ordering = true; - return 0; -} - -static int __init init_nvs_nosave(const struct dmi_system_id *d) -{ - acpi_nvs_nosave(); - return 0; -} - -static struct dmi_system_id __initdata acpisleep_dmi_table[] = { - { - .callback = init_old_suspend_ordering, - .ident = "Abit KN9 (nForce4 variant)", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), - DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "HP xw4600 Workstation", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Panasonic CF51-2L", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, - "Matsushita Electric Industrial Co.,Ltd."), - DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-FW21E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCEB17FX", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-SR11M", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Everex StepNote Series", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCEB1Z1E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), - }, - }, - { - .callback = init_nvs_nosave, - 
.ident = "Sony Vaio VGN-NW130D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VPCCW29FX", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Averatec AV1020-ED2", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), - DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus A8N-SLI DELUXE", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), - }, - }, - { - .callback = init_old_suspend_ordering, - .ident = "Asus A8N-SLI Premium", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), - DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-SR26GN_P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Sony Vaio VGN-FW520F", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Asus K54C", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), - }, - }, - { - .callback = init_nvs_nosave, - .ident = "Asus K54HR", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), - }, - }, - {}, -}; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION @@ -707,13 +736,13 @@ int __init acpi_sleep_init(void) u8 type_a, type_b; #ifdef CONFIG_SUSPEND int i = 0; - - dmi_check_system(acpisleep_dmi_table); #endif if (acpi_disabled) return 0; + acpi_sleep_dmi_check(); + sleep_states[ACPI_STATE_S0] = 1; printk(KERN_INFO PREFIX "(supports S0"); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 7c3f98ba4af..ea61ca9129c 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -476,7 +476,7 @@ static void fixed_event_count(u32 event_number) return; } -static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, +static void acpi_global_event_handler(u32 event_type, acpi_handle device, u32 event_number, void *context) { if (event_type == ACPI_EVENT_TYPE_GPE) @@ -638,7 +638,7 @@ void acpi_irq_stats_init(void) if (all_counters == NULL) goto fail; - status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); + status = acpi_install_global_event_handler(acpi_global_event_handler, NULL); if (ACPI_FAILURE(status)) goto fail; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 804204d4199..6e8cc16b54c 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -984,6 +984,38 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) } } +/* + * On some platforms, the AML code has dependency about + * the evaluating order of _TMP and _CRT/_HOT/_PSV/_ACx. + * 1. On HP Pavilion G4-1016tx, _TMP must be invoked after + * /_CRT/_HOT/_PSV/_ACx, or else system will be power off. + * 2. On HP Compaq 6715b/6715s, the return value of _PSV is 0 + * if _TMP has never been evaluated. + * + * As this dependency is totally transparent to OS, evaluate + * all of them once, in the order of _CRT/_HOT/_PSV/_ACx, + * _TMP, before they are actually used. 
+ */ +static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz) +{ + acpi_handle handle = tz->device->handle; + unsigned long long value; + int i; + + acpi_evaluate_integer(handle, "_CRT", NULL, &value); + acpi_evaluate_integer(handle, "_HOT", NULL, &value); + acpi_evaluate_integer(handle, "_PSV", NULL, &value); + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) { + char name[5] = { '_', 'A', 'C', ('0' + i), '\0' }; + acpi_status status; + + status = acpi_evaluate_integer(handle, name, NULL, &value); + if (status == AE_NOT_FOUND) + break; + } + acpi_evaluate_integer(handle, "_TMP", NULL, &value); +} + static int acpi_thermal_get_info(struct acpi_thermal *tz) { int result = 0; @@ -992,6 +1024,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz) if (!tz) return -EINVAL; + acpi_thermal_aml_dependency_fix(tz); + /* Get trip points [_CRT, _PSV, etc.] (required) */ result = acpi_thermal_get_trip_points(tz); if (result) diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 462f7e30036..74437130431 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -28,6 +28,8 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> +#include <linux/hardirq.h> +#include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -457,3 +459,39 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event, #endif } EXPORT_SYMBOL(acpi_evaluate_hotplug_ost); + +/** + * acpi_handle_printk: Print message with ACPI prefix and object path + * + * This function is called through acpi_handle_<level> macros and prints + * a message with ACPI prefix and object path. This function acquires + * the global namespace mutex to obtain an object path. In interrupt + * context, it shows the object path as <n/a>. + */ +void +acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + struct acpi_buffer buffer = { + .length = ACPI_ALLOCATE_BUFFER, + .pointer = NULL + }; + const char *path; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + if (in_interrupt() || + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK) + path = "<n/a>"; + else + path = buffer.pointer; + + printk("%sACPI: %s: %pV", level, path, &vaf); + + va_end(args); + kfree(buffer.pointer); +} +EXPORT_SYMBOL(acpi_handle_printk); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0230cb6cbb3..ac9a69cd45f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } +static int video_ignore_initial_backlight(const struct dmi_system_id *d) +{ + use_bios_initial_backlight = 0; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Folio 13-2000", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), + }, + }, {} }; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index b728880ef10..4ac2593234e 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "X360"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Asus UL30VT", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), + }, + }, { }, }; diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index b1ae48054dc..b7078afddb7 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_suspend(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index fd9ecf74e63..5b0ba3f20ed 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev, struct acpi_device *acpi_dev; struct acpi_device_power_state *states; - if (ap->flags & ATA_FLAG_ACPI_SATA) - ata_dev = &ap->link.device[sdev->channel]; - else + if (ap->flags & ATA_FLAG_ACPI_SATA) { + if (!sata_pmp_attached(ap)) + ata_dev = &ap->link.device[sdev->id]; + else + ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id]; + } + else { ata_dev = &ap->link.device[sdev->id]; + } *handle = ata_dev_acpi_handle(ata_dev); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3cc7096cfda..f46fbd3bd3f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) if (xfer_mode == t->mode) return t; + + WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", + __func__, xfer_mode); + return NULL; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e3bda074fa1..a6df6a351d6 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1052,6 
+1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. Set dev_blocked to 1 to diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index 26201ebef3c..371fd2c698b 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev) return ret; } + ret = clk_set_rate(acdev->clk, 166000000); + if (ret) { + dev_warn(acdev->host->dev, "clock set rate failed"); + return ret; + } + spin_lock_irqsave(&acdev->host->lock, flags); /* configure CF interface clock */ writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk : @@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int arasan_cf_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 0d7c4c2cd26..400bf1c3e98 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = { }; MODULE_DEVICE_TABLE(of, ahci_of_match); -static int __init ahci_highbank_probe(struct platform_device *pdev) +static int __devinit ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; @@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 44a4256533e..08608de87e4 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/base/core.c b/drivers/base/core.c index abea76c36a4..150a41580fa 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1180,7 +1180,6 @@ void device_del(struct device *dev) if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 
BUS_NOTIFY_DEL_DEVICE, dev); - device_pm_remove(dev); dpm_sysfs_remove(dev); if (parent) klist_del(&dev->p->knode_parent); @@ -1205,6 +1204,7 @@ void device_del(struct device *dev) device_remove_file(dev, &uevent_attr); device_remove_attrs(dev); bus_remove_device(dev); + device_pm_remove(dev); driver_deferred_probe_del(dev); /* Notify the platform of the removal, in case they diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index eb78e9640c4..9d8fde70939 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce) if (ce->status < PCE_STATUS_ERROR) { if (ce->status == PCE_STATUS_ENABLED) - clk_disable(ce->clk); + clk_disable_unprepare(ce->clk); if (ce->status >= PCE_STATUS_ACQUIRED) clk_put(ce->clk); @@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id) clk = clk_get(dev, con_id); if (!IS_ERR(clk)) { - clk_enable(clk); + clk_prepare_enable(clk); clk_put(clk); dev_info(dev, "Runtime PM disabled, clock forced on.\n"); } @@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id) clk = clk_get(dev, con_id); if (!IS_ERR(clk)) { - clk_disable(clk); + clk_disable_unprepare(clk); clk_put(clk); dev_info(dev, "Runtime PM disabled, clock forced off.\n"); } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index d9468642fc4..50b2831e027 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -23,6 +23,7 @@ #include <linux/rcupdate.h> #include <linux/opp.h> #include <linux/of.h> +#include <linux/export.h> /* * Internal data structure organization with the OPP layer library is as @@ -65,6 +66,7 @@ struct opp { unsigned long u_volt; struct device_opp *dev_opp; + struct rcu_head head; }; /** @@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp) return v; } +EXPORT_SYMBOL(opp_get_voltage); /** * opp_get_freq() - Gets the frequency corresponding to an available opp @@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp) return f; } +EXPORT_SYMBOL(opp_get_freq); /** * opp_get_opp_count() - Get number of opps available in the opp list @@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev) return count; } +EXPORT_SYMBOL(opp_get_opp_count); /** * opp_find_freq_exact() - search for an exact frequency @@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev) * * Searches for exact match in the opp list and returns pointer to the matching * opp if found, else returns ERR_PTR in case of error and should be handled - * using IS_ERR. + * using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices * * Note: available is a modifier for the search. if available=true, then the * match is for exact matching frequency and is available in the stored OPP @@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) { @@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, return opp; } +EXPORT_SYMBOL(opp_find_freq_exact); /** * opp_find_freq_ceil() - Search for an rounded ceil freq @@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, * for a device. 
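The opp.c changes above and below export the OPP lookup helpers and make them return ERR_PTR(-EINVAL/-ENODEV/-ERANGE) consistently, so callers must use IS_ERR()/PTR_ERR() rather than checking for NULL. A sketch of a typical, hypothetical caller, which must hold rcu_read_lock() across the lookup and any opp_get_*() calls:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *target_freq,
			    unsigned long *u_volt)
{
	struct opp *opp;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, target_freq);	/* may round *target_freq up */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);			/* -EINVAL, -ENODEV or -ERANGE */
	}
	*u_volt = opp_get_voltage(opp);			/* opp is only valid under RCU */
	rcu_read_unlock();

	return 0;
}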
* * Returns matching *opp and refreshes *freq accordingly, else returns - * ERR_PTR in case of error and should be handled using IS_ERR. + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices * * Locking: This function must be called under rcu_read_lock(). opp is a rcu * protected pointer. The reason for the same is that the opp pointer which is @@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) - return opp; + return ERR_CAST(dev_opp); list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { if (temp_opp->available && temp_opp->rate >= *freq) { @@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) return opp; } +EXPORT_SYMBOL(opp_find_freq_ceil); /** * opp_find_freq_floor() - Search for a rounded floor freq @@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) * for a device. * * Returns matching *opp and refreshes *freq accordingly, else returns - * ERR_PTR in case of error and should be handled using IS_ERR. + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices * * Locking: This function must be called under rcu_read_lock(). opp is a rcu * protected pointer. 
The reason for the same is that the opp pointer which is @@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) { struct device_opp *dev_opp; - struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) - return opp; + return ERR_CAST(dev_opp); list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { if (temp_opp->available) { @@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) return opp; } +EXPORT_SYMBOL(opp_find_freq_floor); /** * opp_add() - Add an OPP table from a table definitions @@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq, list_replace_rcu(&opp->node, &new_opp->node); mutex_unlock(&dev_opp_list_lock); - synchronize_rcu(); + kfree_rcu(opp, head); /* Notify the change of the OPP availability */ if (availability_req) @@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq, srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, new_opp); - /* clean up old opp */ - new_opp = opp; - goto out; + return 0; unlock: mutex_unlock(&dev_opp_list_lock); -out: kfree(new_opp); return r; } @@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq) { return opp_set_availability(dev, freq, true); } +EXPORT_SYMBOL(opp_enable); /** * opp_disable() - Disable a specific OPP @@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq) { return opp_set_availability(dev, freq, false); } +EXPORT_SYMBOL(opp_disable); #ifdef CONFIG_CPU_FREQ /** diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index fdc3894bc33..ff46387f530 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -223,12 +223,14 @@ void dev_pm_qos_constraints_destroy(struct device *dev) struct dev_pm_qos *qos; struct dev_pm_qos_request *req, *tmp; struct pm_qos_constraints *c; + struct pm_qos_flags *f; /* - * If the device's PM QoS resume latency limit has been exposed to user - * space, it has to be hidden at this point. + * If the device's PM QoS resume latency limit or PM QoS flags have been + * exposed to user space, they have to be hidden at this point. */ dev_pm_qos_hide_latency_limit(dev); + dev_pm_qos_hide_flags(dev); mutex_lock(&dev_pm_qos_mtx); @@ -237,8 +239,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) if (!qos) goto out; + /* Flush the constraints lists for the device. 
*/ c = &qos->latency; - /* Flush the constraints list for the device */ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { /* * Update constraints list and call the notification @@ -247,6 +249,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev) apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); memset(req, 0, sizeof(*req)); } + f = &qos->flags; + list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } spin_lock_irq(&dev->power.lock); dev->power.qos = NULL; @@ -544,7 +551,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev, error = dev_pm_qos_add_request(ancestor, req, DEV_PM_QOS_LATENCY, value); - if (error) + if (error < 0) req->dev = NULL; return error; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 3804a0af3ef..9fe4f186555 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) /* cf. http://lkml.org/lkml/2006/10/31/28 */ if (!fastfail) - q->request_fn(q); + __blk_run_queue(q); } static void diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 1c49d717396..2ddd64a9ffd 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4330,6 +4330,7 @@ out_unreg_region: out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); out_put_disk: + destroy_workqueue(floppy_wq); for (drive = 0; drive < N_DRIVE; drive++) { if (!disks[drive]) break; @@ -4340,7 +4341,6 @@ out_put_disk: } put_disk(disks[drive]); } - destroy_workqueue(floppy_wq); return err; } @@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void) unregister_blkdev(FLOPPY_MAJOR, "fd"); platform_driver_unregister(&floppy_driver); + destroy_workqueue(floppy_wq); + for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); @@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void) cancel_delayed_work_sync(&fd_timeout); cancel_delayed_work_sync(&fd_timer); - destroy_workqueue(floppy_wq); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index adc6f36564c..9694dd99bbb 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data) struct mtip_cmd *command; int tag, cmdto_cnt = 0; unsigned int bit, group; - unsigned int num_command_slots = port->dd->slot_groups * 32; + unsigned int num_command_slots; unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; if (unlikely(!port)) @@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data) } /* clear the tag accumulator */ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); + num_command_slots = port->dd->slot_groups * 32; for (tag = 0; tag < num_command_slots; tag++) { /* @@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd, fis.device); /* check for erase mode support during secure erase.*/ - if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) - && (outbuf[0] & MTIP_SEC_ERASE_MODE)) { + if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && + (outbuf[0] & MTIP_SEC_ERASE_MODE)) { erasemode = 1; } @@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * return value * None */ -static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, +static void mtip_hw_submit_io(struct driver_data *dd, 
sector_t sector, int nsect, int nents, int tag, void *callback, void *data, int dir) { @@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, struct mtip_port *port = dd->port; struct mtip_cmd *command = &port->commands[tag]; int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + u64 start = sector; /* Map the scatter list for DMA access */ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); @@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start, fis->opts = 1 << 7; fis->command = (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); - *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF); - *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF); + fis->lba_low = start & 0xFF; + fis->lba_mid = (start >> 8) & 0xFF; + fis->lba_hi = (start >> 16) & 0xFF; + fis->lba_low_ex = (start >> 24) & 0xFF; + fis->lba_mid_ex = (start >> 32) & 0xFF; + fis->lba_hi_ex = (start >> 40) & 0xFF; fis->device = 1 << 6; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 5f4a917bd8b..b1742640556 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -34,7 +34,7 @@ #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 /* check for erase mode support during secure erase */ -#define MTIP_SEC_ERASE_MODE 0x3 +#define MTIP_SEC_ERASE_MODE 0x2 /* # of times to retry timed out/failed IOs */ #define MTIP_MAX_RETRIES 2 @@ -155,14 +155,14 @@ enum { MTIP_DDF_REBUILD_FAILED_BIT = 8, }; -__packed struct smart_attr{ +struct smart_attr { u8 attr_id; u16 flags; u8 cur; u8 worst; u32 data; u8 res[3]; -}; +} __packed; /* Register Frame Information Structure (FIS), host to device. */ struct host_to_dev_fis { diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index fc2de5528dc..b00000e8aef 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3304) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index debda27df9b..ee82f2fb65f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index ff63560b846..0c48b0e05ed 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -22,6 +22,26 @@ #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_data/omap_ocp2scp.h> + +/** + * _count_resources - count for the number of resources + * @res: struct resource * + * + * Count and return the number of resources populated for the device that is + * connected to ocp2scp. 
+ */ +static unsigned _count_resources(struct resource *res) +{ + int cnt = 0; + + while (res->start != res->end) { + cnt++; + res++; + } + + return cnt; +} static int ocp2scp_remove_devices(struct device *dev, void *c) { @@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c) static int __devinit omap_ocp2scp_probe(struct platform_device *pdev) { - int ret; - struct device_node *np = pdev->dev.of_node; + int ret; + unsigned res_cnt, i; + struct device_node *np = pdev->dev.of_node; + struct platform_device *pdev_child; + struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data; + struct omap_ocp2scp_dev *dev; if (np) { ret = of_platform_populate(np, NULL, NULL, &pdev->dev); if (ret) { - dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n"); + dev_err(&pdev->dev, + "failed to add resources for ocp2scp child\n"); goto err0; } + } else if (pdata) { + for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++, + dev++) { + res_cnt = _count_resources(dev->res); + + pdev_child = platform_device_alloc(dev->drv_name, + PLATFORM_DEVID_AUTO); + if (!pdev_child) { + dev_err(&pdev->dev, + "failed to allocate mem for ocp2scp child\n"); + goto err0; + } + + ret = platform_device_add_resources(pdev_child, + dev->res, res_cnt); + if (ret) { + dev_err(&pdev->dev, + "failed to add resources for ocp2scp child\n"); + goto err1; + } + + pdev_child->dev.parent = &pdev->dev; + + ret = platform_device_add(pdev_child); + if (ret) { + dev_err(&pdev->dev, + "failed to register ocp2scp child device\n"); + goto err1; + } + } + } else { + dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n"); + return -EINVAL; } + pm_runtime_enable(&pdev->dev); return 0; +err1: + platform_device_put(pdev_child); + err0: device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c index ca4a25ed844..e2c17d187d9 100644 --- a/drivers/clk/ux500/u8500_clk.c +++ b/drivers/clk/ux500/u8500_clk.c @@ -40,7 +40,7 @@ void u8500_clk_init(void) CLK_IS_ROOT|CLK_IGNORE_UNUSED, 32768); clk_register_clkdev(clk, "clk32k", NULL); - clk_register_clkdev(clk, NULL, "rtc-pl031"); + clk_register_clkdev(clk, "apb_pclk", "rtc-pl031"); /* PRCMU clocks */ fw_version = prcmu_get_fw_version(); @@ -228,10 +228,17 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE, BIT(2), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1"); + clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE, BIT(3), 0); + clk_register_clkdev(clk, "apb_pclk", "msp0"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0"); + clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE, BIT(4), 0); + clk_register_clkdev(clk, "apb_pclk", "msp1"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1"); clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE, BIT(5), 0); @@ -239,6 +246,7 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE, BIT(6), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2"); clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE, BIT(7), 0); @@ -246,6 +254,7 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE, BIT(8), 0); + clk_register_clkdev(clk, "apb_pclk", "slimbus0"); clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE, BIT(9), 0); @@ -255,11 +264,16 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", 
U8500_CLKRST1_BASE, BIT(10), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4"); + clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE, BIT(11), 0); + clk_register_clkdev(clk, "apb_pclk", "msp3"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3"); clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE, BIT(0), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3"); clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE, BIT(1), 0); @@ -279,12 +293,13 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE, BIT(5), 0); + clk_register_clkdev(clk, "apb_pclk", "msp2"); + clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2"); clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE, BIT(6), 0); clk_register_clkdev(clk, "apb_pclk", "sdi1"); - clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE, BIT(7), 0); clk_register_clkdev(clk, "apb_pclk", "sdi3"); @@ -316,10 +331,15 @@ void u8500_clk_init(void) clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE, BIT(1), 0); + clk_register_clkdev(clk, "apb_pclk", "ssp0"); + clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE, BIT(2), 0); + clk_register_clkdev(clk, "apb_pclk", "ssp1"); + clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE, BIT(3), 0); + clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0"); clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE, BIT(4), 0); @@ -401,10 +421,17 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.1"); + clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk", U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp0"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0"); + clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk", U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp1"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1"); clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk", U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE); @@ -412,17 +439,25 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.2"); + clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk", - U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE); - /* FIXME: Redefinition of BIT(3). 
*/ + U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "slimbus0"); + clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk", U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.4"); + clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk", U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp3"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3"); /* Periph2 */ clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk", U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.3"); clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk", U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE); @@ -430,6 +465,8 @@ void u8500_clk_init(void) clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk", U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "msp2"); + clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2"); clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk", U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE); @@ -450,10 +487,15 @@ void u8500_clk_init(void) /* Periph3 */ clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk", U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "ssp0"); + clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk", U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "ssp1"); + clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk", U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE); + clk_register_clkdev(clk, NULL, "nmk-i2c.0"); clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk", U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 5961e6415f0..a0b3661d90b 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS5250 SoC. + +config ARM_SPEAR_CPUFREQ + bool "SPEAr CPUFreq support" + depends on PLAT_SPEAR + default y + help + This adds the CPUFreq driver support for SPEAr SOCs. 
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 1bc90e1306d..1f254ec087c 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o -obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o -obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o +obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o @@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o +obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index e9158278c71..52bf36d599f 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { .attr = cpu0_cpufreq_attr, }; -static int __devinit cpu0_cpufreq_driver_init(void) +static int cpu0_cpufreq_driver_init(void) { struct device_node *np; int ret; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index fb8a5279c5d..1f93dbd7235 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -15,6 +15,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void) pure_initcall(init_cpufreq_transition_notifier_list); static int off __read_mostly; -int cpufreq_disabled(void) +static int cpufreq_disabled(void) { return off; } @@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, static ssize_t store_##file_name \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ - unsigned int ret = -EINVAL; \ + unsigned int ret; \ struct cpufreq_policy new_policy; \ \ ret = cpufreq_get_policy(&new_policy, policy->cpu); \ @@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) return sprintf(buf, "performance\n"); else if (policy->governor) - return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", + return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", policy->governor->name); return -EINVAL; } @@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { - unsigned int ret = -EINVAL; + unsigned int ret; char str_governor[16]; struct cpufreq_policy new_policy; @@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, */ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) { - return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name); + return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); } /** @@ -512,7 +514,7 @@ static ssize_t 
show_scaling_available_governors(struct cpufreq_policy *policy, if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) goto out; - i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name); + i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); } out: i += sprintf(&buf[i], "\n"); @@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) } /** - * show_scaling_driver - show the current cpufreq HW/BIOS limitation + * show_bios_limit - show the current cpufreq HW/BIOS limitation */ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) { @@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation) { int retval = -EINVAL; + unsigned int old_target_freq = target_freq; if (cpufreq_disabled()) return -ENODEV; - pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, - target_freq, relation); + /* Make sure that target_freq is within supported range */ + if (target_freq > policy->max) + target_freq = policy->max; + if (target_freq < policy->min) + target_freq = policy->min; + + pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", + policy->cpu, target_freq, relation, old_target_freq); + + if (target_freq == policy->cur) + return 0; + if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); @@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) { int ret = 0; + if (!(cpu_online(cpu) && cpufreq_driver->getavg)) + return 0; + policy = cpufreq_cpu_get(policy->cpu); if (!policy) return -EINVAL; - if (cpu_online(cpu) && cpufreq_driver->getavg) - ret = cpufreq_driver->getavg(policy, cpu); + ret = cpufreq_driver->getavg(policy, cpu); cpufreq_cpu_put(policy); return ret; diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index a152af7e199..64ef737e7e7 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -11,83 +11,30 @@ * published by the Free Software Foundation. */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> #include <linux/cpufreq.h> -#include <linux/cpu.h> -#include <linux/jiffies.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <linux/kernel_stat.h> +#include <linux/kobject.h> +#include <linux/module.h> #include <linux/mutex.h> -#include <linux/hrtimer.h> -#include <linux/tick.h> -#include <linux/ktime.h> -#include <linux/sched.h> +#include <linux/notifier.h> +#include <linux/percpu-defs.h> +#include <linux/sysfs.h> +#include <linux/types.h> -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ +#include "cpufreq_governor.h" +/* Conservative governor macors */ #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_DOWN_THRESHOLD (20) - -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. 
- */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) #define DEF_SAMPLING_DOWN_FACTOR (1) #define MAX_SAMPLING_DOWN_FACTOR (10) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - unsigned int down_skip; - unsigned int requested_freq; - int cpu; - unsigned int enable:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); -static unsigned int dbs_enable; /* number of CPUs using this policy */ +static struct dbs_data cs_dbs_data; +static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info); -/* - * dbs_mutex protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int sampling_down_factor; - unsigned int up_threshold; - unsigned int down_threshold; - unsigned int ignore_nice; - unsigned int freq_step; -} dbs_tuners_ins = { +static struct cs_dbs_tuners cs_tuners = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, @@ -95,95 +42,121 @@ static struct dbs_tuners { .freq_step = 5, }; -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +/* + * Every sampling_rate, we check, if current idle time is less than 20% + * (default), then we try to increase frequency Every sampling_rate * + * sampling_down_factor, we check, if current idle time is more than 80%, then + * we try to decrease frequency + * + * Any frequency increase takes it to the maximum frequency. Frequency reduction + * happens at minimum steps of 5% (default) of maximum frequency + */ +static void cs_check_cpu(int cpu, unsigned int load) { - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; + struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); + struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + unsigned int freq_target; + + /* + * break out if we 'cannot' reduce the speed as the user might + * want freq_step to be zero + */ + if (cs_tuners.freq_step == 0) + return; - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + /* Check for frequency increase */ + if (load > cs_tuners.up_threshold) { + dbs_info->down_skip = 0; - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + /* if we are already at full speed then break out early */ + if (dbs_info->requested_freq == policy->max) + return; - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); + freq_target = (cs_tuners.freq_step * policy->max) / 100; - return jiffies_to_usecs(idle_time); + /* max freq cannot be less than 100. But who knows.... 
*/ + if (unlikely(freq_target == 0)) + freq_target = 5; + + dbs_info->requested_freq += freq_target; + if (dbs_info->requested_freq > policy->max) + dbs_info->requested_freq = policy->max; + + __cpufreq_driver_target(policy, dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } + + /* + * The optimal frequency is the frequency that is the lowest that can + * support the current CPU usage without triggering the up policy. To be + * safe, we focus 10 points under the threshold. + */ + if (load < (cs_tuners.down_threshold - 10)) { + freq_target = (cs_tuners.freq_step * policy->max) / 100; + + dbs_info->requested_freq -= freq_target; + if (dbs_info->requested_freq < policy->min) + dbs_info->requested_freq = policy->min; + + /* + * if we cannot reduce the frequency anymore, break out early + */ + if (policy->cur == policy->min) + return; + + __cpufreq_driver_target(policy, dbs_info->requested_freq, + CPUFREQ_RELATION_H); + return; + } } -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +static void cs_dbs_timer(struct work_struct *work) { - u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + struct cs_cpu_dbs_info_s *dbs_info = container_of(work, + struct cs_cpu_dbs_info_s, cdbs.work.work); + unsigned int cpu = dbs_info->cdbs.cpu; + int delay = delay_for_sampling_rate(cs_tuners.sampling_rate); - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - else - idle_time += get_cpu_iowait_time_us(cpu, wall); + mutex_lock(&dbs_info->cdbs.timer_mutex); - return idle_time; + dbs_check_cpu(&cs_dbs_data, cpu); + + schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); + mutex_unlock(&dbs_info->cdbs.timer_mutex); } -/* keep track of frequency transitions */ -static int -dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, - void *data) +static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, + void *data) { struct cpufreq_freqs *freq = data; - struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info, - freq->cpu); - + struct cs_cpu_dbs_info_s *dbs_info = + &per_cpu(cs_cpu_dbs_info, freq->cpu); struct cpufreq_policy *policy; - if (!this_dbs_info->enable) + if (!dbs_info->enable) return 0; - policy = this_dbs_info->cur_policy; + policy = dbs_info->cdbs.cur_policy; /* - * we only care if our internally tracked freq moves outside - * the 'valid' ranges of freqency available to us otherwise - * we do not change it + * we only care if our internally tracked freq moves outside the 'valid' + * ranges of freqency available to us otherwise we do not change it */ - if (this_dbs_info->requested_freq > policy->max - || this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = freq->new; + if (dbs_info->requested_freq > policy->max + || dbs_info->requested_freq < policy->min) + dbs_info->requested_freq = freq->new; return 0; } -static struct notifier_block dbs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier -}; - /************************** sysfs interface ************************/ static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { - return sprintf(buf, "%u\n", min_sampling_rate); + return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate); } -define_one_global_ro(sampling_rate_min); - -/* cpufreq_conservative Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ -} 
-show_one(sampling_rate, sampling_rate); -show_one(sampling_down_factor, sampling_down_factor); -show_one(up_threshold, up_threshold); -show_one(down_threshold, down_threshold); -show_one(ignore_nice_load, ignore_nice); -show_one(freq_step, freq_step); - static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) @@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; + cs_tuners.sampling_down_factor = input; return count; } @@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate); return count; } @@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) + if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold) return -EINVAL; - dbs_tuners_ins.up_threshold = input; + cs_tuners.up_threshold = input; return count; } @@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) + input >= cs_tuners.up_threshold) return -EINVAL; - dbs_tuners_ins.down_threshold = input; + cs_tuners.down_threshold = input; return count; } static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) { - unsigned int input; + unsigned int input, j; int ret; - unsigned int j; - ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; @@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ + if (input == cs_tuners.ignore_nice) /* nothing to do */ return count; - dbs_tuners_ins.ignore_nice = input; + cs_tuners.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; + struct cs_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(cs_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->cdbs.prev_cpu_wall); + if (cs_tuners.ignore_nice) + dbs_info->cdbs.prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; } @@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, if (input > 100) input = 100; - /* no need to test here if freq_step is zero as the user might actually - * want this, they would be crazy though :) */ - dbs_tuners_ins.freq_step = input; + /* + * no need to test here if freq_step is zero as the user might actually + * want this, they would be crazy though :) + */ + cs_tuners.freq_step = input; return count; } +show_one(cs, sampling_rate, sampling_rate); +show_one(cs, sampling_down_factor, sampling_down_factor); +show_one(cs, up_threshold, up_threshold); +show_one(cs, down_threshold, down_threshold); +show_one(cs, ignore_nice_load, ignore_nice); +show_one(cs, freq_step, 
freq_step); + define_one_global_rw(sampling_rate); define_one_global_rw(sampling_down_factor); define_one_global_rw(up_threshold); define_one_global_rw(down_threshold); define_one_global_rw(ignore_nice_load); define_one_global_rw(freq_step); +define_one_global_ro(sampling_rate_min); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, @@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = { NULL }; -static struct attribute_group dbs_attr_group = { +static struct attribute_group cs_attr_group = { .attrs = dbs_attributes, .name = "conservative", }; /************************** sysfs end ************************/ -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int load = 0; - unsigned int max_load = 0; - unsigned int freq_target; - - struct cpufreq_policy *policy; - unsigned int j; - - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate*sampling_down_factor, we check, if current - * idle time is more than 80%, then we try to decrease frequency - * - * Any frequency increase takes it to the maximum frequency. - * Frequency reduction happens at minimum steps of - * 5% (default) of maximum frequency - */ - - /* Get Absolute Load */ - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time; - unsigned int idle_time, wall_time; - - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } +define_get_cpu_dbs_routines(cs_cpu_dbs_info); - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - if (load > max_load) - max_load = load; - } - - /* - * break out if we 'cannot' reduce the speed as the user might - * want freq_step to be zero - */ - if (dbs_tuners_ins.freq_step == 0) - return; - - /* Check for frequency increase */ - if (max_load > dbs_tuners_ins.up_threshold) { - this_dbs_info->down_skip = 0; - - /* if we are already at full speed then break out early */ - if (this_dbs_info->requested_freq == policy->max) - return; - - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - /* max freq cannot be less than 100. But who knows.... */ - if (unlikely(freq_target == 0)) - freq_target = 5; - - this_dbs_info->requested_freq += freq_target; - if (this_dbs_info->requested_freq > policy->max) - this_dbs_info->requested_freq = policy->max; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. 
To be safe, we focus 10 points under the threshold. - */ - if (max_load < (dbs_tuners_ins.down_threshold - 10)) { - freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; - - this_dbs_info->requested_freq -= freq_target; - if (this_dbs_info->requested_freq < policy->min) - this_dbs_info->requested_freq = policy->min; - - /* - * if we cannot reduce the frequency anymore, break out early - */ - if (policy->cur == policy->min) - return; - - __cpufreq_driver_target(policy, this_dbs_info->requested_freq, - CPUFREQ_RELATION_H); - return; - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - delay -= jiffies % delay; - - mutex_lock(&dbs_info->timer_mutex); - - dbs_check_cpu(dbs_info); - - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - delay -= jiffies % delay; +static struct notifier_block cs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier, +}; - dbs_info->enable = 1; - INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} +static struct cs_ops cs_ops = { + .notifier_block = &cs_cpufreq_notifier_block, +}; -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) -{ - dbs_info->enable = 0; - cancel_delayed_work_sync(&dbs_info->work); -} +static struct dbs_data cs_dbs_data = { + .governor = GOV_CONSERVATIVE, + .attr_group = &cs_attr_group, + .tuners = &cs_tuners, + .get_cpu_cdbs = get_cpu_cdbs, + .get_cpu_dbs_info_s = get_cpu_dbs_info_s, + .gov_dbs_timer = cs_dbs_timer, + .gov_check_cpu = cs_check_cpu, + .gov_ops = &cs_ops, +}; -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, +static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->down_skip = 0; - this_dbs_info->requested_freq = policy->cur; - - mutex_init(&this_dbs_info->timer_mutex); - dbs_enable++; - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - /* policy latency is in nS. 
Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* - * conservative does not implement micro like ondemand - * governor, thus we are bound to jiffes/HZ - */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - - cpufreq_register_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } - mutex_unlock(&dbs_mutex); - - dbs_timer_init(this_dbs_info); - - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - dbs_enable--; - mutex_destroy(&this_dbs_info->timer_mutex); - - /* - * Stop the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 0) - cpufreq_unregister_notifier( - &dbs_cpufreq_notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target( - this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - dbs_check_cpu(this_dbs_info); - mutex_unlock(&this_dbs_info->timer_mutex); - - break; - } - return 0; + return cpufreq_governor_dbs(&cs_dbs_data, policy, event); } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE @@ -597,13 +333,14 @@ static #endif struct cpufreq_governor cpufreq_gov_conservative = { .name = "conservative", - .governor = cpufreq_governor_dbs, + .governor = cs_cpufreq_governor_dbs, .max_transition_latency = TRANSITION_LATENCY_LIMIT, .owner = THIS_MODULE, }; static int __init cpufreq_gov_dbs_init(void) { + mutex_init(&cs_dbs_data.mutex); return cpufreq_register_governor(&cpufreq_gov_conservative); } @@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void) cpufreq_unregister_governor(&cpufreq_gov_conservative); } - MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for " "Low Latency Frequency Transition capable processors " diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c new file mode 100644 index 00000000000..6c5f1d383cd --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.c @@ -0,0 +1,318 @@ +/* + * drivers/cpufreq/cpufreq_governor.c + * + * CPUFREQ governors common code + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. + * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> + * (C) 2009 Alexander Clouter <alex@digriz.org.uk> + * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/cputime.h> +#include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/export.h> +#include <linux/kernel_stat.h> +#include <linux/mutex.h> +#include <linux/tick.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "cpufreq_governor.h" + +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = cputime_to_usecs(cur_wall_time); + + return cputime_to_usecs(idle_time); +} + +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} +EXPORT_SYMBOL_GPL(get_cpu_idle_time); + +void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) +{ + struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu); + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + struct cpufreq_policy *policy; + unsigned int max_load = 0; + unsigned int ignore_nice; + unsigned int j; + + if (dbs_data->governor == GOV_ONDEMAND) + ignore_nice = od_tuners->ignore_nice; + else + ignore_nice = cs_tuners->ignore_nice; + + policy = cdbs->cur_policy; + + /* Get Absolute Load (in terms of freq for ondemand gov) */ + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_common_info *j_cdbs; + u64 cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load; + + j_cdbs = dbs_data->get_cpu_cdbs(j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_cdbs->prev_cpu_wall); + j_cdbs->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_cdbs->prev_cpu_idle); + j_cdbs->prev_cpu_idle = cur_idle_time; + + if (ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + cdbs->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + cdbs->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + if (dbs_data->governor == GOV_ONDEMAND) { + struct od_cpu_dbs_info_s *od_j_dbs_info = + dbs_data->get_cpu_dbs_info_s(cpu); + + cur_iowait_time = get_cpu_iowait_time_us(j, + &cur_wall_time); + if (cur_iowait_time == -1ULL) + cur_iowait_time = 0; + + iowait_time = (unsigned int) (cur_iowait_time - + od_j_dbs_info->prev_cpu_iowait); + od_j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + /* + * For the purpose of ondemand, waiting for disk IO is + * an indication that you're performance critical, and + * not that the system is actually idle. So subtract the + * iowait time from the cpu idle time. 
+ */ + if (od_tuners->io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + } + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + load = 100 * (wall_time - idle_time) / wall_time; + + if (dbs_data->governor == GOV_ONDEMAND) { + int freq_avg = __cpufreq_driver_getavg(policy, j); + if (freq_avg <= 0) + freq_avg = policy->cur; + + load *= freq_avg; + } + + if (load > max_load) + max_load = load; + } + + dbs_data->gov_check_cpu(cpu, max_load); +} +EXPORT_SYMBOL_GPL(dbs_check_cpu); + +static inline void dbs_timer_init(struct dbs_data *dbs_data, + struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate) +{ + int delay = delay_for_sampling_rate(sampling_rate); + + INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer); + schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay); +} + +static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs) +{ + cancel_delayed_work_sync(&cdbs->work); +} + +int cpufreq_governor_dbs(struct dbs_data *dbs_data, + struct cpufreq_policy *policy, unsigned int event) +{ + struct od_cpu_dbs_info_s *od_dbs_info = NULL; + struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; + struct cpu_dbs_common_info *cpu_cdbs; + unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; + int rc; + + cpu_cdbs = dbs_data->get_cpu_cdbs(cpu); + + if (dbs_data->governor == GOV_CONSERVATIVE) { + cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); + sampling_rate = &cs_tuners->sampling_rate; + ignore_nice = cs_tuners->ignore_nice; + } else { + od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); + sampling_rate = &od_tuners->sampling_rate; + ignore_nice = od_tuners->ignore_nice; + } + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_data->mutex); + + dbs_data->enable++; + cpu_cdbs->cpu = cpu; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_common_info *j_cdbs; + j_cdbs = dbs_data->get_cpu_cdbs(j); + + j_cdbs->cur_policy = policy; + j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, + &j_cdbs->prev_cpu_wall); + if (ignore_nice) + j_cdbs->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + + /* + * Start the timerschedule work, when this governor is used for + * first time + */ + if (dbs_data->enable != 1) + goto second_time; + + rc = sysfs_create_group(cpufreq_global_kobject, + dbs_data->attr_group); + if (rc) { + mutex_unlock(&dbs_data->mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + + /* + * conservative does not implement micro like ondemand + * governor, thus we are bound to jiffes/HZ + */ + if (dbs_data->governor == GOV_CONSERVATIVE) { + struct cs_ops *ops = dbs_data->gov_ops; + + cpufreq_register_notifier(ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); + } else { + struct od_ops *ops = dbs_data->gov_ops; + + od_tuners->io_is_busy = ops->io_busy(); + } + + /* Bring kernel and HW constraints together */ + dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + *sampling_rate = max(dbs_data->min_sampling_rate, latency * + LATENCY_MULTIPLIER); + +second_time: + if (dbs_data->governor == GOV_CONSERVATIVE) { + cs_dbs_info->down_skip = 0; + cs_dbs_info->enable = 1; + cs_dbs_info->requested_freq = policy->cur; + } else { + struct od_ops *ops = dbs_data->gov_ops; + od_dbs_info->rate_mult = 1; + od_dbs_info->sample_type = OD_NORMAL_SAMPLE; + ops->powersave_bias_init_cpu(cpu); + } + mutex_unlock(&dbs_data->mutex); + + mutex_init(&cpu_cdbs->timer_mutex); + dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate); + break; + + case CPUFREQ_GOV_STOP: + if (dbs_data->governor == GOV_CONSERVATIVE) + cs_dbs_info->enable = 0; + + dbs_timer_exit(cpu_cdbs); + + mutex_lock(&dbs_data->mutex); + mutex_destroy(&cpu_cdbs->timer_mutex); + dbs_data->enable--; + if (!dbs_data->enable) { + struct cs_ops *ops = dbs_data->gov_ops; + + sysfs_remove_group(cpufreq_global_kobject, + dbs_data->attr_group); + if (dbs_data->governor == GOV_CONSERVATIVE) + cpufreq_unregister_notifier(ops->notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + } + mutex_unlock(&dbs_data->mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + mutex_lock(&cpu_cdbs->timer_mutex); + if (policy->max < cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, + policy->min, CPUFREQ_RELATION_L); + dbs_check_cpu(dbs_data, cpu); + mutex_unlock(&cpu_cdbs->timer_mutex); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h new file mode 100644 index 00000000000..f6616540c53 --- /dev/null +++ b/drivers/cpufreq/cpufreq_governor.h @@ -0,0 +1,176 @@ +/* + * drivers/cpufreq/cpufreq_governor.h + * + * Header file for CPUFreq governors common code + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. + * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> + * (C) 2009 Alexander Clouter <alex@digriz.org.uk> + * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _CPUFREQ_GOVERNER_H +#define _CPUFREQ_GOVERNER_H + +#include <linux/cpufreq.h> +#include <linux/kobject.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/sysfs.h> + +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. 
The + * governor will work on any processor with transition latency <= 10mS, using + * appropriate sampling rate. + * + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. All times here are in uS. + */ +#define MIN_SAMPLING_RATE_RATIO (2) +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +/* Ondemand Sampling types */ +enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; + +/* Macro creating sysfs show routines */ +#define show_one(_gov, file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", _gov##_tuners.object); \ +} + +#define define_get_cpu_dbs_routines(_dbs_info) \ +static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \ +{ \ + return &per_cpu(_dbs_info, cpu).cdbs; \ +} \ + \ +static void *get_cpu_dbs_info_s(int cpu) \ +{ \ + return &per_cpu(_dbs_info, cpu); \ +} + +/* + * Abbreviations: + * dbs: used as a shortform for demand based switching It helps to keep variable + * names smaller, simpler + * cdbs: common dbs + * on_*: On-demand governor + * cs_*: Conservative governor + */ + +/* Per cpu structures */ +struct cpu_dbs_common_info { + int cpu; + u64 prev_cpu_idle; + u64 prev_cpu_wall; + u64 prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + /* + * percpu mutex that serializes governor limit change with gov_dbs_timer + * invocation. We do not want gov_dbs_timer to run when user is changing + * the governor or limits. + */ + struct mutex timer_mutex; +}; + +struct od_cpu_dbs_info_s { + struct cpu_dbs_common_info cdbs; + u64 prev_cpu_iowait; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + unsigned int sample_type:1; +}; + +struct cs_cpu_dbs_info_s { + struct cpu_dbs_common_info cdbs; + unsigned int down_skip; + unsigned int requested_freq; + unsigned int enable:1; +}; + +/* Governers sysfs tunables */ +struct od_dbs_tuners { + unsigned int ignore_nice; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_differential; + unsigned int powersave_bias; + unsigned int io_is_busy; +}; + +struct cs_dbs_tuners { + unsigned int ignore_nice; + unsigned int sampling_rate; + unsigned int sampling_down_factor; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int freq_step; +}; + +/* Per Governer data */ +struct dbs_data { + /* Common across governors */ + #define GOV_ONDEMAND 0 + #define GOV_CONSERVATIVE 1 + int governor; + unsigned int min_sampling_rate; + unsigned int enable; /* number of CPUs using this policy */ + struct attribute_group *attr_group; + void *tuners; + + /* dbs_mutex protects dbs_enable in governor start/stop */ + struct mutex mutex; + + struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu); + void *(*get_cpu_dbs_info_s)(int cpu); + void (*gov_dbs_timer)(struct work_struct *work); + void (*gov_check_cpu)(int cpu, unsigned int load); + + /* Governor specific ops, see below */ + void *gov_ops; +}; + +/* Governor specific ops, will be passed to dbs_data->gov_ops */ +struct od_ops { + int (*io_busy)(void); + void (*powersave_bias_init_cpu)(int cpu); + unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, + unsigned int freq_next, unsigned int relation); + void (*freq_increase)(struct cpufreq_policy *p, unsigned int 
freq); +}; + +struct cs_ops { + struct notifier_block *notifier_block; +}; + +static inline int delay_for_sampling_rate(unsigned int sampling_rate) +{ + int delay = usecs_to_jiffies(sampling_rate); + + /* We want all CPUs to do sampling nearly on same jiffy */ + if (num_online_cpus() > 1) + delay -= jiffies % delay; + + return delay; +} + +u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); +void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); +int cpufreq_governor_dbs(struct dbs_data *dbs_data, + struct cpufreq_policy *policy, unsigned int event); +#endif /* _CPUFREQ_GOVERNER_H */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 396322f2a83..7731f7c7e79 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -10,24 +10,23 @@ * published by the Free Software Foundation. */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/init.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/cpufreq.h> -#include <linux/cpu.h> -#include <linux/jiffies.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <linux/kernel_stat.h> +#include <linux/kobject.h> +#include <linux/module.h> #include <linux/mutex.h> -#include <linux/hrtimer.h> +#include <linux/percpu-defs.h> +#include <linux/sysfs.h> #include <linux/tick.h> -#include <linux/ktime.h> -#include <linux/sched.h> +#include <linux/types.h> -/* - * dbs is used in this file as a shortform for demandbased switching - * It helps to keep variable names smaller, simpler - */ +#include "cpufreq_governor.h" +/* On-demand governor macors */ #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) #define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_SAMPLING_DOWN_FACTOR (1) @@ -38,80 +37,14 @@ #define MIN_FREQUENCY_UP_THRESHOLD (11) #define MAX_FREQUENCY_UP_THRESHOLD (100) -/* - * The polling frequency of this governor depends on the capability of - * the processor. Default polling frequency is 1000 times the transition - * latency of the processor. The governor will work on any processor with - * transition latency <= 10mS, using appropriate sampling - * rate. - * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) - * this governor will not work. - * All times here are in uS. 
- */ -#define MIN_SAMPLING_RATE_RATIO (2) - -static unsigned int min_sampling_rate; - -#define LATENCY_MULTIPLIER (1000) -#define MIN_LATENCY_MULTIPLIER (100) -#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) - -static void do_dbs_timer(struct work_struct *work); -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event); +static struct dbs_data od_dbs_data; +static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info); #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND -static +static struct cpufreq_governor cpufreq_gov_ondemand; #endif -struct cpufreq_governor cpufreq_gov_ondemand = { - .name = "ondemand", - .governor = cpufreq_governor_dbs, - .max_transition_latency = TRANSITION_LATENCY_LIMIT, - .owner = THIS_MODULE, -}; -/* Sampling types */ -enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; - -struct cpu_dbs_info_s { - cputime64_t prev_cpu_idle; - cputime64_t prev_cpu_iowait; - cputime64_t prev_cpu_wall; - cputime64_t prev_cpu_nice; - struct cpufreq_policy *cur_policy; - struct delayed_work work; - struct cpufreq_frequency_table *freq_table; - unsigned int freq_lo; - unsigned int freq_lo_jiffies; - unsigned int freq_hi_jiffies; - unsigned int rate_mult; - int cpu; - unsigned int sample_type:1; - /* - * percpu mutex that serializes governor limit change with - * do_dbs_timer invocation. We do not want do_dbs_timer to run - * when user is changing the governor or limits. - */ - struct mutex timer_mutex; -}; -static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); - -static unsigned int dbs_enable; /* number of CPUs using this policy */ - -/* - * dbs_mutex protects dbs_enable in governor start/stop. - */ -static DEFINE_MUTEX(dbs_mutex); - -static struct dbs_tuners { - unsigned int sampling_rate; - unsigned int up_threshold; - unsigned int down_differential; - unsigned int ignore_nice; - unsigned int sampling_down_factor; - unsigned int powersave_bias; - unsigned int io_is_busy; -} dbs_tuners_ins = { +static struct od_dbs_tuners od_tuners = { .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, @@ -119,48 +52,35 @@ static struct dbs_tuners { .powersave_bias = 0, }; -static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) -{ - u64 idle_time; - u64 cur_wall_time; - u64 busy_time; - - cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - - busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; - busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; - - idle_time = cur_wall_time - busy_time; - if (wall) - *wall = jiffies_to_usecs(cur_wall_time); - - return jiffies_to_usecs(idle_time); -} - -static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) +static void ondemand_powersave_bias_init_cpu(int cpu) { - u64 idle_time = get_cpu_idle_time_us(cpu, NULL); - - if (idle_time == -1ULL) - return get_cpu_idle_time_jiffy(cpu, wall); - else - idle_time += get_cpu_iowait_time_us(cpu, wall); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - return idle_time; + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; } -static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall) +/* + * Not all CPUs want IO time to be accounted as busy; this depends 
on how + * efficient idling at a higher frequency/voltage is. + * Pavel Machek says this is not so for various generations of AMD and old + * Intel systems. + * Mike Chan (androidlcom) calis this is also not true for ARM. + * Because of this, whitelist specific known (series) of CPUs by default, and + * leave all others up to the user. + */ +static int should_io_be_busy(void) { - u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); - - if (iowait_time == -1ULL) - return 0; - - return iowait_time; +#if defined(CONFIG_X86) + /* + * For Intel, Core 2 (model 15) andl later have an efficient idle. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && + boot_cpu_data.x86 == 6 && + boot_cpu_data.x86_model >= 15) + return 1; +#endif + return 0; } /* @@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. */ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, - unsigned int freq_next, - unsigned int relation) + unsigned int freq_next, unsigned int relation) { unsigned int freq_req, freq_reduc, freq_avg; unsigned int freq_hi, freq_lo; unsigned int index = 0; unsigned int jiffies_total, jiffies_hi, jiffies_lo; - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); if (!dbs_info->freq_table) { @@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, relation, &index); freq_req = dbs_info->freq_table[index].frequency; - freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_reduc = freq_req * od_tuners.powersave_bias / 1000; freq_avg = freq_req - freq_reduc; /* Find freq bounds for freq_avg in freq_table */ @@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, dbs_info->freq_lo_jiffies = 0; return freq_lo; } - jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate); jiffies_hi = (freq_avg - freq_lo) * jiffies_total; jiffies_hi += ((freq_hi - freq_lo) / 2); jiffies_hi /= (freq_hi - freq_lo); @@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy, return freq_hi; } -static void ondemand_powersave_bias_init_cpu(int cpu) -{ - struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - dbs_info->freq_table = cpufreq_frequency_get_table(cpu); - dbs_info->freq_lo = 0; -} - static void ondemand_powersave_bias_init(void) { int i; @@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void) } } -/************************** sysfs interface ************************/ +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (od_tuners.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; -static ssize_t show_sampling_rate_min(struct kobject *kobj, - struct attribute *attr, char *buf) + __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ? + CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +/* + * Every sampling_rate, we check, if current idle time is less than 20% + * (default), then we try to increase frequency Every sampling_rate, we look for + * a the lowest frequency which can sustain the load while keeping idle time + * over 30%. If such a frequency exist, we try to decrease to this frequency. 
+ * + * Any frequency increase takes it to the maximum frequency. Frequency reduction + * happens at minimum steps of 5% (default) of current frequency + */ +static void od_check_cpu(int cpu, unsigned int load_freq) { - return sprintf(buf, "%u\n", min_sampling_rate); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; + + dbs_info->freq_lo = 0; + + /* Check for frequency increase */ + if (load_freq > od_tuners.up_threshold * policy->cur) { + /* If switching to max speed, apply sampling_down_factor */ + if (policy->cur < policy->max) + dbs_info->rate_mult = + od_tuners.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + return; + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that can + * support the current CPU usage without triggering the up policy. To be + * safe, we focus 10 points under the threshold. + */ + if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = load_freq / (od_tuners.up_threshold - + od_tuners.down_differential); + + /* No longer fully busy, reset rate_mult */ + dbs_info->rate_mult = 1; + + if (freq_next < policy->min) + freq_next = policy->min; + + if (!od_tuners.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } } -define_one_global_ro(sampling_rate_min); +static void od_dbs_timer(struct work_struct *work) +{ + struct od_cpu_dbs_info_s *dbs_info = + container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); + unsigned int cpu = dbs_info->cdbs.cpu; + int delay, sample_type = dbs_info->sample_type; + + mutex_lock(&dbs_info->cdbs.timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = OD_NORMAL_SAMPLE; + if (sample_type == OD_SUB_SAMPLE) { + delay = dbs_info->freq_lo_jiffies; + __cpufreq_driver_target(dbs_info->cdbs.cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + } else { + dbs_check_cpu(&od_dbs_data, cpu); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = OD_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = delay_for_sampling_rate(od_tuners.sampling_rate + * dbs_info->rate_mult); + } + } + + schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); + mutex_unlock(&dbs_info->cdbs.timer_mutex); +} + +/************************** sysfs interface ************************/ -/* cpufreq_ondemand Governor Tunables */ -#define show_one(file_name, object) \ -static ssize_t show_##file_name \ -(struct kobject *kobj, struct attribute *attr, char *buf) \ -{ \ - return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate); } -show_one(sampling_rate, sampling_rate); -show_one(io_is_busy, io_is_busy); -show_one(up_threshold, up_threshold); -show_one(sampling_down_factor, sampling_down_factor); -show_one(ignore_nice_load, ignore_nice); -show_one(powersave_bias, powersave_bias); /** * update_sampling_rate - update sampling rate effective immediately if needed. 
* @new_rate: new sampling rate * * If new rate is smaller than the old, simply updaing - * dbs_tuners_int.sampling_rate might not be appropriate. For example, - * if the original sampling_rate was 1 second and the requested new sampling - * rate is 10 ms because the user needs immediate reaction from ondemand - * governor, but not sure if higher frequency will be required or not, - * then, the governor may change the sampling rate too late; up to 1 second - * later. Thus, if we are reducing the sampling rate, we need to make the - * new value effective immediately. + * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the + * original sampling_rate was 1 second and the requested new sampling rate is 10 + * ms because the user needs immediate reaction from ondemand governor, but not + * sure if higher frequency will be required or not, then, the governor may + * change the sampling rate too late; up to 1 second later. Thus, if we are + * reducing the sampling rate, we need to make the new value effective + * immediately. */ static void update_sampling_rate(unsigned int new_rate) { int cpu; - dbs_tuners_ins.sampling_rate = new_rate - = max(new_rate, min_sampling_rate); + od_tuners.sampling_rate = new_rate = max(new_rate, + od_dbs_data.min_sampling_rate); for_each_online_cpu(cpu) { struct cpufreq_policy *policy; - struct cpu_dbs_info_s *dbs_info; + struct od_cpu_dbs_info_s *dbs_info; unsigned long next_sampling, appointed_at; policy = cpufreq_cpu_get(cpu); if (!policy) continue; + if (policy->governor != &cpufreq_gov_ondemand) { + cpufreq_cpu_put(policy); + continue; + } dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); cpufreq_cpu_put(policy); - mutex_lock(&dbs_info->timer_mutex); + mutex_lock(&dbs_info->cdbs.timer_mutex); - if (!delayed_work_pending(&dbs_info->work)) { - mutex_unlock(&dbs_info->timer_mutex); + if (!delayed_work_pending(&dbs_info->cdbs.work)) { + mutex_unlock(&dbs_info->cdbs.timer_mutex); continue; } - next_sampling = jiffies + usecs_to_jiffies(new_rate); - appointed_at = dbs_info->work.timer.expires; - + next_sampling = jiffies + usecs_to_jiffies(new_rate); + appointed_at = dbs_info->cdbs.work.timer.expires; if (time_before(next_sampling, appointed_at)) { - mutex_unlock(&dbs_info->timer_mutex); - cancel_delayed_work_sync(&dbs_info->work); - mutex_lock(&dbs_info->timer_mutex); + mutex_unlock(&dbs_info->cdbs.timer_mutex); + cancel_delayed_work_sync(&dbs_info->cdbs.work); + mutex_lock(&dbs_info->cdbs.timer_mutex); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, - usecs_to_jiffies(new_rate)); + schedule_delayed_work_on(dbs_info->cdbs.cpu, + &dbs_info->cdbs.work, + usecs_to_jiffies(new_rate)); } - mutex_unlock(&dbs_info->timer_mutex); + mutex_unlock(&dbs_info->cdbs.timer_mutex); } } @@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - dbs_tuners_ins.io_is_busy = !!input; + od_tuners.io_is_busy = !!input; return count; } @@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } - dbs_tuners_ins.up_threshold = input; + od_tuners.up_threshold = input; return count; } @@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - dbs_tuners_ins.sampling_down_factor = input; + od_tuners.sampling_down_factor = input; /* Reset down sampling multiplier in case it 
was active */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; - dbs_info = &per_cpu(od_cpu_dbs_info, j); + struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, + j); dbs_info->rate_mult = 1; } return count; @@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + if (input == od_tuners.ignore_nice) { /* nothing to do */ return count; } - dbs_tuners_ins.ignore_nice = input; + od_tuners.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ for_each_online_cpu(j) { - struct cpu_dbs_info_s *dbs_info; + struct od_cpu_dbs_info_s *dbs_info; dbs_info = &per_cpu(od_cpu_dbs_info, j); - dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->cdbs.prev_cpu_wall); + if (od_tuners.ignore_nice) + dbs_info->cdbs.prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; @@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, if (input > 1000) input = 1000; - dbs_tuners_ins.powersave_bias = input; + od_tuners.powersave_bias = input; ondemand_powersave_bias_init(); return count; } +show_one(od, sampling_rate, sampling_rate); +show_one(od, io_is_busy, io_is_busy); +show_one(od, up_threshold, up_threshold); +show_one(od, sampling_down_factor, sampling_down_factor); +show_one(od, ignore_nice_load, ignore_nice); +show_one(od, powersave_bias, powersave_bias); + define_one_global_rw(sampling_rate); define_one_global_rw(io_is_busy); define_one_global_rw(up_threshold); define_one_global_rw(sampling_down_factor); define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); +define_one_global_ro(sampling_rate_min); static struct attribute *dbs_attributes[] = { &sampling_rate_min.attr, @@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = { NULL }; -static struct attribute_group dbs_attr_group = { +static struct attribute_group od_attr_group = { .attrs = dbs_attributes, .name = "ondemand", }; /************************** sysfs end ************************/ -static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) -{ - if (dbs_tuners_ins.powersave_bias) - freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); - else if (p->cur == p->max) - return; - - __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? - CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); -} - -static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) -{ - unsigned int max_load_freq; - - struct cpufreq_policy *policy; - unsigned int j; - - this_dbs_info->freq_lo = 0; - policy = this_dbs_info->cur_policy; - - /* - * Every sampling_rate, we check, if current idle time is less - * than 20% (default), then we try to increase frequency - * Every sampling_rate, we look for a the lowest - * frequency which can sustain the load while keeping idle time over - * 30%. If such a frequency exist, we try to decrease to this frequency. - * - * Any frequency increase takes it to the maximum frequency. 
- * Frequency reduction happens at minimum steps of - * 5% (default) of current frequency - */ - - /* Get Absolute Load - in terms of freq */ - max_load_freq = 0; - - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time; - unsigned int idle_time, wall_time, iowait_time; - unsigned int load, load_freq; - int freq_avg; - - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - - cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - - wall_time = (unsigned int) - (cur_wall_time - j_dbs_info->prev_cpu_wall); - j_dbs_info->prev_cpu_wall = cur_wall_time; - - idle_time = (unsigned int) - (cur_idle_time - j_dbs_info->prev_cpu_idle); - j_dbs_info->prev_cpu_idle = cur_idle_time; - - iowait_time = (unsigned int) - (cur_iowait_time - j_dbs_info->prev_cpu_iowait); - j_dbs_info->prev_cpu_iowait = cur_iowait_time; - - if (dbs_tuners_ins.ignore_nice) { - u64 cur_nice; - unsigned long cur_nice_jiffies; - - cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - - j_dbs_info->prev_cpu_nice; - /* - * Assumption: nice time between sampling periods will - * be less than 2^32 jiffies for 32 bit sys - */ - cur_nice_jiffies = (unsigned long) - cputime64_to_jiffies64(cur_nice); - - j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - idle_time += jiffies_to_usecs(cur_nice_jiffies); - } - - /* - * For the purpose of ondemand, waiting for disk IO is an - * indication that you're performance critical, and not that - * the system is actually idle. So subtract the iowait time - * from the cpu idle time. - */ - - if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) - idle_time -= iowait_time; - - if (unlikely(!wall_time || wall_time < idle_time)) - continue; - - load = 100 * (wall_time - idle_time) / wall_time; - - freq_avg = __cpufreq_driver_getavg(policy, j); - if (freq_avg <= 0) - freq_avg = policy->cur; - - load_freq = load * freq_avg; - if (load_freq > max_load_freq) - max_load_freq = load_freq; - } +define_get_cpu_dbs_routines(od_cpu_dbs_info); - /* Check for frequency increase */ - if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { - /* If switching to max speed, apply sampling_down_factor */ - if (policy->cur < policy->max) - this_dbs_info->rate_mult = - dbs_tuners_ins.sampling_down_factor; - dbs_freq_increase(policy, policy->max); - return; - } - - /* Check for frequency decrease */ - /* if we cannot reduce the frequency anymore, break out early */ - if (policy->cur == policy->min) - return; - - /* - * The optimal frequency is the frequency that is the lowest that - * can support the current CPU usage without triggering the up - * policy. To be safe, we focus 10 points under the threshold. 
- */ - if (max_load_freq < - (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * - policy->cur) { - unsigned int freq_next; - freq_next = max_load_freq / - (dbs_tuners_ins.up_threshold - - dbs_tuners_ins.down_differential); - - /* No longer fully busy, reset rate_mult */ - this_dbs_info->rate_mult = 1; - - if (freq_next < policy->min) - freq_next = policy->min; - - if (!dbs_tuners_ins.powersave_bias) { - __cpufreq_driver_target(policy, freq_next, - CPUFREQ_RELATION_L); - } else { - int freq = powersave_bias_target(policy, freq_next, - CPUFREQ_RELATION_L); - __cpufreq_driver_target(policy, freq, - CPUFREQ_RELATION_L); - } - } -} - -static void do_dbs_timer(struct work_struct *work) -{ - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int sample_type = dbs_info->sample_type; - - int delay; - - mutex_lock(&dbs_info->timer_mutex); - - /* Common NORMAL_SAMPLE setup */ - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - if (!dbs_tuners_ins.powersave_bias || - sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); - if (dbs_info->freq_lo) { - /* Setup timer for SUB_SAMPLE */ - dbs_info->sample_type = DBS_SUB_SAMPLE; - delay = dbs_info->freq_hi_jiffies; - } else { - /* We want all CPUs to do sampling nearly on - * same jiffy - */ - delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; - } - } else { - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); - delay = dbs_info->freq_lo_jiffies; - } - schedule_delayed_work_on(cpu, &dbs_info->work, delay); - mutex_unlock(&dbs_info->timer_mutex); -} - -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) -{ - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; +static struct od_ops od_ops = { + .io_busy = should_io_be_busy, + .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu, + .powersave_bias_target = powersave_bias_target, + .freq_increase = dbs_freq_increase, +}; - dbs_info->sample_type = DBS_NORMAL_SAMPLE; - INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); -} +static struct dbs_data od_dbs_data = { + .governor = GOV_ONDEMAND, + .attr_group = &od_attr_group, + .tuners = &od_tuners, + .get_cpu_cdbs = get_cpu_cdbs, + .get_cpu_dbs_info_s = get_cpu_dbs_info_s, + .gov_dbs_timer = od_dbs_timer, + .gov_check_cpu = od_check_cpu, + .gov_ops = &od_ops, +}; -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) { - cancel_delayed_work_sync(&dbs_info->work); + return cpufreq_governor_dbs(&od_dbs_data, policy, event); } -/* - * Not all CPUs want IO time to be accounted as busy; this dependson how - * efficient idling at a higher frequency/voltage is. - * Pavel Machek says this is not so for various generations of AMD and old - * Intel systems. - * Mike Chan (androidlcom) calis this is also not true for ARM. - * Because of this, whitelist specific known (series) of CPUs by default, and - * leave all others up to the user. - */ -static int should_io_be_busy(void) -{ -#if defined(CONFIG_X86) - /* - * For Intel, Core 2 (model 15) andl later have an efficient idle. 
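An illustrative sketch, not part of the patch: the point of the shared struct dbs_data wiring above is that a second governor can reuse the same plumbing. The conservative governor would plug in its own callbacks roughly as below; the cs_* identifiers are assumed for the example and are not shown in this hunk.

static struct dbs_data cs_dbs_data_sketch = {
	.governor	= GOV_CONSERVATIVE,
	.attr_group	= &cs_attr_group,		/* its own sysfs tunables group */
	.tuners		= &cs_tuners,			/* a struct cs_dbs_tuners instance */
	.get_cpu_cdbs	= get_cpu_cdbs,			/* from define_get_cpu_dbs_routines() */
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer	= cs_dbs_timer,
	.gov_check_cpu	= cs_check_cpu,
	.gov_ops	= &cs_ops,			/* struct cs_ops carrying its notifier */
};

static int cs_cpufreq_governor_dbs_sketch(struct cpufreq_policy *policy,
					  unsigned int event)
{
	/* start/stop/limits handling is delegated to the common governor code */
	return cpufreq_governor_dbs(&cs_dbs_data_sketch, policy, event);
}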
- */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && - boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model >= 15) - return 1; +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND +static #endif - return 0; -} - -static int cpufreq_governor_dbs(struct cpufreq_policy *policy, - unsigned int event) -{ - unsigned int cpu = policy->cpu; - struct cpu_dbs_info_s *this_dbs_info; - unsigned int j; - int rc; - - this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu); - - switch (event) { - case CPUFREQ_GOV_START: - if ((!cpu_online(cpu)) || (!policy->cur)) - return -EINVAL; - - mutex_lock(&dbs_mutex); - - dbs_enable++; - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_info_s *j_dbs_info; - j_dbs_info = &per_cpu(od_cpu_dbs_info, j); - j_dbs_info->cur_policy = policy; - - j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, - &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) - j_dbs_info->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - } - this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; - ondemand_powersave_bias_init_cpu(cpu); - /* - * Start the timerschedule work, when this governor - * is used for first time - */ - if (dbs_enable == 1) { - unsigned int latency; - - rc = sysfs_create_group(cpufreq_global_kobject, - &dbs_attr_group); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - - /* policy latency is in nS. Convert it to uS first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; - /* Bring kernel and HW constraints together */ - min_sampling_rate = max(min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - dbs_tuners_ins.sampling_rate = - max(min_sampling_rate, - latency * LATENCY_MULTIPLIER); - dbs_tuners_ins.io_is_busy = should_io_be_busy(); - } - mutex_unlock(&dbs_mutex); - - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); - break; - - case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); - - mutex_lock(&dbs_mutex); - mutex_destroy(&this_dbs_info->timer_mutex); - dbs_enable--; - mutex_unlock(&dbs_mutex); - if (!dbs_enable) - sysfs_remove_group(cpufreq_global_kobject, - &dbs_attr_group); - - break; - - case CPUFREQ_GOV_LIMITS: - mutex_lock(&this_dbs_info->timer_mutex); - if (policy->max < this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > this_dbs_info->cur_policy->cur) - __cpufreq_driver_target(this_dbs_info->cur_policy, - policy->min, CPUFREQ_RELATION_L); - dbs_check_cpu(this_dbs_info); - mutex_unlock(&this_dbs_info->timer_mutex); - break; - } - return 0; -} +struct cpufreq_governor cpufreq_gov_ondemand = { + .name = "ondemand", + .governor = od_cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; static int __init cpufreq_gov_dbs_init(void) { u64 idle_time; int cpu = get_cpu(); + mutex_init(&od_dbs_data.mutex); idle_time = get_cpu_idle_time_us(cpu, NULL); put_cpu(); if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ - dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; - dbs_tuners_ins.down_differential = - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; /* * In nohz/micro accounting case we set the minimum frequency * not depending on HZ, but fixed (very low). The deferred * timer might skip some samples if idle/sleeping as needed. 
*/ - min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; } else { /* For correct statistics, we need 10 ticks for each measure */ - min_sampling_rate = - MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO * + jiffies_to_usecs(10); } return cpufreq_register_governor(&cpufreq_gov_ondemand); @@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void) cpufreq_unregister_governor(&cpufreq_gov_ondemand); } - MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>"); MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>"); MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for " diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index f13a8a9af6a..ceee06849b9 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c @@ -10,6 +10,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/cpufreq.h> diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index 4c2eb512f2b..2d948a17115 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c @@ -10,6 +10,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/cpufreq.h> diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 399831690fe..e40e5080964 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -37,7 +37,7 @@ struct cpufreq_stats { unsigned int max_state; unsigned int state_num; unsigned int last_index; - cputime64_t *time_in_state; + u64 *time_in_state; unsigned int *freq_table; #ifdef CONFIG_CPU_FREQ_STAT_DETAILS unsigned int *trans_table; @@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, count++; } - alloc_size = count * sizeof(int) + count * sizeof(cputime64_t); + alloc_size = count * sizeof(int) + count * sizeof(u64); #ifdef CONFIG_CPU_FREQ_STAT_DETAILS alloc_size += count * count * sizeof(int); diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index bedac1aa9be..c8c3d293cc5 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -11,6 +11,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/smp.h> diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index af2d81e10f7..7012ea8bf1e 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -31,13 +31,13 @@ static unsigned int locking_frequency; static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); -int exynos_verify_speed(struct cpufreq_policy *policy) +static int exynos_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, exynos_info->freq_table); } -unsigned int exynos_getspeed(unsigned int cpu) +static unsigned int exynos_getspeed(unsigned int cpu) { return clk_get_rate(exynos_info->cpu_clk) / 1000; } @@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy, } arm_volt = volt_table[index]; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* When the new frequency is higher than 
current frequency */ if ((freqs.new > freqs.old) && !safe_arm_volt) { @@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy, if (freqs.new != freqs.old) exynos_info->set_freq(old_index, index); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); /* When the new frequency is lower than current frequency */ if ((freqs.new < freqs.old) || @@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) cpumask_copy(policy->related_cpus, cpu_possible_mask); cpumask_copy(policy->cpus, cpu_online_mask); } else { + policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; cpumask_setall(policy->cpus); } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 90431cb9280..49cda256efb 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -9,6 +9,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 53ddbc760af..f1fa500ac10 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) return 0; } -static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) +static int longhaul_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; @@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = { .target = longhaul_target, .get = longhaul_get, .init = longhaul_cpu_init, - .exit = __devexit_p(longhaul_cpu_exit), + .exit = longhaul_cpu_exit, .name = "longhaul", .owner = THIS_MODULE, .attr = longhaul_attr, diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e3ebb4fa2c3..056faf6af1a 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1186,7 +1186,7 @@ err_out: return -ENODEV; } -static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) +static int powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); @@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = { .target = powernowk8_target, .bios_limit = acpi_processor_get_bios_limit, .init = powernowk8_cpu_init, - .exit = __devexit_p(powernowk8_cpu_exit), + .exit = powernowk8_cpu_exit, .get = powernowk8_get, .name = "powernow-k8", .owner = THIS_MODULE, diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c new file mode 100644 index 00000000000..4575cfe4175 --- /dev/null +++ b/drivers/cpufreq/spear-cpufreq.c @@ -0,0 +1,291 @@ +/* + * drivers/cpufreq/spear-cpufreq.c + * + * CPU Frequency Scaling for SPEAr platform + * + * Copyright (C) 2012 ST Microelectronics + * Deepak Sikri <deepak.sikri@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/cpufreq.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/types.h> + +/* SPEAr CPUFreq driver data structure */ +static struct { + struct clk *clk; + unsigned int transition_latency; + struct cpufreq_frequency_table *freq_tbl; + u32 cnt; +} spear_cpufreq; + +int spear_cpufreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); +} + +static unsigned int spear_cpufreq_get(unsigned int cpu) +{ + return clk_get_rate(spear_cpufreq.clk) / 1000; +} + +static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq) +{ + struct clk *sys_pclk; + int pclk; + /* + * In SPEAr1340, cpu clk's parent sys clk can take input from + * following sources + */ + const char *sys_clk_src[] = { + "sys_syn_clk", + "pll1_clk", + "pll2_clk", + "pll3_clk", + }; + + /* + * As sys clk can have multiple source with their own range + * limitation so we choose possible sources accordingly + */ + if (newfreq <= 300000000) + pclk = 0; /* src is sys_syn_clk */ + else if (newfreq > 300000000 && newfreq <= 500000000) + pclk = 3; /* src is pll3_clk */ + else if (newfreq == 600000000) + pclk = 1; /* src is pll1_clk */ + else + return ERR_PTR(-EINVAL); + + /* Get parent to sys clock */ + sys_pclk = clk_get(NULL, sys_clk_src[pclk]); + if (IS_ERR(sys_pclk)) + pr_err("Failed to get %s clock\n", sys_clk_src[pclk]); + + return sys_pclk; +} + +/* + * In SPEAr1340, we cannot use newfreq directly because we need to actually + * access a source clock (clk) which might not be ancestor of cpu at present. + * Hence in SPEAr1340 we would operate on source clock directly before switching + * cpu clock to it. + */ +static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq) +{ + struct clk *sys_clk; + int ret = 0; + + sys_clk = clk_get_parent(spear_cpufreq.clk); + if (IS_ERR(sys_clk)) { + pr_err("failed to get cpu's parent (sys) clock\n"); + return PTR_ERR(sys_clk); + } + + /* Set the rate of the source clock before changing the parent */ + ret = clk_set_rate(sys_pclk, newfreq); + if (ret) { + pr_err("Failed to set sys clk rate to %lu\n", newfreq); + return ret; + } + + ret = clk_set_parent(sys_clk, sys_pclk); + if (ret) { + pr_err("Failed to set sys clk parent\n"); + return ret; + } + + return 0; +} + +static int spear_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + struct cpufreq_freqs freqs; + unsigned long newfreq; + struct clk *srcclk; + int index, ret, mult = 1; + + if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl, + target_freq, relation, &index)) + return -EINVAL; + + freqs.cpu = policy->cpu; + freqs.old = spear_cpufreq_get(0); + + newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000; + if (of_machine_is_compatible("st,spear1340")) { + /* + * SPEAr1340 is special in the sense that due to the possibility + * of multiple clock sources for cpu clk's parent we can have + * different clock source for different frequency of cpu clk. + * Hence we need to choose one from amongst these possible clock + * sources. 
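As a concrete illustration of the comment above (numbers taken from the frequency ranges in spear1340_cpu_get_possible_parent and the flow of spear_cpufreq_target): a request for the 600 MHz table entry gives newfreq = 600,000,000 Hz, so pll1_clk is picked as the candidate parent of the sys clock; because on SPEAr1340 the source clock runs at twice the CPU clock, clk_round_rate() is then asked for 1.2 GHz, the source is programmed and reparented at that rate, and the reported freqs.new is the rounded rate divided back by 1000 and by the multiplier of 2.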
+ */ + srcclk = spear1340_cpu_get_possible_parent(newfreq); + if (IS_ERR(srcclk)) { + pr_err("Failed to get src clk\n"); + return PTR_ERR(srcclk); + } + + /* SPEAr1340: src clk is always 2 * intended cpu clk */ + mult = 2; + } else { + /* + * src clock to be altered is ancestor of cpu clock. Hence we + * can directly work on cpu clk + */ + srcclk = spear_cpufreq.clk; + } + + newfreq = clk_round_rate(srcclk, newfreq * mult); + if (newfreq < 0) { + pr_err("clk_round_rate failed for cpu src clock\n"); + return newfreq; + } + + freqs.new = newfreq / 1000; + freqs.new /= mult; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + if (mult == 2) + ret = spear1340_set_cpu_rate(srcclk, newfreq); + else + ret = clk_set_rate(spear_cpufreq.clk, newfreq); + + /* Get current rate after clk_set_rate, in case of failure */ + if (ret) { + pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret); + freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; + } + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return ret; +} + +static int spear_cpufreq_init(struct cpufreq_policy *policy) +{ + int ret; + + ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl); + if (ret) { + pr_err("cpufreq_frequency_table_cpuinfo() failed"); + return ret; + } + + cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu); + policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; + policy->cur = spear_cpufreq_get(0); + + cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); + cpumask_copy(policy->related_cpus, policy->cpus); + + return 0; +} + +static int spear_cpufreq_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static struct freq_attr *spear_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver spear_cpufreq_driver = { + .name = "cpufreq-spear", + .flags = CPUFREQ_STICKY, + .verify = spear_cpufreq_verify, + .target = spear_cpufreq_target, + .get = spear_cpufreq_get, + .init = spear_cpufreq_init, + .exit = spear_cpufreq_exit, + .attr = spear_cpufreq_attr, +}; + +static int spear_cpufreq_driver_init(void) +{ + struct device_node *np; + const struct property *prop; + struct cpufreq_frequency_table *freq_tbl; + const __be32 *val; + int cnt, i, ret; + + np = of_find_node_by_path("/cpus/cpu@0"); + if (!np) { + pr_err("No cpu node found"); + return -ENODEV; + } + + if (of_property_read_u32(np, "clock-latency", + &spear_cpufreq.transition_latency)) + spear_cpufreq.transition_latency = CPUFREQ_ETERNAL; + + prop = of_find_property(np, "cpufreq_tbl", NULL); + if (!prop || !prop->value) { + pr_err("Invalid cpufreq_tbl"); + ret = -ENODEV; + goto out_put_node; + } + + cnt = prop->length / sizeof(u32); + val = prop->value; + + freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL); + if (!freq_tbl) { + ret = -ENOMEM; + goto out_put_node; + } + + for (i = 0; i < cnt; i++) { + freq_tbl[i].index = i; + freq_tbl[i].frequency = be32_to_cpup(val++); + } + + freq_tbl[i].index = i; + freq_tbl[i].frequency = CPUFREQ_TABLE_END; + + spear_cpufreq.freq_tbl = freq_tbl; + + of_node_put(np); + + spear_cpufreq.clk = clk_get(NULL, "cpu_clk"); + if (IS_ERR(spear_cpufreq.clk)) { + pr_err("Unable to get CPU clock\n"); + ret = PTR_ERR(spear_cpufreq.clk); + goto out_put_mem; + } + + ret = cpufreq_register_driver(&spear_cpufreq_driver); + if (!ret) + return 0; + + pr_err("failed register driver: %d\n", ret); + clk_put(spear_cpufreq.clk); + +out_put_mem: + kfree(freq_tbl); + return 
ret; + +out_put_node: + of_node_put(np); + return ret; +} +late_initcall(spear_cpufreq_driver_init); + +MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>"); +MODULE_DESCRIPTION("SPEAr CPUFreq driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index a76b689e553..234ae651b38 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -9,6 +9,15 @@ config CPU_IDLE If you're using an ACPI-enabled platform, you should say Y here. +config CPU_IDLE_MULTIPLE_DRIVERS + bool "Support multiple cpuidle drivers" + depends on CPU_IDLE + default n + help + Allows the cpuidle framework to use different drivers for each CPU. + This is useful if you have a system with different CPU latencies and + states. If unsure say N. + config CPU_IDLE_GOV_LADDER bool depends on CPU_IDLE diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7f15b8514a1..8df53dd8dbe 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -68,7 +68,7 @@ static cpuidle_enter_t cpuidle_enter_ops; int cpuidle_play_dead(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); - struct cpuidle_driver *drv = cpuidle_get_driver(); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int i, dead_state = -1; int power_usage = -1; @@ -109,8 +109,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, /* This can be moved to within driver enter routine * but that results in multiple copies of same code. */ - dev->states_usage[entered_state].time += - (unsigned long long)dev->last_residency; + dev->states_usage[entered_state].time += dev->last_residency; dev->states_usage[entered_state].usage++; } else { dev->last_residency = 0; @@ -128,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); - struct cpuidle_driver *drv = cpuidle_get_driver(); + struct cpuidle_driver *drv; int next_state, entered_state; if (off) @@ -141,9 +140,15 @@ int cpuidle_idle_call(void) if (!dev || !dev->enabled) return -EBUSY; + drv = cpuidle_get_cpu_driver(dev); + /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(drv, dev); if (need_resched()) { + dev->last_residency = 0; + /* give the governor an opportunity to reflect on the outcome */ + if (cpuidle_curr_governor->reflect) + cpuidle_curr_governor->reflect(dev, next_state); local_irq_enable(); return 0; } @@ -308,15 +313,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {} int cpuidle_enable_device(struct cpuidle_device *dev) { int ret, i; - struct cpuidle_driver *drv = cpuidle_get_driver(); + struct cpuidle_driver *drv; if (!dev) return -EINVAL; if (dev->enabled) return 0; + + drv = cpuidle_get_cpu_driver(dev); + if (!drv || !cpuidle_curr_governor) return -EIO; + if (!dev->state_count) dev->state_count = drv->state_count; @@ -331,7 +340,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev) poll_idle_init(drv); - if ((ret = cpuidle_add_state_sysfs(dev))) + ret = cpuidle_add_device_sysfs(dev); + if (ret) return ret; if (cpuidle_curr_governor->enable && @@ -352,7 +362,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) return 0; fail_sysfs: - cpuidle_remove_state_sysfs(dev); + cpuidle_remove_device_sysfs(dev); return ret; } @@ -368,17 +378,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device); */ void cpuidle_disable_device(struct cpuidle_device *dev) { + struct cpuidle_driver *drv = 
cpuidle_get_cpu_driver(dev); + if (!dev || !dev->enabled) return; - if (!cpuidle_get_driver() || !cpuidle_curr_governor) + + if (!drv || !cpuidle_curr_governor) return; dev->enabled = 0; if (cpuidle_curr_governor->disable) - cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); + cpuidle_curr_governor->disable(drv, dev); - cpuidle_remove_state_sysfs(dev); + cpuidle_remove_device_sysfs(dev); enabled_devices--; } @@ -394,17 +407,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device); static int __cpuidle_register_device(struct cpuidle_device *dev) { int ret; - struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); - struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); - if (!try_module_get(cpuidle_driver->owner)) + if (!try_module_get(drv->owner)) return -EINVAL; - init_completion(&dev->kobj_unregister); - per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); - ret = cpuidle_add_sysfs(cpu_dev); + ret = cpuidle_add_sysfs(dev); if (ret) goto err_sysfs; @@ -416,12 +426,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) return 0; err_coupled: - cpuidle_remove_sysfs(cpu_dev); - wait_for_completion(&dev->kobj_unregister); + cpuidle_remove_sysfs(dev); err_sysfs: list_del(&dev->device_list); per_cpu(cpuidle_devices, dev->cpu) = NULL; - module_put(cpuidle_driver->owner); + module_put(drv->owner); return ret; } @@ -460,8 +469,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); */ void cpuidle_unregister_device(struct cpuidle_device *dev) { - struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); - struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); if (dev->registered == 0) return; @@ -470,16 +478,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) cpuidle_disable_device(dev); - cpuidle_remove_sysfs(cpu_dev); + cpuidle_remove_sysfs(dev); list_del(&dev->device_list); - wait_for_completion(&dev->kobj_unregister); per_cpu(cpuidle_devices, dev->cpu) = NULL; cpuidle_coupled_unregister_device(dev); cpuidle_resume_and_unlock(); - module_put(cpuidle_driver->owner); + module_put(drv->owner); } EXPORT_SYMBOL_GPL(cpuidle_unregister_device); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 76e7f696ad8..ee97e9672ec 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -5,8 +5,6 @@ #ifndef __DRIVER_CPUIDLE_H #define __DRIVER_CPUIDLE_H -#include <linux/device.h> - /* For internal use only */ extern struct cpuidle_governor *cpuidle_curr_governor; extern struct list_head cpuidle_governors; @@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void); extern int cpuidle_switch_governor(struct cpuidle_governor *gov); /* sysfs */ + +struct device; + extern int cpuidle_add_interface(struct device *dev); extern void cpuidle_remove_interface(struct device *dev); -extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); -extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); -extern int cpuidle_add_sysfs(struct device *dev); -extern void cpuidle_remove_sysfs(struct device *dev); +extern int cpuidle_add_device_sysfs(struct cpuidle_device *device); +extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device); +extern int cpuidle_add_sysfs(struct cpuidle_device *dev); +extern void cpuidle_remove_sysfs(struct cpuidle_device *dev); #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED bool cpuidle_state_is_coupled(struct 
cpuidle_device *dev, diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 87db3877fea..3af841fb397 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -14,9 +14,10 @@ #include "cpuidle.h" -static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); -int cpuidle_driver_refcount; + +static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); +static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); static void set_power_states(struct cpuidle_driver *drv) { @@ -40,11 +41,15 @@ static void set_power_states(struct cpuidle_driver *drv) drv->states[i].power_usage = -1 - i; } -/** - * cpuidle_register_driver - registers a driver - * @drv: the driver - */ -int cpuidle_register_driver(struct cpuidle_driver *drv) +static void __cpuidle_driver_init(struct cpuidle_driver *drv) +{ + drv->refcnt = 0; + + if (!drv->power_specified) + set_power_states(drv); +} + +static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) { if (!drv || !drv->state_count) return -EINVAL; @@ -52,31 +57,145 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) if (cpuidle_disabled()) return -ENODEV; - spin_lock(&cpuidle_driver_lock); - if (cpuidle_curr_driver) { - spin_unlock(&cpuidle_driver_lock); + if (__cpuidle_get_cpu_driver(cpu)) return -EBUSY; + + __cpuidle_driver_init(drv); + + __cpuidle_set_cpu_driver(drv, cpu); + + return 0; +} + +static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu) +{ + if (drv != __cpuidle_get_cpu_driver(cpu)) + return; + + if (!WARN_ON(drv->refcnt > 0)) + __cpuidle_set_cpu_driver(NULL, cpu); +} + +#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS + +static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers); + +static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) +{ + per_cpu(cpuidle_drivers, cpu) = drv; +} + +static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) +{ + return per_cpu(cpuidle_drivers, cpu); +} + +static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv) +{ + int cpu; + for_each_present_cpu(cpu) + __cpuidle_unregister_driver(drv, cpu); +} + +static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv) +{ + int ret = 0; + int i, cpu; + + for_each_present_cpu(cpu) { + ret = __cpuidle_register_driver(drv, cpu); + if (ret) + break; } - if (!drv->power_specified) - set_power_states(drv); + if (ret) + for_each_present_cpu(i) { + if (i == cpu) + break; + __cpuidle_unregister_driver(drv, i); + } - cpuidle_curr_driver = drv; + return ret; +} + +int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu) +{ + int ret; + + spin_lock(&cpuidle_driver_lock); + ret = __cpuidle_register_driver(drv, cpu); spin_unlock(&cpuidle_driver_lock); - return 0; + return ret; +} + +void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu) +{ + spin_lock(&cpuidle_driver_lock); + __cpuidle_unregister_driver(drv, cpu); + spin_unlock(&cpuidle_driver_lock); +} + +/** + * cpuidle_register_driver - registers a driver + * @drv: the driver + */ +int cpuidle_register_driver(struct cpuidle_driver *drv) +{ + int ret; + + spin_lock(&cpuidle_driver_lock); + ret = __cpuidle_register_all_cpu_driver(drv); + spin_unlock(&cpuidle_driver_lock); + + return ret; } EXPORT_SYMBOL_GPL(cpuidle_register_driver); /** - * cpuidle_get_driver - return the current driver + * cpuidle_unregister_driver - unregisters a driver + * @drv: the driver */ -struct cpuidle_driver *cpuidle_get_driver(void) +void cpuidle_unregister_driver(struct 
cpuidle_driver *drv) +{ + spin_lock(&cpuidle_driver_lock); + __cpuidle_unregister_all_cpu_driver(drv); + spin_unlock(&cpuidle_driver_lock); +} +EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); + +#else + +static struct cpuidle_driver *cpuidle_curr_driver; + +static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu) +{ + cpuidle_curr_driver = drv; +} + +static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu) { return cpuidle_curr_driver; } -EXPORT_SYMBOL_GPL(cpuidle_get_driver); + +/** + * cpuidle_register_driver - registers a driver + * @drv: the driver + */ +int cpuidle_register_driver(struct cpuidle_driver *drv) +{ + int ret, cpu; + + cpu = get_cpu(); + spin_lock(&cpuidle_driver_lock); + ret = __cpuidle_register_driver(drv, cpu); + spin_unlock(&cpuidle_driver_lock); + put_cpu(); + + return ret; +} +EXPORT_SYMBOL_GPL(cpuidle_register_driver); /** * cpuidle_unregister_driver - unregisters a driver @@ -84,20 +203,50 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver); */ void cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (drv != cpuidle_curr_driver) { - WARN(1, "invalid cpuidle_unregister_driver(%s)\n", - drv->name); - return; - } + int cpu; + cpu = get_cpu(); spin_lock(&cpuidle_driver_lock); + __cpuidle_unregister_driver(drv, cpu); + spin_unlock(&cpuidle_driver_lock); + put_cpu(); +} +EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); +#endif + +/** + * cpuidle_get_driver - return the current driver + */ +struct cpuidle_driver *cpuidle_get_driver(void) +{ + struct cpuidle_driver *drv; + int cpu; - if (!WARN_ON(cpuidle_driver_refcount > 0)) - cpuidle_curr_driver = NULL; + cpu = get_cpu(); + drv = __cpuidle_get_cpu_driver(cpu); + put_cpu(); + return drv; +} +EXPORT_SYMBOL_GPL(cpuidle_get_driver); + +/** + * cpuidle_get_cpu_driver - return the driver tied with a cpu + */ +struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) +{ + struct cpuidle_driver *drv; + + if (!dev) + return NULL; + + spin_lock(&cpuidle_driver_lock); + drv = __cpuidle_get_cpu_driver(dev->cpu); spin_unlock(&cpuidle_driver_lock); + + return drv; } -EXPORT_SYMBOL_GPL(cpuidle_unregister_driver); +EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); struct cpuidle_driver *cpuidle_driver_ref(void) { @@ -105,8 +254,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void) spin_lock(&cpuidle_driver_lock); - drv = cpuidle_curr_driver; - cpuidle_driver_refcount++; + drv = cpuidle_get_driver(); + drv->refcnt++; spin_unlock(&cpuidle_driver_lock); return drv; @@ -114,10 +263,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void) void cpuidle_driver_unref(void) { + struct cpuidle_driver *drv = cpuidle_get_driver(); + spin_lock(&cpuidle_driver_lock); - if (!WARN_ON(cpuidle_driver_refcount <= 0)) - cpuidle_driver_refcount--; + if (drv && !WARN_ON(drv->refcnt <= 0)) + drv->refcnt--; spin_unlock(&cpuidle_driver_lock); } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 5b1f2c372c1..bd40b943b6d 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -28,6 +28,13 @@ #define MAX_INTERESTING 50000 #define STDDEV_THRESH 400 +/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */ +#define MAX_DEVIATION 60 + +static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer); +static DEFINE_PER_CPU(int, hrtimer_status); +/* menu hrtimer mode */ +enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL}; /* * Concepts and ideas behind the menu governor @@ -109,6 +116,13 @@ * */ +/* + * The C-state residency is so long that is is worthwhile to exit + 
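An illustrative sketch, not part of the patch: with CONFIG_CPU_IDLE_MULTIPLE_DRIVERS the new cpuidle_register_cpu_driver()/cpuidle_unregister_cpu_driver() calls above let a platform tie a different driver to each CPU, for example one set of C-states for "big" cores and another for "little" ones. The two driver structures and the is_big_cpu() helper below are made up for the example.

static int example_register_idle_drivers(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_driver *drv = is_big_cpu(cpu) ?
				&big_cluster_idle_driver : &little_cluster_idle_driver;

		ret = cpuidle_register_cpu_driver(drv, cpu);
		if (ret)
			return ret;	/* unwinding via cpuidle_unregister_cpu_driver() omitted */
	}

	return 0;
}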
* from the shallow C-state and re-enter into a deeper C-state. + */ +static unsigned int perfect_cstate_ms __read_mostly = 30; +module_param(perfect_cstate_ms, uint, 0000); + struct menu_device { int last_state_idx; int needs_update; @@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor) return div_u64(dividend + (divisor / 2), divisor); } +/* Cancel the hrtimer if it is not triggered yet */ +void menu_hrtimer_cancel(void) +{ + int cpu = smp_processor_id(); + struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); + + /* The timer is still not time out*/ + if (per_cpu(hrtimer_status, cpu)) { + hrtimer_cancel(hrtmr); + per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; + } +} +EXPORT_SYMBOL_GPL(menu_hrtimer_cancel); + +/* Call back for hrtimer is triggered */ +static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer) +{ + int cpu = smp_processor_id(); + struct menu_device *data = &per_cpu(menu_devices, cpu); + + /* In general case, the expected residency is much larger than + * deepest C-state target residency, but prediction logic still + * predicts a small predicted residency, so the prediction + * history is totally broken if the timer is triggered. + * So reset the correction factor. + */ + if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL) + data->correction_factor[data->bucket] = RESOLUTION * DECAY; + + per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP; + + return HRTIMER_NORESTART; +} + /* * Try detecting repeating patterns by keeping track of the last 8 * intervals, and checking if the standard deviation of that set * of points is below a threshold. If it is... then use the * average of these 8 points as the estimated value. */ -static void detect_repeating_patterns(struct menu_device *data) +static u32 get_typical_interval(struct menu_device *data) { - int i; - uint64_t avg = 0; - uint64_t stddev = 0; /* contains the square of the std deviation */ - - /* first calculate average and standard deviation of the past */ - for (i = 0; i < INTERVALS; i++) - avg += data->intervals[i]; - avg = avg / INTERVALS; + int i = 0, divisor = 0; + uint64_t max = 0, avg = 0, stddev = 0; + int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */ + unsigned int ret = 0; - /* if the avg is beyond the known next tick, it's worthless */ - if (avg > data->expected_us) - return; +again: - for (i = 0; i < INTERVALS; i++) - stddev += (data->intervals[i] - avg) * - (data->intervals[i] - avg); - - stddev = stddev / INTERVALS; + /* first calculate average and standard deviation of the past */ + max = avg = divisor = stddev = 0; + for (i = 0; i < INTERVALS; i++) { + int64_t value = data->intervals[i]; + if (value <= thresh) { + avg += value; + divisor++; + if (value > max) + max = value; + } + } + do_div(avg, divisor); + for (i = 0; i < INTERVALS; i++) { + int64_t value = data->intervals[i]; + if (value <= thresh) { + int64_t diff = value - avg; + stddev += diff * diff; + } + } + do_div(stddev, divisor); + stddev = int_sqrt(stddev); /* - * now.. if stddev is small.. then assume we have a - * repeating pattern and predict we keep doing this. + * If we have outliers to the upside in our distribution, discard + * those by setting the threshold to exclude these outliers, then + * calculate the average and standard deviation again. Once we get + * down to the bottom 3/4 of our samples, stop excluding samples. + * + * This can deal with workloads that have long pauses interspersed + * with sporadic activity with a bunch of short pauses. 
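A worked example of the two-pass logic above, with made-up interval samples (in microseconds) {500, 520, 490, 510, 505, 495, 515, 20000}: the first pass over all eight values gives an average of about 2941 and a standard deviation of about 6400, so neither "avg > 6 * stddev" nor "stddev <= 20" holds; because all 8 of 8 samples (more than three quarters) are still included, the maximum (20000) is excluded and the loop runs again. The second pass over the remaining seven values gives an average of 505 and a standard deviation of 10, which is accepted, so 505 us becomes predicted_us.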
+ * + * The typical interval is obtained when standard deviation is small + * or standard deviation is small compared to the average interval. */ - - if (avg && stddev < STDDEV_THRESH) + if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3)) + || stddev <= 20) { data->predicted_us = avg; + ret = 1; + return ret; + + } else if ((divisor * 4) > INTERVALS * 3) { + /* Exclude the max interval */ + thresh = max - 1; + goto again; + } + + return ret; } /** @@ -240,6 +316,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) int i; int multiplier; struct timespec t; + int repeat = 0, low_predicted = 0; + int cpu = smp_processor_id(); + struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu); if (data->needs_update) { menu_update(drv, dev); @@ -274,7 +353,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], RESOLUTION * DECAY); - detect_repeating_patterns(data); + repeat = get_typical_interval(data); /* * We want to default to C1 (hlt), not to busy polling @@ -295,8 +374,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) if (s->disabled || su->disable) continue; - if (s->target_residency > data->predicted_us) + if (s->target_residency > data->predicted_us) { + low_predicted = 1; continue; + } if (s->exit_latency > latency_req) continue; if (s->exit_latency * multiplier > data->predicted_us) @@ -309,6 +390,44 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) } } + /* not deepest C-state chosen for low predicted residency */ + if (low_predicted) { + unsigned int timer_us = 0; + unsigned int perfect_us = 0; + + /* + * Set a timer to detect whether this sleep is much + * longer than repeat mode predicted. If the timer + * triggers, the code will evaluate whether to put + * the CPU into a deeper C-state. + * The timer is cancelled on CPU wakeup. + */ + timer_us = 2 * (data->predicted_us + MAX_DEVIATION); + + perfect_us = perfect_cstate_ms * 1000; + + if (repeat && (4 * timer_us < data->expected_us)) { + RCU_NONIDLE(hrtimer_start(hrtmr, + ns_to_ktime(1000 * timer_us), + HRTIMER_MODE_REL_PINNED)); + /* In repeat case, menu hrtimer is started */ + per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT; + } else if (perfect_us < data->expected_us) { + /* + * The next timer is long. This could be because + * we did not make a useful prediction. + * In that case, it makes sense to re-enter + * into a deeper C-state after some time. 
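(Illustrative aside, not part of the patch.) The filtering that get_typical_interval() performs above can be summarised as: repeatedly drop the largest sample and recompute the average and standard deviation until the spread is small enough, but never discard more than a quarter of the INTERVALS samples. A minimal stand-alone user-space sketch of the same loop, with made-up sample values; the 6x and 20us acceptance thresholds mirror the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <math.h>

#define INTERVALS 8

/* Returns 1 and stores the typical interval in *out when a repeating
 * pattern is detected, 0 otherwise. */
static int typical_interval(const uint64_t intervals[INTERVALS], uint64_t *out)
{
    uint64_t thresh = UINT64_MAX;   /* discard samples above this value */

    for (;;) {
        uint64_t avg = 0, max = 0, var = 0, stddev;
        int divisor = 0, i;

        for (i = 0; i < INTERVALS; i++) {
            uint64_t value = intervals[i];
            if (value <= thresh) {
                avg += value;
                divisor++;
                if (value > max)
                    max = value;
            }
        }
        avg /= divisor;

        for (i = 0; i < INTERVALS; i++) {
            uint64_t value = intervals[i];
            if (value <= thresh) {
                int64_t diff = (int64_t)value - (int64_t)avg;
                var += (uint64_t)(diff * diff);
            }
        }
        var /= divisor;
        stddev = (uint64_t)sqrt((double)var);

        /* Accept when the spread is small in absolute terms or small
         * compared to the average interval. */
        if ((avg > 6 * stddev && divisor * 4 >= INTERVALS * 3) ||
            stddev <= 20) {
            *out = avg;
            return 1;
        }

        /* Stop once only the bottom 3/4 of the samples remain. */
        if (divisor * 4 <= INTERVALS * 3)
            return 0;

        thresh = max - 1;   /* exclude the current maximum and retry */
    }
}

int main(void)
{
    /* seven short sleep intervals plus one long outlier, in microseconds */
    uint64_t intervals[INTERVALS] = { 120, 130, 118, 125, 122, 119, 127, 9000 };
    uint64_t typical;

    if (typical_interval(intervals, &typical))
        printf("typical interval: %llu us\n", (unsigned long long)typical);
    else
        printf("no repeating pattern detected\n");
    return 0;
}

With this sample data the 9000us outlier is dropped on the second pass and the predicted interval settles at 123us, which is the behaviour the menu governor relies on to keep picking a shallow C-state for repeating short sleeps.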
+ */ + RCU_NONIDLE(hrtimer_start(hrtmr, + ns_to_ktime(1000 * timer_us), + HRTIMER_MODE_REL_PINNED)); + /* In general case, menu hrtimer is started */ + per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL; + } + + } + return data->last_state_idx; } @@ -399,6 +518,9 @@ static int menu_enable_device(struct cpuidle_driver *drv, struct cpuidle_device *dev) { struct menu_device *data = &per_cpu(menu_devices, dev->cpu); + struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu); + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + t->function = menu_hrtimer_notify; memset(data, 0, sizeof(struct menu_device)); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 5f809e337b8..34094294610 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -12,6 +12,7 @@ #include <linux/slab.h> #include <linux/cpu.h> #include <linux/capability.h> +#include <linux/device.h> #include "cpuidle.h" @@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = { NULL }; +struct cpuidle_state_kobj { + struct cpuidle_state *state; + struct cpuidle_state_usage *state_usage; + struct completion kobj_unregister; + struct kobject kobj; +}; + #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) #define kobj_to_state(k) (kobj_to_state_obj(k)->state) #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) @@ -356,17 +364,17 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) } /** - * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes + * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes * @device: the target device */ -int cpuidle_add_state_sysfs(struct cpuidle_device *device) +static int cpuidle_add_state_sysfs(struct cpuidle_device *device) { int i, ret = -ENOMEM; struct cpuidle_state_kobj *kobj; - struct cpuidle_driver *drv = cpuidle_get_driver(); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device); /* state statistics */ - for (i = 0; i < device->state_count; i++) { + for (i = 0; i < drv->state_count; i++) { kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); if (!kobj) goto error_state; @@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) kobj->state_usage = &device->states_usage[i]; init_completion(&kobj->kobj_unregister); - ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, - "state%d", i); + ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, + &device->kobj, "state%d", i); if (ret) { kfree(kobj); goto error_state; @@ -393,10 +401,10 @@ error_state: } /** - * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes + * cpuidle_remove_driver_sysfs - removes the cpuidle states sysfs attributes * @device: the target device */ -void cpuidle_remove_state_sysfs(struct cpuidle_device *device) +static void cpuidle_remove_state_sysfs(struct cpuidle_device *device) { int i; @@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) cpuidle_free_state_kobj(device, i); } +#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS +#define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj) +#define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr) + +#define define_one_driver_ro(_name, show) \ + static struct cpuidle_driver_attr attr_driver_##_name = \ + __ATTR(_name, 0644, show, NULL) + +struct cpuidle_driver_kobj { + struct cpuidle_driver *drv; + struct completion kobj_unregister; + struct kobject kobj; +}; + +struct cpuidle_driver_attr { + 
struct attribute attr; + ssize_t (*show)(struct cpuidle_driver *, char *); + ssize_t (*store)(struct cpuidle_driver *, const char *, size_t); +}; + +static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf) +{ + ssize_t ret; + + spin_lock(&cpuidle_driver_lock); + ret = sprintf(buf, "%s\n", drv ? drv->name : "none"); + spin_unlock(&cpuidle_driver_lock); + + return ret; +} + +static void cpuidle_driver_sysfs_release(struct kobject *kobj) +{ + struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); + complete(&driver_kobj->kobj_unregister); +} + +static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr, + char * buf) +{ + int ret = -EIO; + struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); + struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); + + if (dattr->show) + ret = dattr->show(driver_kobj->drv, buf); + + return ret; +} + +static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + int ret = -EIO; + struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj); + struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr); + + if (dattr->store) + ret = dattr->store(driver_kobj->drv, buf, size); + + return ret; +} + +define_one_driver_ro(name, show_driver_name); + +static const struct sysfs_ops cpuidle_driver_sysfs_ops = { + .show = cpuidle_driver_show, + .store = cpuidle_driver_store, +}; + +static struct attribute *cpuidle_driver_default_attrs[] = { + &attr_driver_name.attr, + NULL +}; + +static struct kobj_type ktype_driver_cpuidle = { + .sysfs_ops = &cpuidle_driver_sysfs_ops, + .default_attrs = cpuidle_driver_default_attrs, + .release = cpuidle_driver_sysfs_release, +}; + +/** + * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute + * @device: the target device + */ +static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) +{ + struct cpuidle_driver_kobj *kdrv; + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + int ret; + + kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL); + if (!kdrv) + return -ENOMEM; + + kdrv->drv = drv; + init_completion(&kdrv->kobj_unregister); + + ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle, + &dev->kobj, "driver"); + if (ret) { + kfree(kdrv); + return ret; + } + + kobject_uevent(&kdrv->kobj, KOBJ_ADD); + dev->kobj_driver = kdrv; + + return ret; +} + +/** + * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute + * @device: the target device + */ +static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) +{ + struct cpuidle_driver_kobj *kdrv = dev->kobj_driver; + kobject_put(&kdrv->kobj); + wait_for_completion(&kdrv->kobj_unregister); + kfree(kdrv); +} +#else +static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) +{ + return 0; +} + +static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) +{ + ; +} +#endif + +/** + * cpuidle_add_device_sysfs - adds device specific sysfs attributes + * @device: the target device + */ +int cpuidle_add_device_sysfs(struct cpuidle_device *device) +{ + int ret; + + ret = cpuidle_add_state_sysfs(device); + if (ret) + return ret; + + ret = cpuidle_add_driver_sysfs(device); + if (ret) + cpuidle_remove_state_sysfs(device); + return ret; +} + +/** + * cpuidle_remove_device_sysfs : removes device specific sysfs attributes + * @device : the target device + */ +void cpuidle_remove_device_sysfs(struct cpuidle_device *device) +{ + cpuidle_remove_driver_sysfs(device); + 
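(Hypothetical illustration, not part of the patch.) With the cpuidle_driver_attr plumbing above, additional per-driver attributes can be added under the new per-device "driver" kobject by following the show_driver_name() pattern; the attribute and function names below are invented for the example and would also need an entry in cpuidle_driver_default_attrs[]:

/* Example only: expose drv->refcnt next to the driver name. */
static ssize_t show_driver_refcnt(struct cpuidle_driver *drv, char *buf)
{
    ssize_t ret;

    spin_lock(&cpuidle_driver_lock);
    ret = sprintf(buf, "%d\n", drv ? drv->refcnt : 0);
    spin_unlock(&cpuidle_driver_lock);

    return ret;
}
define_one_driver_ro(refcnt, show_driver_refcnt);

/* ...and the corresponding entry in cpuidle_driver_default_attrs[]:
 *      &attr_driver_refcnt.attr,
 */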
cpuidle_remove_state_sysfs(device); +} + /** * cpuidle_add_sysfs - creates a sysfs instance for the target device * @dev: the target device */ -int cpuidle_add_sysfs(struct device *cpu_dev) +int cpuidle_add_sysfs(struct cpuidle_device *dev) { - int cpu = cpu_dev->id; - struct cpuidle_device *dev; + struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); int error; - dev = per_cpu(cpuidle_devices, cpu); + init_completion(&dev->kobj_unregister); + error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (!error) @@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev) * cpuidle_remove_sysfs - deletes a sysfs instance on the target device * @dev: the target device */ -void cpuidle_remove_sysfs(struct device *cpu_dev) +void cpuidle_remove_sysfs(struct cpuidle_device *dev) { - int cpu = cpu_dev->id; - struct cpuidle_device *dev; - - dev = per_cpu(cpuidle_devices, cpu); kobject_put(&dev->kobj); + wait_for_completion(&dev->kobj_unregister); } diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index f6b0a6e2ea5..0f079be1330 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -30,7 +30,7 @@ if PM_DEVFREQ comment "DEVFREQ Governors" config DEVFREQ_GOV_SIMPLE_ONDEMAND - bool "Simple Ondemand" + tristate "Simple Ondemand" help Chooses frequency based on the recent load on the device. Works similar as ONDEMAND governor of CPUFREQ does. A device with @@ -39,7 +39,7 @@ config DEVFREQ_GOV_SIMPLE_ONDEMAND values to the governor with data field at devfreq_add_device(). config DEVFREQ_GOV_PERFORMANCE - bool "Performance" + tristate "Performance" help Sets the frequency at the maximum available frequency. This governor always returns UINT_MAX as frequency so that @@ -47,7 +47,7 @@ config DEVFREQ_GOV_PERFORMANCE at any time. config DEVFREQ_GOV_POWERSAVE - bool "Powersave" + tristate "Powersave" help Sets the frequency at the minimum available frequency. This governor always returns 0 as frequency so that @@ -55,7 +55,7 @@ config DEVFREQ_GOV_POWERSAVE at any time. config DEVFREQ_GOV_USERSPACE - bool "Userspace" + tristate "Userspace" help Sets the frequency at the user specified one. This governor returns the user configured frequency if there diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index b146d76f04c..53766f39aad 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -27,21 +27,17 @@ #include <linux/hrtimer.h> #include "governor.h" -struct class *devfreq_class; +static struct class *devfreq_class; /* - * devfreq_work periodically monitors every registered device. - * The minimum polling interval is one jiffy. The polling interval is - * determined by the minimum polling period among all polling devfreq - * devices. The resolution of polling interval is one jiffy. + * devfreq core provides delayed work based load monitoring helper + * functions. Governors can use these or can implement their own + * monitoring mechanism. 
*/ -static bool polling; static struct workqueue_struct *devfreq_wq; -static struct delayed_work devfreq_work; - -/* wait removing if this is to be removed */ -static struct devfreq *wait_remove_device; +/* The list of all device-devfreq governors */ +static LIST_HEAD(devfreq_governor_list); /* The list of all device-devfreq */ static LIST_HEAD(devfreq_list); static DEFINE_MUTEX(devfreq_list_lock); @@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev) } /** + * devfreq_get_freq_level() - Lookup freq_table for the frequency + * @devfreq: the devfreq instance + * @freq: the target frequency + */ +static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) +{ + int lev; + + for (lev = 0; lev < devfreq->profile->max_state; lev++) + if (freq == devfreq->profile->freq_table[lev]) + return lev; + + return -EINVAL; +} + +/** + * devfreq_update_status() - Update statistics of devfreq behavior + * @devfreq: the devfreq instance + * @freq: the update target frequency + */ +static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) +{ + int lev, prev_lev; + unsigned long cur_time; + + lev = devfreq_get_freq_level(devfreq, freq); + if (lev < 0) + return lev; + + cur_time = jiffies; + devfreq->time_in_state[lev] += + cur_time - devfreq->last_stat_updated; + if (freq != devfreq->previous_freq) { + prev_lev = devfreq_get_freq_level(devfreq, + devfreq->previous_freq); + devfreq->trans_table[(prev_lev * + devfreq->profile->max_state) + lev]++; + devfreq->total_trans++; + } + devfreq->last_stat_updated = cur_time; + + return 0; +} + +/** + * find_devfreq_governor() - find devfreq governor from name + * @name: name of the governor + * + * Search the list of devfreq governors and return the matched + * governor's pointer. devfreq_list_lock should be held by the caller. + */ +static struct devfreq_governor *find_devfreq_governor(const char *name) +{ + struct devfreq_governor *tmp_governor; + + if (unlikely(IS_ERR_OR_NULL(name))) { + pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); + return ERR_PTR(-EINVAL); + } + WARN(!mutex_is_locked(&devfreq_list_lock), + "devfreq_list_lock must be locked."); + + list_for_each_entry(tmp_governor, &devfreq_governor_list, node) { + if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) + return tmp_governor; + } + + return ERR_PTR(-ENODEV); +} + +/* Load monitoring helper functions for governors use */ + +/** * update_devfreq() - Reevaluate the device and configure frequency. * @devfreq: the devfreq instance. * @@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq) return -EINVAL; } + if (!devfreq->governor) + return -EINVAL; + /* Reevaluate the proper frequency */ err = devfreq->governor->get_target_freq(devfreq, &freq); if (err) @@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq) if (err) return err; + if (devfreq->profile->freq_table) + if (devfreq_update_status(devfreq, freq)) + dev_err(&devfreq->dev, + "Couldn't update frequency transition information.\n"); + devfreq->previous_freq = freq; return err; } +EXPORT_SYMBOL(update_devfreq); + +/** + * devfreq_monitor() - Periodically poll devfreq objects. + * @work: the work struct used to run devfreq_monitor periodically. 
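(Driver-side sketch, not part of the patch.) The devfreq_update_status() bookkeeping above only collects trans_stat data when the driver's devfreq_dev_profile provides freq_table and max_state, and devfreq_add_device() now selects the governor by name. The frequencies, callbacks and governor choice below are illustrative:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Made-up operating points, in kHz. */
static unsigned int my_freq_table[] = { 100000, 200000, 400000 };

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
    /* program the hardware to the requested frequency here */
    return 0;
}

static int my_get_dev_status(struct device *dev,
                 struct devfreq_dev_status *stat)
{
    stat->busy_time = 0;        /* fill in real load figures here */
    stat->total_time = 1;
    stat->current_frequency = 400000;
    return 0;
}

static struct devfreq_dev_profile my_profile = {
    .initial_freq   = 400000,
    .polling_ms     = 100,  /* consumed by the monitoring helpers below */
    .target         = my_target,
    .get_dev_status = my_get_dev_status,
    .freq_table     = my_freq_table,    /* enables trans_stat */
    .max_state      = ARRAY_SIZE(my_freq_table),
};

static int my_probe(struct device *dev)
{
    /* the governor is now requested by name, not by pointer */
    struct devfreq *df = devfreq_add_device(dev, &my_profile,
                        "simple_ondemand", NULL);

    return IS_ERR(df) ? PTR_ERR(df) : 0;
}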
+ * + */ +static void devfreq_monitor(struct work_struct *work) +{ + int err; + struct devfreq *devfreq = container_of(work, + struct devfreq, work.work); + + mutex_lock(&devfreq->lock); + err = update_devfreq(devfreq); + if (err) + dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); + + queue_delayed_work(devfreq_wq, &devfreq->work, + msecs_to_jiffies(devfreq->profile->polling_ms)); + mutex_unlock(&devfreq->lock); +} + +/** + * devfreq_monitor_start() - Start load monitoring of devfreq instance + * @devfreq: the devfreq instance. + * + * Helper function for starting devfreq device load monitoring. By + * default delayed work based monitoring is supported. Function + * to be called from governor in response to DEVFREQ_GOV_START + * event when device is added to devfreq framework. + */ +void devfreq_monitor_start(struct devfreq *devfreq) +{ + INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); + if (devfreq->profile->polling_ms) + queue_delayed_work(devfreq_wq, &devfreq->work, + msecs_to_jiffies(devfreq->profile->polling_ms)); +} +EXPORT_SYMBOL(devfreq_monitor_start); + +/** + * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance + * @devfreq: the devfreq instance. + * + * Helper function to stop devfreq device load monitoring. Function + * to be called from governor in response to DEVFREQ_GOV_STOP + * event when device is removed from devfreq framework. + */ +void devfreq_monitor_stop(struct devfreq *devfreq) +{ + cancel_delayed_work_sync(&devfreq->work); +} +EXPORT_SYMBOL(devfreq_monitor_stop); + +/** + * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance + * @devfreq: the devfreq instance. + * + * Helper function to suspend devfreq device load monitoring. Function + * to be called from governor in response to DEVFREQ_GOV_SUSPEND + * event or when polling interval is set to zero. + * + * Note: Though this function is the same as devfreq_monitor_stop(), + * intentionally kept separate to provide hooks for collecting + * transition statistics. + */ +void devfreq_monitor_suspend(struct devfreq *devfreq) +{ + mutex_lock(&devfreq->lock); + if (devfreq->stop_polling) { + mutex_unlock(&devfreq->lock); + return; + } + + devfreq->stop_polling = true; + mutex_unlock(&devfreq->lock); + cancel_delayed_work_sync(&devfreq->work); +} +EXPORT_SYMBOL(devfreq_monitor_suspend); + +/** + * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance + * @devfreq: the devfreq instance. + * + * Helper function to resume devfreq device load monitoring. Function + * to be called from governor in response to DEVFREQ_GOV_RESUME + * event or when polling interval is set to non-zero. + */ +void devfreq_monitor_resume(struct devfreq *devfreq) +{ + mutex_lock(&devfreq->lock); + if (!devfreq->stop_polling) + goto out; + + if (!delayed_work_pending(&devfreq->work) && + devfreq->profile->polling_ms) + queue_delayed_work(devfreq_wq, &devfreq->work, + msecs_to_jiffies(devfreq->profile->polling_ms)); + devfreq->stop_polling = false; + +out: + mutex_unlock(&devfreq->lock); +} +EXPORT_SYMBOL(devfreq_monitor_resume); + +/** + * devfreq_interval_update() - Update device devfreq monitoring interval + * @devfreq: the devfreq instance. + * @delay: new polling interval to be set. + * + * Helper function to set new load monitoring polling interval. Function + * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */ +void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) +{ + unsigned int cur_delay = devfreq->profile->polling_ms; + unsigned int new_delay = *delay; + + mutex_lock(&devfreq->lock); + devfreq->profile->polling_ms = new_delay; + + if (devfreq->stop_polling) + goto out; + + /* if new delay is zero, stop polling */ + if (!new_delay) { + mutex_unlock(&devfreq->lock); + cancel_delayed_work_sync(&devfreq->work); + return; + } + + /* if current delay is zero, start polling with new delay */ + if (!cur_delay) { + queue_delayed_work(devfreq_wq, &devfreq->work, + msecs_to_jiffies(devfreq->profile->polling_ms)); + goto out; + } + + /* if current delay is greater than new delay, restart polling */ + if (cur_delay > new_delay) { + mutex_unlock(&devfreq->lock); + cancel_delayed_work_sync(&devfreq->work); + mutex_lock(&devfreq->lock); + if (!devfreq->stop_polling) + queue_delayed_work(devfreq_wq, &devfreq->work, + msecs_to_jiffies(devfreq->profile->polling_ms)); + } +out: + mutex_unlock(&devfreq->lock); +} +EXPORT_SYMBOL(devfreq_interval_update); /** * devfreq_notifier_call() - Notify that the device frequency requirements * has been changed out of devfreq framework. - * @nb the notifier_block (supposed to be devfreq->nb) - * @type not used - * @devp not used + * @nb: the notifier_block (supposed to be devfreq->nb) + * @type: not used + * @devp: not used * * Called by a notifier that uses devfreq->nb. */ @@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, } /** - * _remove_devfreq() - Remove devfreq from the device. + * _remove_devfreq() - Remove devfreq from the list and release its resources. * @devfreq: the devfreq struct * @skip: skip calling device_unregister(). - * - * Note that the caller should lock devfreq->lock before calling - * this. _remove_devfreq() will unlock it and free devfreq - * internally. devfreq_list_lock should be locked by the caller - * as well (not relased at return) - * - * Lock usage: - * devfreq->lock: locked before call. - * unlocked at return (and freed) - * devfreq_list_lock: locked before call. - * kept locked at return. - * if devfreq is centrally polled. 
- * - * Freed memory: - * devfreq */ static void _remove_devfreq(struct devfreq *devfreq, bool skip) { - if (!mutex_is_locked(&devfreq->lock)) { - WARN(true, "devfreq->lock must be locked by the caller.\n"); - return; - } - if (!devfreq->governor->no_central_polling && - !mutex_is_locked(&devfreq_list_lock)) { - WARN(true, "devfreq_list_lock must be locked by the caller.\n"); + mutex_lock(&devfreq_list_lock); + if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { + mutex_unlock(&devfreq_list_lock); + dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); return; } + list_del(&devfreq->node); + mutex_unlock(&devfreq_list_lock); - if (devfreq->being_removed) - return; - - devfreq->being_removed = true; + if (devfreq->governor) + devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_STOP, NULL); if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); - if (devfreq->governor->exit) - devfreq->governor->exit(devfreq); - if (!skip && get_device(&devfreq->dev)) { device_unregister(&devfreq->dev); put_device(&devfreq->dev); } - if (!devfreq->governor->no_central_polling) - list_del(&devfreq->node); - - mutex_unlock(&devfreq->lock); mutex_destroy(&devfreq->lock); - kfree(devfreq); } @@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip) static void devfreq_dev_release(struct device *dev) { struct devfreq *devfreq = to_devfreq(dev); - bool central_polling = !devfreq->governor->no_central_polling; - - /* - * If devfreq_dev_release() was called by device_unregister() of - * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and - * being_removed is already set. This also partially checks the case - * where devfreq_dev_release() is called from a thread other than - * the one called _remove_devfreq(); however, this case is - * dealt completely with another following being_removed check. - * - * Because being_removed is never being - * unset, we do not need to worry about race conditions on - * being_removed. - */ - if (devfreq->being_removed) - return; - - if (central_polling) - mutex_lock(&devfreq_list_lock); - - mutex_lock(&devfreq->lock); - /* - * Check being_removed flag again for the case where - * devfreq_dev_release() was called in a thread other than the one - * possibly called _remove_devfreq(). - */ - if (devfreq->being_removed) { - mutex_unlock(&devfreq->lock); - goto out; - } - - /* devfreq->lock is unlocked and removed in _removed_devfreq() */ _remove_devfreq(devfreq, true); - -out: - if (central_polling) - mutex_unlock(&devfreq_list_lock); -} - -/** - * devfreq_monitor() - Periodically poll devfreq objects. - * @work: the work struct used to run devfreq_monitor periodically. 
- * - */ -static void devfreq_monitor(struct work_struct *work) -{ - static unsigned long last_polled_at; - struct devfreq *devfreq, *tmp; - int error; - unsigned long jiffies_passed; - unsigned long next_jiffies = ULONG_MAX, now = jiffies; - struct device *dev; - - /* Initially last_polled_at = 0, polling every device at bootup */ - jiffies_passed = now - last_polled_at; - last_polled_at = now; - if (jiffies_passed == 0) - jiffies_passed = 1; - - mutex_lock(&devfreq_list_lock); - list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) { - mutex_lock(&devfreq->lock); - dev = devfreq->dev.parent; - - /* Do not remove tmp for a while */ - wait_remove_device = tmp; - - if (devfreq->governor->no_central_polling || - devfreq->next_polling == 0) { - mutex_unlock(&devfreq->lock); - continue; - } - mutex_unlock(&devfreq_list_lock); - - /* - * Reduce more next_polling if devfreq_wq took an extra - * delay. (i.e., CPU has been idled.) - */ - if (devfreq->next_polling <= jiffies_passed) { - error = update_devfreq(devfreq); - - /* Remove a devfreq with an error. */ - if (error && error != -EAGAIN) { - - dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n", - error, devfreq->governor->name); - - /* - * Unlock devfreq before locking the list - * in order to avoid deadlock with - * find_device_devfreq or others - */ - mutex_unlock(&devfreq->lock); - mutex_lock(&devfreq_list_lock); - /* Check if devfreq is already removed */ - if (IS_ERR(find_device_devfreq(dev))) - continue; - mutex_lock(&devfreq->lock); - /* This unlocks devfreq->lock and free it */ - _remove_devfreq(devfreq, false); - continue; - } - devfreq->next_polling = devfreq->polling_jiffies; - } else { - devfreq->next_polling -= jiffies_passed; - } - - if (devfreq->next_polling) - next_jiffies = (next_jiffies > devfreq->next_polling) ? - devfreq->next_polling : next_jiffies; - - mutex_unlock(&devfreq->lock); - mutex_lock(&devfreq_list_lock); - } - wait_remove_device = NULL; - mutex_unlock(&devfreq_list_lock); - - if (next_jiffies > 0 && next_jiffies < ULONG_MAX) { - polling = true; - queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies); - } else { - polling = false; - } } /** * devfreq_add_device() - Add devfreq feature to the device * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. - * @governor: the policy to choose frequency. + * @governor_name: name of the policy to choose frequency. * @data: private data for the governor. The devfreq framework does not * touch this value. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, - const struct devfreq_governor *governor, + const char *governor_name, void *data) { struct devfreq *devfreq; + struct devfreq_governor *governor; int err = 0; - if (!dev || !profile || !governor) { + if (!dev || !profile || !governor_name) { dev_err(dev, "%s: Invalid parameters.\n", __func__); return ERR_PTR(-EINVAL); } - - if (!governor->no_central_polling) { - mutex_lock(&devfreq_list_lock); - devfreq = find_device_devfreq(dev); - mutex_unlock(&devfreq_list_lock); - if (!IS_ERR(devfreq)) { - dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); - err = -EINVAL; - goto err_out; - } + mutex_lock(&devfreq_list_lock); + devfreq = find_device_devfreq(dev); + mutex_unlock(&devfreq_list_lock); + if (!IS_ERR(devfreq)) { + dev_err(dev, "%s: Unable to create devfreq for the device. 
It already has one.\n", __func__); + err = -EINVAL; + goto err_out; } devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); @@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; devfreq->profile = profile; - devfreq->governor = governor; + strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; devfreq->data = data; - devfreq->next_polling = devfreq->polling_jiffies - = msecs_to_jiffies(devfreq->profile->polling_ms); devfreq->nb.notifier_call = devfreq_notifier_call; + devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * + devfreq->profile->max_state * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * + devfreq->profile->max_state, + GFP_KERNEL); + devfreq->last_stat_updated = jiffies; + dev_set_name(&devfreq->dev, dev_name(dev)); err = device_register(&devfreq->dev); if (err) { put_device(&devfreq->dev); + mutex_unlock(&devfreq->lock); goto err_dev; } - if (governor->init) - err = governor->init(devfreq); - if (err) - goto err_init; - mutex_unlock(&devfreq->lock); - if (governor->no_central_polling) - goto out; - mutex_lock(&devfreq_list_lock); - list_add(&devfreq->node, &devfreq_list); - if (devfreq_wq && devfreq->next_polling && !polling) { - polling = true; - queue_delayed_work(devfreq_wq, &devfreq_work, - devfreq->next_polling); - } + governor = find_devfreq_governor(devfreq->governor_name); + if (!IS_ERR(governor)) + devfreq->governor = governor; + if (devfreq->governor) + err = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_START, NULL); mutex_unlock(&devfreq_list_lock); -out: + if (err) { + dev_err(dev, "%s: Unable to start governor for the device\n", + __func__); + goto err_init; + } + return devfreq; err_init: + list_del(&devfreq->node); device_unregister(&devfreq->dev); err_dev: - mutex_unlock(&devfreq->lock); kfree(devfreq); err_out: return ERR_PTR(err); } +EXPORT_SYMBOL(devfreq_add_device); /** * devfreq_remove_device() - Remove devfreq feature from a device. - * @devfreq the devfreq instance to be removed + * @devfreq: the devfreq instance to be removed */ int devfreq_remove_device(struct devfreq *devfreq) { - bool central_polling; + if (!devfreq) + return -EINVAL; + + _remove_devfreq(devfreq, false); + return 0; +} +EXPORT_SYMBOL(devfreq_remove_device); + +/** + * devfreq_suspend_device() - Suspend devfreq of a device. + * @devfreq: the devfreq instance to be suspended + */ +int devfreq_suspend_device(struct devfreq *devfreq) +{ if (!devfreq) return -EINVAL; - central_polling = !devfreq->governor->no_central_polling; + if (!devfreq->governor) + return 0; + + return devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_SUSPEND, NULL); +} +EXPORT_SYMBOL(devfreq_suspend_device); + +/** + * devfreq_resume_device() - Resume devfreq of a device. 
+ * @devfreq: the devfreq instance to be resumed + */ +int devfreq_resume_device(struct devfreq *devfreq) +{ + if (!devfreq) + return -EINVAL; + + if (!devfreq->governor) + return 0; + + return devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_RESUME, NULL); +} +EXPORT_SYMBOL(devfreq_resume_device); + +/** + * devfreq_add_governor() - Add devfreq governor + * @governor: the devfreq governor to be added + */ +int devfreq_add_governor(struct devfreq_governor *governor) +{ + struct devfreq_governor *g; + struct devfreq *devfreq; + int err = 0; + + if (!governor) { + pr_err("%s: Invalid parameters.\n", __func__); + return -EINVAL; + } + + mutex_lock(&devfreq_list_lock); + g = find_devfreq_governor(governor->name); + if (!IS_ERR(g)) { + pr_err("%s: governor %s already registered\n", __func__, + g->name); + err = -EINVAL; + goto err_out; + } - if (central_polling) { - mutex_lock(&devfreq_list_lock); - while (wait_remove_device == devfreq) { - mutex_unlock(&devfreq_list_lock); - schedule(); - mutex_lock(&devfreq_list_lock); + list_add(&governor->node, &devfreq_governor_list); + + list_for_each_entry(devfreq, &devfreq_list, node) { + int ret = 0; + struct device *dev = devfreq->dev.parent; + + if (!strncmp(devfreq->governor_name, governor->name, + DEVFREQ_NAME_LEN)) { + /* The following should never occur */ + if (devfreq->governor) { + dev_warn(dev, + "%s: Governor %s already present\n", + __func__, devfreq->governor->name); + ret = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_STOP, NULL); + if (ret) { + dev_warn(dev, + "%s: Governor %s stop = %d\n", + __func__, + devfreq->governor->name, ret); + } + /* Fall through */ + } + devfreq->governor = governor; + ret = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_START, NULL); + if (ret) { + dev_warn(dev, "%s: Governor %s start=%d\n", + __func__, devfreq->governor->name, + ret); + } } } - mutex_lock(&devfreq->lock); - _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ +err_out: + mutex_unlock(&devfreq_list_lock); - if (central_polling) - mutex_unlock(&devfreq_list_lock); + return err; +} +EXPORT_SYMBOL(devfreq_add_governor); - return 0; +/** + * devfreq_remove_device() - Remove devfreq feature from a device. + * @governor: the devfreq governor to be removed + */ +int devfreq_remove_governor(struct devfreq_governor *governor) +{ + struct devfreq_governor *g; + struct devfreq *devfreq; + int err = 0; + + if (!governor) { + pr_err("%s: Invalid parameters.\n", __func__); + return -EINVAL; + } + + mutex_lock(&devfreq_list_lock); + g = find_devfreq_governor(governor->name); + if (IS_ERR(g)) { + pr_err("%s: governor %s not registered\n", __func__, + governor->name); + err = PTR_ERR(g); + goto err_out; + } + list_for_each_entry(devfreq, &devfreq_list, node) { + int ret; + struct device *dev = devfreq->dev.parent; + + if (!strncmp(devfreq->governor_name, governor->name, + DEVFREQ_NAME_LEN)) { + /* we should have a devfreq governor! 
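(Driver-side sketch, not part of the patch.) devfreq_suspend_device() and devfreq_resume_device() above give a driver a way to pause and restart the governor across system sleep; for governors that do not poll they are effectively no-ops. The my_* names are illustrative:

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/pm.h>

struct my_data {
    struct devfreq *devfreq;
};

static int my_suspend(struct device *dev)
{
    struct my_data *data = dev_get_drvdata(dev);

    /* stop the governor's load monitoring while the device sleeps */
    return devfreq_suspend_device(data->devfreq);
}

static int my_resume(struct device *dev)
{
    struct my_data *data = dev_get_drvdata(dev);

    /* restart load monitoring once the device is back */
    return devfreq_resume_device(data->devfreq);
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);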
*/ + if (!devfreq->governor) { + dev_warn(dev, "%s: Governor %s NOT present\n", + __func__, governor->name); + continue; + /* Fall through */ + } + ret = devfreq->governor->event_handler(devfreq, + DEVFREQ_GOV_STOP, NULL); + if (ret) { + dev_warn(dev, "%s: Governor %s stop=%d\n", + __func__, devfreq->governor->name, + ret); + } + devfreq->governor = NULL; + } + } + + list_del(&governor->node); +err_out: + mutex_unlock(&devfreq_list_lock); + + return err; } +EXPORT_SYMBOL(devfreq_remove_governor); static ssize_t show_governor(struct device *dev, struct device_attribute *attr, char *buf) { + if (!to_devfreq(dev)->governor) + return -EINVAL; + return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); } +static ssize_t store_governor(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct devfreq *df = to_devfreq(dev); + int ret; + char str_governor[DEVFREQ_NAME_LEN + 1]; + struct devfreq_governor *governor; + + ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); + if (ret != 1) + return -EINVAL; + + mutex_lock(&devfreq_list_lock); + governor = find_devfreq_governor(str_governor); + if (IS_ERR(governor)) { + ret = PTR_ERR(governor); + goto out; + } + if (df->governor == governor) + goto out; + + if (df->governor) { + ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); + if (ret) { + dev_warn(dev, "%s: Governor %s not stopped(%d)\n", + __func__, df->governor->name, ret); + goto out; + } + } + df->governor = governor; + strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); + ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); + if (ret) + dev_warn(dev, "%s: Governor %s not started(%d)\n", + __func__, df->governor->name, ret); +out: + mutex_unlock(&devfreq_list_lock); + + if (!ret) + ret = count; + return ret; +} +static ssize_t show_available_governors(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct devfreq_governor *tmp_governor; + ssize_t count = 0; + + mutex_lock(&devfreq_list_lock); + list_for_each_entry(tmp_governor, &devfreq_governor_list, node) + count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), + "%s ", tmp_governor->name); + mutex_unlock(&devfreq_list_lock); + + /* Truncate the trailing space */ + if (count) + count--; + + count += sprintf(&buf[count], "\n"); + + return count; +} + static ssize_t show_freq(struct device *dev, struct device_attribute *attr, char *buf) { + unsigned long freq; + struct devfreq *devfreq = to_devfreq(dev); + + if (devfreq->profile->get_cur_freq && + !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) + return sprintf(buf, "%lu\n", freq); + + return sprintf(buf, "%lu\n", devfreq->previous_freq); +} + +static ssize_t show_target_freq(struct device *dev, + struct device_attribute *attr, char *buf) +{ return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); } @@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev, unsigned int value; int ret; + if (!df->governor) + return -EINVAL; + ret = sscanf(buf, "%u", &value); if (ret != 1) - goto out; - - mutex_lock(&df->lock); - df->profile->polling_ms = value; - df->next_polling = df->polling_jiffies - = msecs_to_jiffies(value); - mutex_unlock(&df->lock); + return -EINVAL; + df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); ret = count; - if (df->governor->no_central_polling) - goto out; - - mutex_lock(&devfreq_list_lock); - if (df->next_polling > 0 && !polling) { - polling = true; - queue_delayed_work(devfreq_wq, &devfreq_work, - 
df->next_polling); - } - mutex_unlock(&devfreq_list_lock); -out: return ret; } -static ssize_t show_central_polling(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", - !to_devfreq(dev)->governor->no_central_polling); -} - static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, ret = sscanf(buf, "%lu", &value); if (ret != 1) - goto out; + return -EINVAL; mutex_lock(&df->lock); max = df->max_freq; @@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, ret = count; unlock: mutex_unlock(&df->lock); -out: return ret; } @@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, ret = sscanf(buf, "%lu", &value); if (ret != 1) - goto out; + return -EINVAL; mutex_lock(&df->lock); min = df->min_freq; @@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr, ret = count; unlock: mutex_unlock(&df->lock); -out: return ret; } @@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); } +static ssize_t show_available_freqs(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct devfreq *df = to_devfreq(d); + struct device *dev = df->dev.parent; + struct opp *opp; + ssize_t count = 0; + unsigned long freq = 0; + + rcu_read_lock(); + do { + opp = opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + break; + + count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), + "%lu ", freq); + freq++; + } while (1); + rcu_read_unlock(); + + /* Truncate the trailing space */ + if (count) + count--; + + count += sprintf(&buf[count], "\n"); + + return count; +} + +static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct devfreq *devfreq = to_devfreq(dev); + ssize_t len; + int i, j, err; + unsigned int max_state = devfreq->profile->max_state; + + err = devfreq_update_status(devfreq, devfreq->previous_freq); + if (err) + return 0; + + len = sprintf(buf, " From : To\n"); + len += sprintf(buf + len, " :"); + for (i = 0; i < max_state; i++) + len += sprintf(buf + len, "%8u", + devfreq->profile->freq_table[i]); + + len += sprintf(buf + len, " time(ms)\n"); + + for (i = 0; i < max_state; i++) { + if (devfreq->profile->freq_table[i] + == devfreq->previous_freq) { + len += sprintf(buf + len, "*"); + } else { + len += sprintf(buf + len, " "); + } + len += sprintf(buf + len, "%8u:", + devfreq->profile->freq_table[i]); + for (j = 0; j < max_state; j++) + len += sprintf(buf + len, "%8u", + devfreq->trans_table[(i * max_state) + j]); + len += sprintf(buf + len, "%10u\n", + jiffies_to_msecs(devfreq->time_in_state[i])); + } + + len += sprintf(buf + len, "Total transition : %u\n", + devfreq->total_trans); + return len; +} + static struct device_attribute devfreq_attrs[] = { - __ATTR(governor, S_IRUGO, show_governor, NULL), + __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor), + __ATTR(available_governors, S_IRUGO, show_available_governors, NULL), __ATTR(cur_freq, S_IRUGO, show_freq, NULL), - __ATTR(central_polling, S_IRUGO, show_central_polling, NULL), + __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL), + __ATTR(target_freq, S_IRUGO, show_target_freq, NULL), __ATTR(polling_interval, S_IRUGO | S_IWUSR, 
show_polling_interval, store_polling_interval), __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq), + __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL), { }, }; -/** - * devfreq_start_polling() - Initialize data structure for devfreq framework and - * start polling registered devfreq devices. - */ -static int __init devfreq_start_polling(void) -{ - mutex_lock(&devfreq_list_lock); - polling = false; - devfreq_wq = create_freezable_workqueue("devfreq_wq"); - INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor); - mutex_unlock(&devfreq_list_lock); - - devfreq_monitor(&devfreq_work.work); - return 0; -} -late_initcall(devfreq_start_polling); - static int __init devfreq_init(void) { devfreq_class = class_create(THIS_MODULE, "devfreq"); @@ -622,7 +962,15 @@ static int __init devfreq_init(void) pr_err("%s: couldn't create class\n", __FILE__); return PTR_ERR(devfreq_class); } + + devfreq_wq = create_freezable_workqueue("devfreq_wq"); + if (IS_ERR(devfreq_wq)) { + class_destroy(devfreq_class); + pr_err("%s: couldn't create workqueue\n", __FILE__); + return PTR_ERR(devfreq_wq); + } devfreq_class->dev_attrs = devfreq_attrs; + return 0; } subsys_initcall(devfreq_init); @@ -630,6 +978,7 @@ subsys_initcall(devfreq_init); static void __exit devfreq_exit(void) { class_destroy(devfreq_class); + destroy_workqueue(devfreq_wq); } module_exit(devfreq_exit); @@ -641,9 +990,9 @@ module_exit(devfreq_exit); /** * devfreq_recommended_opp() - Helper function to get proper OPP for the * freq value given to target callback. - * @dev The devfreq user device. (parent of devfreq) - * @freq The frequency given to target function - * @flags Flags handed from devfreq framework. + * @dev: The devfreq user device. (parent of devfreq) + * @freq: The frequency given to target function + * @flags: Flags handed from devfreq framework. * */ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, @@ -656,14 +1005,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, opp = opp_find_freq_floor(dev, freq); /* If not available, use the closest opp */ - if (opp == ERR_PTR(-ENODEV)) + if (opp == ERR_PTR(-ERANGE)) opp = opp_find_freq_ceil(dev, freq); } else { /* The freq is an lower bound. opp should be higher */ opp = opp_find_freq_ceil(dev, freq); /* If not available, use the closest opp */ - if (opp == ERR_PTR(-ENODEV)) + if (opp == ERR_PTR(-ERANGE)) opp = opp_find_freq_floor(dev, freq); } @@ -674,35 +1023,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, * devfreq_register_opp_notifier() - Helper function to get devfreq notified * for any changes in the OPP availability * changes - * @dev The devfreq user device. (parent of devfreq) - * @devfreq The devfreq object. + * @dev: The devfreq user device. (parent of devfreq) + * @devfreq: The devfreq object. */ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { - struct srcu_notifier_head *nh = opp_get_notifier(dev); + struct srcu_notifier_head *nh; + int ret = 0; + rcu_read_lock(); + nh = opp_get_notifier(dev); if (IS_ERR(nh)) - return PTR_ERR(nh); - return srcu_notifier_chain_register(nh, &devfreq->nb); + ret = PTR_ERR(nh); + rcu_read_unlock(); + if (!ret) + ret = srcu_notifier_chain_register(nh, &devfreq->nb); + + return ret; } /** * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq * notified for any changes in the OPP * availability changes anymore. 
- * @dev The devfreq user device. (parent of devfreq) - * @devfreq The devfreq object. + * @dev: The devfreq user device. (parent of devfreq) + * @devfreq: The devfreq object. * * At exit() callback of devfreq_dev_profile, this must be included if * devfreq_recommended_opp is used. */ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { - struct srcu_notifier_head *nh = opp_get_notifier(dev); + struct srcu_notifier_head *nh; + int ret = 0; + rcu_read_lock(); + nh = opp_get_notifier(dev); if (IS_ERR(nh)) - return PTR_ERR(nh); - return srcu_notifier_chain_unregister(nh, &devfreq->nb); + ret = PTR_ERR(nh); + rcu_read_unlock(); + if (!ret) + ret = srcu_notifier_chain_unregister(nh, &devfreq->nb); + + return ret; } MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c index 88ddc77a9bb..74183720871 100644 --- a/drivers/devfreq/exynos4_bus.c +++ b/drivers/devfreq/exynos4_bus.c @@ -987,7 +987,7 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; int err = 0; - data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL); + data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL); if (data == NULL) { dev_err(dev, "Cannot allocate memory.\n"); return -ENOMEM; @@ -1012,31 +1012,26 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) err = -EINVAL; } if (err) - goto err_regulator; + return err; - data->vdd_int = regulator_get(dev, "vdd_int"); + data->vdd_int = devm_regulator_get(dev, "vdd_int"); if (IS_ERR(data->vdd_int)) { dev_err(dev, "Cannot get the regulator \"vdd_int\"\n"); - err = PTR_ERR(data->vdd_int); - goto err_regulator; + return PTR_ERR(data->vdd_int); } if (data->type == TYPE_BUSF_EXYNOS4x12) { - data->vdd_mif = regulator_get(dev, "vdd_mif"); + data->vdd_mif = devm_regulator_get(dev, "vdd_mif"); if (IS_ERR(data->vdd_mif)) { dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n"); - err = PTR_ERR(data->vdd_mif); - regulator_put(data->vdd_int); - goto err_regulator; - + return PTR_ERR(data->vdd_mif); } } opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq); if (IS_ERR(opp)) { dev_err(dev, "Invalid initial frequency %lu kHz.\n", - exynos4_devfreq_profile.initial_freq); - err = PTR_ERR(opp); - goto err_opp_add; + exynos4_devfreq_profile.initial_freq); + return PTR_ERR(opp); } data->curr_opp = opp; @@ -1045,30 +1040,20 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev) busfreq_mon_reset(data); data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile, - &devfreq_simple_ondemand, NULL); - if (IS_ERR(data->devfreq)) { - err = PTR_ERR(data->devfreq); - goto err_opp_add; - } + "simple_ondemand", NULL); + if (IS_ERR(data->devfreq)) + return PTR_ERR(data->devfreq); devfreq_register_opp_notifier(dev, data->devfreq); err = register_pm_notifier(&data->pm_notifier); if (err) { dev_err(dev, "Failed to setup pm notifier\n"); - goto err_devfreq_add; + devfreq_remove_device(data->devfreq); + return err; } return 0; -err_devfreq_add: - devfreq_remove_device(data->devfreq); -err_opp_add: - if (data->vdd_mif) - regulator_put(data->vdd_mif); - regulator_put(data->vdd_int); -err_regulator: - kfree(data); - return err; } static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) @@ -1077,10 +1062,6 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev) unregister_pm_notifier(&data->pm_notifier); devfreq_remove_device(data->devfreq); 
- regulator_put(data->vdd_int); - if (data->vdd_mif) - regulator_put(data->vdd_mif); - kfree(data); return 0; } diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h index ea7f13c58de..fad7d632197 100644 --- a/drivers/devfreq/governor.h +++ b/drivers/devfreq/governor.h @@ -18,7 +18,24 @@ #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) +/* Devfreq events */ +#define DEVFREQ_GOV_START 0x1 +#define DEVFREQ_GOV_STOP 0x2 +#define DEVFREQ_GOV_INTERVAL 0x3 +#define DEVFREQ_GOV_SUSPEND 0x4 +#define DEVFREQ_GOV_RESUME 0x5 + /* Caution: devfreq->lock must be locked before calling update_devfreq */ extern int update_devfreq(struct devfreq *devfreq); +extern void devfreq_monitor_start(struct devfreq *devfreq); +extern void devfreq_monitor_stop(struct devfreq *devfreq); +extern void devfreq_monitor_suspend(struct devfreq *devfreq); +extern void devfreq_monitor_resume(struct devfreq *devfreq); +extern void devfreq_interval_update(struct devfreq *devfreq, + unsigned int *delay); + +extern int devfreq_add_governor(struct devfreq_governor *governor); +extern int devfreq_remove_governor(struct devfreq_governor *governor); + #endif /* _GOVERNOR_H */ diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c index af75ddd4f15..c72f942f30a 100644 --- a/drivers/devfreq/governor_performance.c +++ b/drivers/devfreq/governor_performance.c @@ -10,6 +10,7 @@ */ #include <linux/devfreq.h> +#include <linux/module.h> #include "governor.h" static int devfreq_performance_func(struct devfreq *df, @@ -26,14 +27,41 @@ static int devfreq_performance_func(struct devfreq *df, return 0; } -static int performance_init(struct devfreq *devfreq) +static int devfreq_performance_handler(struct devfreq *devfreq, + unsigned int event, void *data) { - return update_devfreq(devfreq); + int ret = 0; + + if (event == DEVFREQ_GOV_START) { + mutex_lock(&devfreq->lock); + ret = update_devfreq(devfreq); + mutex_unlock(&devfreq->lock); + } + + return ret; } -const struct devfreq_governor devfreq_performance = { +static struct devfreq_governor devfreq_performance = { .name = "performance", - .init = performance_init, .get_target_freq = devfreq_performance_func, - .no_central_polling = true, + .event_handler = devfreq_performance_handler, }; + +static int __init devfreq_performance_init(void) +{ + return devfreq_add_governor(&devfreq_performance); +} +subsys_initcall(devfreq_performance_init); + +static void __exit devfreq_performance_exit(void) +{ + int ret; + + ret = devfreq_remove_governor(&devfreq_performance); + if (ret) + pr_err("%s: failed remove governor %d\n", __func__, ret); + + return; +} +module_exit(devfreq_performance_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c index fec0cdbd247..0c6bed567e6 100644 --- a/drivers/devfreq/governor_powersave.c +++ b/drivers/devfreq/governor_powersave.c @@ -10,6 +10,7 @@ */ #include <linux/devfreq.h> +#include <linux/module.h> #include "governor.h" static int devfreq_powersave_func(struct devfreq *df, @@ -23,14 +24,41 @@ static int devfreq_powersave_func(struct devfreq *df, return 0; } -static int powersave_init(struct devfreq *devfreq) +static int devfreq_powersave_handler(struct devfreq *devfreq, + unsigned int event, void *data) { - return update_devfreq(devfreq); + int ret = 0; + + if (event == DEVFREQ_GOV_START) { + mutex_lock(&devfreq->lock); + ret = update_devfreq(devfreq); + mutex_unlock(&devfreq->lock); + } + + return ret; } -const struct devfreq_governor 
devfreq_powersave = { +static struct devfreq_governor devfreq_powersave = { .name = "powersave", - .init = powersave_init, .get_target_freq = devfreq_powersave_func, - .no_central_polling = true, + .event_handler = devfreq_powersave_handler, }; + +static int __init devfreq_powersave_init(void) +{ + return devfreq_add_governor(&devfreq_powersave); +} +subsys_initcall(devfreq_powersave_init); + +static void __exit devfreq_powersave_exit(void) +{ + int ret; + + ret = devfreq_remove_governor(&devfreq_powersave); + if (ret) + pr_err("%s: failed remove governor %d\n", __func__, ret); + + return; +} +module_exit(devfreq_powersave_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c index a2e3eae7901..0720ba84ca9 100644 --- a/drivers/devfreq/governor_simpleondemand.c +++ b/drivers/devfreq/governor_simpleondemand.c @@ -10,8 +10,10 @@ */ #include <linux/errno.h> +#include <linux/module.h> #include <linux/devfreq.h> #include <linux/math64.h> +#include "governor.h" /* Default constants for DevFreq-Simple-Ondemand (DFSO) */ #define DFSO_UPTHRESHOLD (90) @@ -88,7 +90,58 @@ static int devfreq_simple_ondemand_func(struct devfreq *df, return 0; } -const struct devfreq_governor devfreq_simple_ondemand = { +static int devfreq_simple_ondemand_handler(struct devfreq *devfreq, + unsigned int event, void *data) +{ + switch (event) { + case DEVFREQ_GOV_START: + devfreq_monitor_start(devfreq); + break; + + case DEVFREQ_GOV_STOP: + devfreq_monitor_stop(devfreq); + break; + + case DEVFREQ_GOV_INTERVAL: + devfreq_interval_update(devfreq, (unsigned int *)data); + break; + + case DEVFREQ_GOV_SUSPEND: + devfreq_monitor_suspend(devfreq); + break; + + case DEVFREQ_GOV_RESUME: + devfreq_monitor_resume(devfreq); + break; + + default: + break; + } + + return 0; +} + +static struct devfreq_governor devfreq_simple_ondemand = { .name = "simple_ondemand", .get_target_freq = devfreq_simple_ondemand_func, + .event_handler = devfreq_simple_ondemand_handler, }; + +static int __init devfreq_simple_ondemand_init(void) +{ + return devfreq_add_governor(&devfreq_simple_ondemand); +} +subsys_initcall(devfreq_simple_ondemand_init); + +static void __exit devfreq_simple_ondemand_exit(void) +{ + int ret; + + ret = devfreq_remove_governor(&devfreq_simple_ondemand); + if (ret) + pr_err("%s: failed remove governor %d\n", __func__, ret); + + return; +} +module_exit(devfreq_simple_ondemand_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 0681246fc89..35de6e83c1f 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -14,6 +14,7 @@ #include <linux/devfreq.h> #include <linux/pm.h> #include <linux/mutex.h> +#include <linux/module.h> #include "governor.h" struct userspace_data { @@ -116,10 +117,46 @@ static void userspace_exit(struct devfreq *devfreq) devfreq->data = NULL; } -const struct devfreq_governor devfreq_userspace = { +static int devfreq_userspace_handler(struct devfreq *devfreq, + unsigned int event, void *data) +{ + int ret = 0; + + switch (event) { + case DEVFREQ_GOV_START: + ret = userspace_init(devfreq); + break; + case DEVFREQ_GOV_STOP: + userspace_exit(devfreq); + break; + default: + break; + } + + return ret; +} + +static struct devfreq_governor devfreq_userspace = { .name = "userspace", .get_target_freq = devfreq_userspace_func, - .init = userspace_init, - .exit = userspace_exit, - .no_central_polling = true, + .event_handler = 
devfreq_userspace_handler, }; + +static int __init devfreq_userspace_init(void) +{ + return devfreq_add_governor(&devfreq_userspace); +} +subsys_initcall(devfreq_userspace_init); + +static void __exit devfreq_userspace_exit(void) +{ + int ret; + + ret = devfreq_remove_governor(&devfreq_userspace); + if (ret) + pr_err("%s: failed remove governor %d\n", __func__, ret); + + return; +} +module_exit(devfreq_userspace_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 8d4804732ba..8c4139647ef 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -33,7 +33,7 @@ * detection. The mods to Rev F required more family * information detection. * - * Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>: + * Changes/Fixes by Borislav Petkov <bp@alien8.de>: * - misc fixes and code cleanups * * This module is based on the following documents diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 6c86f6e5455..351945fa2ec 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c @@ -5,7 +5,7 @@ * * 2007 (c) MontaVista Software, Inc. * 2010 (c) Advanced Micro Devices Inc. - * Borislav Petkov <borislav.petkov@amd.com> + * Borislav Petkov <bp@alien8.de> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 66b5151c108..2ae78f20cc2 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c @@ -6,7 +6,7 @@ * This file may be distributed under the terms of the GNU General Public * License version 2. * - * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> + * Copyright (c) 2010: Borislav Petkov <bp@alien8.de> * Advanced Micro Devices Inc. */ @@ -168,6 +168,6 @@ module_init(edac_init_mce_inject); module_exit(edac_exit_mce_inject); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); +MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>"); MODULE_AUTHOR("AMD Inc."); MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 1162d6b3bf8..bb1b392f5cd 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) struct sbp2_logical_unit *lu = sdev->hostdata; sdev->use_10_for_rw = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; if (sbp2_param_exclusive_login) sdev->manage_start_stop = 1; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 5c9b384119b..f16557690cf 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -470,7 +470,7 @@ config GPIO_ADP5588_IRQ config GPIO_ADNP tristate "Avionic Design N-bit GPIO expander" - depends on I2C && OF + depends on I2C && OF_GPIO help This option enables support for N GPIOs found on Avionic Design I2C GPIO expanders. 
The register space will be extended by powers diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 0f425189de1..ce1c8476007 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -77,7 +77,7 @@ struct mcp23s08_driver_data { /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg) { @@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; #endif /* CONFIG_SPI_MASTER */ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) case MCP_TYPE_008: mcp->ops = &mcp23008_ops; mcp->chip.ngpio = 8; @@ -473,7 +473,7 @@ fail: /*----------------------------------------------------------------------*/ -#ifdef CONFIG_I2C +#if IS_ENABLED(CONFIG_I2C) static int __devinit mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index cf7afb9eb61..be65c0451ad 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip) return mvchip->membase + GPIO_OUT_OFF; } +static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_BLINK_EN_OFF; +} + static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) { return mvchip->membase + GPIO_IO_CONF_OFF; @@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin) return (u >> pin) & 1; } +static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + u32 u; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_blink(mvchip)); + if (value) + u |= 1 << pin; + else + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_blink(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); +} + static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin) { struct mvebu_gpio_chip *mvchip = @@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin, if (ret) return ret; + mvebu_gpio_blink(chip, pin, 0); mvebu_gpio_set(chip, pin, value); spin_lock_irqsave(&mvchip->lock, flags); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b726b478a4f..6345878ae1e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode) int old_dpms; /* PCH platforms and VLV only support on/off. 
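The gpio-mcp23s08 hunks above replace #ifdef CONFIG_I2C with IS_ENABLED(CONFIG_I2C). The two are not equivalent: when the I2C core is built as a module, only CONFIG_I2C_MODULE is defined, so the plain #ifdef silently compiles the I2C paths out. IS_ENABLED(), from <linux/kconfig.h>, is true for both =y and =m. Illustration with a placeholder symbol:

#include <linux/kconfig.h>

/* CONFIG_FOO stands in for any tristate Kconfig symbol. */

#ifdef CONFIG_FOO		/* defined only when CONFIG_FOO=y */
/* reached for the built-in case only */
#endif

#if IS_ENABLED(CONFIG_FOO)	/* true when CONFIG_FOO=y or CONFIG_FOO=m */
/* reached when the dependency is built-in or modular */
#endif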
*/ - if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) + if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; if (mode == connector->dpms) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 461a637f1ef..4154bcd7a07 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3841,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, } } + if (intel_encoder->type == INTEL_OUTPUT_EDP) { + /* Use VBT settings if we have an eDP panel */ + unsigned int edp_bpc = dev_priv->edp.bpp / 3; + + if (edp_bpc < display_bpc) { + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); + display_bpc = edp_bpc; + } + continue; + } + /* * HDMI is either 12 or 8, so if the display lets 10bpc sneak * through, clamp it down. (Note: >12bpc will be caught below.) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 79d308da29f..c600fb06e25 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2382,6 +2382,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) return true; } +static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) +{ + struct drm_device *dev = intel_sdvo->base.base.dev; + struct drm_connector *connector, *tmp; + + list_for_each_entry_safe(connector, tmp, + &dev->mode_config.connector_list, head) { + if (intel_attached_encoder(connector) == &intel_sdvo->base) + intel_sdvo_destroy(connector); + } +} + static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *intel_sdvo_connector, int type) @@ -2705,7 +2717,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", SDVO_NAME(intel_sdvo)); - goto err; + /* Output_setup can leave behind connectors! */ + goto err_output; } /* Only enable the hotplug irq if we need it, to work around noisy @@ -2718,12 +2731,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) - goto err; + goto err_output; if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, &intel_sdvo->pixel_clock_min, &intel_sdvo->pixel_clock_max)) - goto err; + goto err_output; DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " @@ -2743,6 +2756,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 
'Y' : 'N'); return true; +err_output: + intel_sdvo_output_cleanup(intel_sdvo); + err: drm_encoder_cleanup(&intel_encoder->base); i2c_del_adapter(&intel_sdvo->ddc); diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 05a909a17ce..15b182c84ce 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -49,13 +49,7 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) if (chan->vblank.crtc != crtc) continue; - if (nv_device(priv)->chipset == 0x50) { - nv_wr32(priv, 0x001704, chan->vblank.channel); - nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); - bar->flush(bar); - nv_wr32(priv, 0x001570, chan->vblank.offset); - nv_wr32(priv, 0x001574, chan->vblank.value); - } else { + if (nv_device(priv)->chipset >= 0xc0) { nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); bar->flush(bar); nv_wr32(priv, 0x06000c, @@ -63,6 +57,17 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); nv_wr32(priv, 0x060014, chan->vblank.value); + } else { + nv_wr32(priv, 0x001704, chan->vblank.channel); + nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); + bar->flush(bar); + if (nv_device(priv)->chipset == 0x50) { + nv_wr32(priv, 0x001570, chan->vblank.offset); + nv_wr32(priv, 0x001574, chan->vblank.value); + } else { + nv_wr32(priv, 0x060010, chan->vblank.offset); + nv_wr32(priv, 0x060014, chan->vblank.value); + } } list_del(&chan->vblank.head); diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c index e45035efb8c..7bbb1e1b7a8 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c @@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem) }); } -void +int nv40_grctx_init(struct nouveau_device *device, u32 *size) { - u32 ctxprog[256], i; + u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i; struct nouveau_grctx ctx = { .device = device, .mode = NOUVEAU_GRCTX_PROG, .data = ctxprog, - .ctxprog_max = ARRAY_SIZE(ctxprog) + .ctxprog_max = 256, }; + if (!ctxprog) + return -ENOMEM; + nv40_grctx_generate(&ctx); nv_wr32(device, 0x400324, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(device, 0x400328, ctxprog[i]); *size = ctx.ctxvals_pos * 4; + + kfree(ctxprog); + return 0; } diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c index 425001204a8..cc6574eeb80 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c @@ -346,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object) return ret; /* generate and upload context program */ - nv40_grctx_init(nv_device(priv), &priv->size); + ret = nv40_grctx_init(nv_device(priv), &priv->size); + if (ret) + return ret; /* No context present currently */ nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h index d2ac975afc2..7da35a4e797 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h @@ -15,7 +15,7 @@ nv44_graph_class(void *priv) return !(0x0baf & (1 << (device->chipset & 0x0f))); } -void nv40_grctx_init(struct nouveau_device *, u32 *size); +int nv40_grctx_init(struct nouveau_device *, u32 *size); void 
nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); #endif diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h index 818feabbf4a..486f1a9217f 100644 --- a/drivers/gpu/drm/nouveau/core/include/core/object.h +++ b/drivers/gpu/drm/nouveau/core/include/core/object.h @@ -175,14 +175,18 @@ nv_mo32(void *obj, u32 addr, u32 mask, u32 data) return temp; } -static inline bool -nv_strncmp(void *obj, u32 addr, u32 len, const char *str) +static inline int +nv_memcmp(void *obj, u32 addr, const char *str, u32 len) { + unsigned char c1, c2; + while (len--) { - if (nv_ro08(obj, addr++) != *(str++)) - return false; + c1 = nv_ro08(obj, addr++); + c2 = *(str++); + if (c1 != c2) + return c1 - c2; } - return true; + return 0; } #endif diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h index 39e73b91d36..41b7a6a76f1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h @@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, int clk, struct nouveau_pll_vals *); int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, struct nouveau_pll_vals *); - +int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *, + int clk, struct nouveau_pll_vals *); #endif diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c index 7d750382a83..c5119715774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c @@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) } } else if (*ver >= 0x15) { - if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) { + if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) { u16 i2c = nv_ro16(bios, dcb + 2); *hdr = 4; *cnt = (i2c - dcb) / 10; diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c index cc8d7d162d7..9068c98b96f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c @@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq) return ret; } +int +nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info, + int clk, struct nouveau_pll_vals *pv) +{ + int ret, N, M, P; + + ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P); + + if (ret > 0) { + pv->refclk = info->refclk; + pv->N1 = N; + pv->M1 = M; + pv->log2P = P; + } + return ret; +} + + static int nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, @@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nva3_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c index 5ccce0b17bf..f6962c9b6c3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c @@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, return ret; priv->base.pll_set = nvc0_clock_pll_set; + priv->base.pll_calc = nva3_clock_pll_calc; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 
cc79c796afe..cbf1fc60a38 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -241,6 +241,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (unlikely(!abi16)) return -ENOMEM; + + if (!drm->channel) + return nouveau_abi16_put(abi16, -ENODEV); + client = nv_client(abi16->client); if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0910125cbbc..8503b2ea570 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -129,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm) /* initialise synchronisation routines */ if (device->card_type < NV_10) ret = nv04_fence_create(drm); - else if (device->chipset < 0x84) ret = nv10_fence_create(drm); + else if (device->card_type < NV_50) ret = nv10_fence_create(drm); + else if (device->chipset < 0x84) ret = nv50_fence_create(drm); else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); else ret = nvc0_fence_create(drm); if (ret) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index ba498f8e47a..010bae19554 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index af31f829f4a..219942c660d 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav break; udelay(1); } + } else { + save->crtc_enabled[i] = false; } } diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 10ea17a6b2a..42433344cb1 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 
860dc4813e9..bd2a3b40cd1 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, /* clear the pages coming from the pool if requested */ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { list_for_each_entry(p, &plist, lru) { - clear_page(page_address(p)); + if (PageHighMem(p)) + clear_highpage(p); + else + clear_page(page_address(p)); } } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index bf8260133ea..7d759a43029 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) if (unlikely(to_page == NULL)) goto out_err; - preempt_disable(); copy_highpage(to_page, from_page); - preempt_enable(); page_cache_release(from_page); } @@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ret = PTR_ERR(to_page); goto out_err; } - preempt_disable(); copy_highpage(to_page, from_page); - preempt_enable(); set_page_dirty(to_page); mark_page_accessed(to_page); page_cache_release(to_page); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index b07ca2e4d04..7290811f89b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); ret = copy_to_user(buffer, bounce, size); + if (ret) + ret = -EFAULT; vfree(bounce); if (unlikely(ret != 0)) diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index f676c01bb47..6fcd466d082 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc, rdesc[559] = 0x45; } /* the same as above (s/usage/physical/) */ - if ((quirks & MS_RDESC_3K) && *rsize == 106 && - !memcmp((char []){ 0x19, 0x00, 0x29, 0xff }, - &rdesc[94], 4)) { + if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 && + rdesc[95] == 0x00 && rdesc[96] == 0x29 && + rdesc[97] == 0xff) { rdesc[94] = 0x35; rdesc[96] = 0x45; } diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index aa59a254be2..c02bf208084 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -39,6 +39,7 @@ #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */ #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */ #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */ +#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */ #define AT91_TWI_SWRST 0x0080 /* Software Reset */ #define AT91_TWI_MMR 0x0004 /* Master Mode Register */ @@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) INIT_COMPLETION(dev->cmd_complete); dev->transfer_status = 0; - if (dev->msg->flags & I2C_M_RD) { + + if (!dev->buf_len) { + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK); + at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); + } else if (dev->msg->flags & I2C_M_RD) { unsigned start_flags = AT91_TWI_START; if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) { diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 286ca191782..0670da79ee5 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -287,12 +287,14 @@ read_init_dma_fail: select_init_dma_fail: dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE); select_init_pio_fail: + 
dmaengine_terminate_all(i2c->dmach); return -EINVAL; /* Write failpath. */ write_init_dma_fail: dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); write_init_pio_fail: + dmaengine_terminate_all(i2c->dmach); return -EINVAL; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index db31eaed6ea..3525c9e62cb 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -43,7 +43,6 @@ #include <linux/slab.h> #include <linux/i2c-omap.h> #include <linux/pm_runtime.h> -#include <linux/pm_qos.h> /* I2C controller revisions */ #define OMAP_I2C_OMAP1_REV_2 0x20 @@ -187,8 +186,9 @@ struct omap_i2c_dev { int reg_shift; /* bit shift for I2C register addresses */ struct completion cmd_complete; struct resource *ioarea; - u32 latency; /* maximum MPU wkup latency */ - struct pm_qos_request pm_qos_request; + u32 latency; /* maximum mpu wkup latency */ + void (*set_mpu_wkup_lat)(struct device *dev, + long latency); u32 speed; /* Speed of bus in kHz */ u32 dtrev; /* extra revision from DT */ u32 flags; @@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->threshold) / + (1000 * dev->speed / 8); } /* @@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, dev->buf = msg->buf; dev->buf_len = msg->len; + /* make sure writes to dev->buf_len are ordered */ + barrier(); + omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len); /* Clear the FIFO Buffers */ @@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, */ timeout = wait_for_completion_timeout(&dev->cmd_complete, OMAP_I2C_TIMEOUT); - dev->buf_len = 0; if (timeout == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); @@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r < 0) goto out; - /* - * When waiting for completion of a i2c transfer, we need to - * set a wake up latency constraint for the MPU. This is to - * ensure quick enough wakeup from idle, when transfer - * completes. 
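The i2c-omap change drops the per-transfer PM QoS request in favour of an optional set_mpu_wkup_lat() callback supplied through platform data: the wakeup-latency constraint is asserted before the message loop, released with -1 afterwards, and the latency value is only computed when a callback actually exists. A reduced, self-contained model of that flow; the structure and function names here are invented for the sketch:

#include <linux/device.h>
#include <linux/i2c.h>

/* Only the shape matches the i2c-omap change above. */
struct wkup_lat_ctx {
	struct device *dev;
	void (*set_mpu_wkup_lat)(struct device *dev, long latency);
	long latency;	/* worst-case wakeup latency for one FIFO drain, us */
};

static int xfer_with_latency_constraint(struct wkup_lat_ctx *ctx,
					int (*do_one_msg)(struct i2c_msg *msg),
					struct i2c_msg msgs[], int num)
{
	int i, r = 0;

	if (ctx->set_mpu_wkup_lat)		/* assert the constraint */
		ctx->set_mpu_wkup_lat(ctx->dev, ctx->latency);

	for (i = 0; i < num; i++) {
		r = do_one_msg(&msgs[i]);
		if (r)
			break;
	}

	if (ctx->set_mpu_wkup_lat)		/* release it again */
		ctx->set_mpu_wkup_lat(ctx->dev, -1);

	return r ? r : num;
}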
- */ - if (dev->latency) - pm_qos_add_request(&dev->pm_qos_request, - PM_QOS_CPU_DMA_LATENCY, - dev->latency); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, dev->latency); for (i = 0; i < num; i++) { r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1))); @@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) break; } - if (dev->latency) - pm_qos_remove_request(&dev->pm_qos_request); + if (dev->set_mpu_wkup_lat != NULL) + dev->set_mpu_wkup_lat(dev->dev, -1); if (r == 0) r = num; @@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev) } else if (pdata != NULL) { dev->speed = pdata->clkrate; dev->flags = pdata->flags; + dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat; dev->dtrev = pdata->rev; } @@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev) dev->b_hw = 1; /* Enable hardware fixes */ /* calculate wakeup latency constraint for MPU */ - dev->latency = (1000000 * dev->fifo_size) / - (1000 * dev->speed / 8); + if (dev->set_mpu_wkup_lat != NULL) + dev->latency = (1000000 * dev->fifo_size) / + (1000 * dev->speed / 8); } /* reset ASAP, clearing any IRQs */ diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 3e0335f1fc6..9d902725bac 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c) dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio); goto free_gpio; } + i2c->gpios[idx] = gpio; ret = gpio_request(gpio, "i2c-bus"); if (ret) { diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 5f097f309b9..7fa5b24b16d 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) mux->busses = devm_kzalloc(&pdev->dev, sizeof(mux->busses) * mux->pdata->bus_count, GFP_KERNEL); - if (!mux->states) { + if (!mux->busses) { dev_err(&pdev->dev, "Cannot allocate busses\n"); ret = -ENOMEM; goto err; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b0f6b4c8ee1..c49c04d9c2b 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -56,7 +56,6 @@ #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/clockchips.h> -#include <linux/hrtimer.h> /* ktime_get_real() */ #include <trace/events/power.h> #include <linux/sched.h> #include <linux/notifier.h> @@ -72,6 +71,7 @@ static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", .owner = THIS_MODULE, + .en_core_tk_irqen = 1, }; /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; @@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); unsigned int cstate; - ktime_t kt_before, kt_after; - s64 usec_delta; int cpu = smp_processor_id(); cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; @@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev, if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); - kt_before = ktime_get_real(); - stop_critical_timings(); if (!need_resched()) { @@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev, start_critical_timings(); - kt_after = ktime_get_real(); - usec_delta = 
ktime_to_us(ktime_sub(kt_after, kt_before)); - - local_irq_enable(); - if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); - /* Update cpuidle counters */ - dev->last_residency = (int)usec_delta; - return index; } diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index c0ec7d42c3b..1abbc170d8b 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) * input_mt_init_slots() - initialize MT input slots * @dev: input device supporting MT events and finger tracking * @num_slots: number of slots used by the device + * @flags: mt tasks to handle in core * * This function allocates all necessary memory for MT slot handling * in the input device, prepares the ABS_MT_SLOT and * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers. + * Depending on the flags set, it also performs pointer emulation and + * frame synchronization. + * * May be called repeatedly. Returns -EINVAL if attempting to * reinitialize with a different number of slots. */ diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 8f02e3d0e71..4c842c320c2 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -12,8 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MOUSEDEV_MINOR_BASE 32 -#define MOUSEDEV_MINORS 32 -#define MOUSEDEV_MIX 31 +#define MOUSEDEV_MINORS 31 +#define MOUSEDEV_MIX 63 #include <linux/sched.h> #include <linux/slab.h> diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index f02028ec3db..78e5d9ab0ba 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); -static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts) +static int __devinit ads7846_setup_pendown(struct spi_device *spi, + struct ads7846 *ts) { struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; @@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 ts->gpio_pendown = pdata->gpio_pendown; + if (pdata->gpio_pendown_debounce) + gpio_set_debounce(pdata->gpio_pendown, + pdata->gpio_pendown_debounce); } else { dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); return -EINVAL; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d4a4cd445ca..0badfa48b32 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) static int intel_iommu_add_device(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); - struct pci_dev *bridge, *dma_pdev; + struct pci_dev *bridge, *dma_pdev = NULL; struct iommu_group *group; int ret; @@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev) dma_pdev = pci_get_domain_bus_and_slot( pci_domain_nr(pdev->bus), bridge->subordinate->number, 0); - else + if (!dma_pdev) dma_pdev = pci_dev_get(bridge); } else dma_pdev = pci_dev_get(pdev); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index a649f146d17..c0f7a426626 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v) stats[i], val, offs); } seq_printf(s, "\n"); + 
dput(dent); return 0; } diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index dc670ccc697..16c78f1c5ef 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node, } static struct of_device_id irq_of_match[] __initconst = { - { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init } + { .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }, + { } }; void __init bcm2835_init_irq(void) diff --git a/drivers/leds/ledtrig-cpu.c b/drivers/leds/ledtrig-cpu.c index b312056da14..4239b3955ff 100644 --- a/drivers/leds/ledtrig-cpu.c +++ b/drivers/leds/ledtrig-cpu.c @@ -33,8 +33,6 @@ struct led_trigger_cpu { char name[MAX_NAME_LEN]; struct led_trigger *_trig; - struct mutex lock; - int lock_is_inited; }; static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig); @@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) { struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig); - /* mutex lock should be initialized before calling mutex_call() */ - if (!trig->lock_is_inited) - return; - - mutex_lock(&trig->lock); - /* Locate the correct CPU LED */ switch (ledevt) { case CPU_LED_IDLE_END: @@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt) /* Will leave the LED as it is */ break; } - - mutex_unlock(&trig->lock); } EXPORT_SYMBOL(ledtrig_cpu); @@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void) for_each_possible_cpu(cpu) { struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - mutex_init(&trig->lock); - snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu); - mutex_lock(&trig->lock); led_trigger_register_simple(trig->name, &trig->_trig); - trig->lock_is_inited = 1; - mutex_unlock(&trig->lock); } register_syscore_ops(&ledtrig_cpu_syscore_ops); @@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void) for_each_possible_cpu(cpu) { struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); - mutex_lock(&trig->lock); - led_trigger_unregister_simple(trig->_trig); trig->_trig = NULL; memset(trig->name, 0, MAX_NAME_LEN); - trig->lock_is_inited = 0; - - mutex_unlock(&trig->lock); - mutex_destroy(&trig->lock); } unregister_syscore_ops(&ledtrig_cpu_syscore_ops); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 02db9183ca0..77e6eff41ca 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. 
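The irq-bcm2835 fix above adds the empty entry that must terminate an of_device_id table: the OF match helpers walk the array until they reach an all-zero sentinel, so leaving it out lets them read past the end. The required shape, with a made-up compatible string:

#include <linux/mod_devicetable.h>

/* "vendor,example-ic" is a placeholder, not a binding from this series. */
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-ic" },
	{ /* sentinel: the all-zero entry ends the table */ }
};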
See the comment above diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ab768acfb6..61200717687 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1817,10 +1817,10 @@ retry: memset(bbp, 0xff, PAGE_SIZE); for (i = 0 ; i < bb->count ; i++) { - u64 internal_bb = *p++; + u64 internal_bb = p[i]; u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | BB_LEN(internal_bb)); - *bbp++ = cpu_to_le64(store_bb); + bbp[i] = cpu_to_le64(store_bb); } bb->changed = 0; if (read_seqretry(&bb->lock, seq)) @@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev) } EXPORT_SYMBOL_GPL(md_stop_writes); -void md_stop(struct mddev *mddev) +static void __md_stop(struct mddev *mddev) { mddev->ready = 0; mddev->pers->stop(mddev); @@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev) mddev->pers = NULL; clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } + +void md_stop(struct mddev *mddev) +{ + /* stop the array and free an attached data structures. + * This is called from dm-raid + */ + __md_stop(mddev); + bitmap_destroy(mddev); + if (mddev->bio_set) + bioset_free(mddev->bio_set); +} + EXPORT_SYMBOL_GPL(md_stop); static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) @@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode, set_disk_ro(disk, 0); __md_stop_writes(mddev); - md_stop(mddev); + __md_stop(mddev); mddev->queue->merge_bvec_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; @@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) { int hi; - int lo = 0; + int lo; u64 *p = bb->page; - int rv = 0; + int rv; sector_t target = s + sectors; unsigned seq; @@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, retry: seq = read_seqbegin(&bb->lock); - + lo = 0; + rv = 0; hi = bb->count; /* Binary search between lo and hi for 'target' diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d1295aff417..0d5d0ff2c0f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error) */ one_write_done(r10_bio); if (dec_rdev) - rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); + rdev_dec_pending(rdev, conf->mddev); } /* @@ -1334,18 +1334,21 @@ retry_write: blocked_rdev = rrdev; break; } + if (rdev && (test_bit(Faulty, &rdev->flags) + || test_bit(Unmerged, &rdev->flags))) + rdev = NULL; if (rrdev && (test_bit(Faulty, &rrdev->flags) || test_bit(Unmerged, &rrdev->flags))) rrdev = NULL; r10_bio->devs[i].bio = NULL; r10_bio->devs[i].repl_bio = NULL; - if (!rdev || test_bit(Faulty, &rdev->flags) || - test_bit(Unmerged, &rdev->flags)) { + + if (!rdev && !rrdev) { set_bit(R10BIO_Degraded, &r10_bio->state); continue; } - if (test_bit(WriteErrorSeen, &rdev->flags)) { + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { sector_t first_bad; sector_t dev_sector = r10_bio->devs[i].addr; int bad_sectors; @@ -1387,8 +1390,10 @@ retry_write: max_sectors = good_sectors; } } - r10_bio->devs[i].bio = bio; - atomic_inc(&rdev->nr_pending); + if (rdev) { + r10_bio->devs[i].bio = bio; + atomic_inc(&rdev->nr_pending); + } if (rrdev) { r10_bio->devs[i].repl_bio = bio; atomic_inc(&rrdev->nr_pending); @@ -1444,69 +1449,71 @@ retry_write: for (i = 0; i < conf->copies; i++) { struct bio *mbio; int d = r10_bio->devs[i].devnum; - if (!r10_bio->devs[i].bio) - continue; + if (r10_bio->devs[i].bio) { + struct md_rdev *rdev = conf->mirrors[d].rdev; + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + 
md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr+ + choose_data_offset(r10_bio, + rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].bio = mbio; + atomic_inc(&r10_bio->remaining); - mbio->bi_sector = (r10_bio->devs[i].addr+ - choose_data_offset(r10_bio, - conf->mirrors[d].rdev)); - mbio->bi_bdev = conf->mirrors[d].rdev->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; + cb = blk_check_plugged(raid10_unplug, mddev, + sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid10_plug_cb, + cb); + else + plug = NULL; + spin_lock_irqsave(&conf->device_lock, flags); + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!plug) + md_wakeup_thread(mddev->thread); + } - atomic_inc(&r10_bio->remaining); + if (r10_bio->devs[i].repl_bio) { + struct md_rdev *rdev = conf->mirrors[d].replacement; + if (rdev == NULL) { + /* Replacement just got moved to main 'rdev' */ + smp_mb(); + rdev = conf->mirrors[d].rdev; + } + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, + max_sectors); + r10_bio->devs[i].repl_bio = mbio; + + mbio->bi_sector = (r10_bio->devs[i].addr + + choose_data_offset( + r10_bio, rdev)); + mbio->bi_bdev = rdev->bdev; + mbio->bi_end_io = raid10_end_write_request; + mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; + mbio->bi_private = r10_bio; - cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); - if (cb) - plug = container_of(cb, struct raid10_plug_cb, cb); - else - plug = NULL; - spin_lock_irqsave(&conf->device_lock, flags); - if (plug) { - bio_list_add(&plug->pending, mbio); - plug->pending_cnt++; - } else { + atomic_inc(&r10_bio->remaining); + spin_lock_irqsave(&conf->device_lock, flags); bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; + spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!plug) - md_wakeup_thread(mddev->thread); - - if (!r10_bio->devs[i].repl_bio) - continue; - - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector, - max_sectors); - r10_bio->devs[i].repl_bio = mbio; - - /* We are actively writing to the original device - * so it cannot disappear, so the replacement cannot - * become NULL here - */ - mbio->bi_sector = (r10_bio->devs[i].addr + - choose_data_offset( - r10_bio, - conf->mirrors[d].replacement)); - mbio->bi_bdev = conf->mirrors[d].replacement->bdev; - mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; - mbio->bi_private = r10_bio; - - atomic_inc(&r10_bio->remaining); - spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; - spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) - md_wakeup_thread(mddev->thread); } /* Don't remove the bias on 'remaining' 
(one_write_done) until diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c5439dce029..a4502686e7a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf, dev = &sh->dev[i]; if (!test_bit(R5_LOCKED, &dev->flags) && (test_bit(R5_UPTODATE, &dev->flags) || - test_and_clear_bit(R5_Discard, &dev->flags))) { + test_bit(R5_Discard, &dev->flags))) { /* We can return any write requests */ struct bio *wbi, *wbi2; pr_debug("Return write for disc %d\n", i); + if (test_and_clear_bit(R5_Discard, &dev->flags)) + clear_bit(R5_UPTODATE, &dev->flags); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < @@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, !test_bit(STRIPE_DEGRADED, &sh->state), 0); } - } + } else if (test_bit(R5_Discard, &sh->dev[i].flags)) + clear_bit(R5_Discard, &sh->dev[i].flags); if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh) handle_failed_sync(conf, sh, &s); } - /* - * might be able to return some write requests if the parity blocks - * are safe, or on a failed drive - */ - pdev = &sh->dev[sh->pd_idx]; - s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); - qdev = &sh->dev[sh->qd_idx]; - s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) - || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) - || conf->level < 6; - - if (s.written && - (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) - && !test_bit(R5_LOCKED, &pdev->flags) - && (test_bit(R5_UPTODATE, &pdev->flags) || - test_bit(R5_Discard, &pdev->flags))))) && - (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) - && !test_bit(R5_LOCKED, &qdev->flags) - && (test_bit(R5_UPTODATE, &qdev->flags) || - test_bit(R5_Discard, &qdev->flags)))))) - handle_stripe_clean_event(conf, sh, disks, &s.return_bi); - - /* Now we might consider reading some blocks, either to check/generate - * parity, or to satisfy requests - * or to load a block that is being partially written. 
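In the raid5 handle_stripe_clean_event() hunk above, test_and_clear_bit(R5_Discard, ...) used to sit as the second operand of a short-circuited ||, so the bit was only cleared when R5_UPTODATE happened to be unset; the rework tests without side effects and clears the flags explicitly once the writes are really being returned. The hazard in miniature, with stand-in flag names:

#include <linux/bitops.h>

#define FLAG_READY	0	/* stand-ins, not the raid5 flag bits */
#define FLAG_DISCARD	1

static void return_completed_work(unsigned long *flags)
{
	/* Problematic shape: when FLAG_READY is set the || short-circuits,
	 * test_and_clear_bit() never runs, and FLAG_DISCARD lingers. */
	if (test_bit(FLAG_READY, flags) ||
	    test_and_clear_bit(FLAG_DISCARD, flags)) {
		/* ... return the finished requests ... */
	}

	/* Reworked shape: side-effect-free tests decide, the clear is
	 * explicit, and companion state is dropped with it (the raid5 fix
	 * clears R5_UPTODATE at this point). */
	if (test_bit(FLAG_READY, flags) || test_bit(FLAG_DISCARD, flags)) {
		if (test_and_clear_bit(FLAG_DISCARD, flags))
			clear_bit(FLAG_READY, flags);
		/* ... return the finished requests ... */
	}
}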
- */ - if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) - || (s.syncing && (s.uptodate + s.compute < disks)) - || s.replacing - || s.expanding) - handle_stripe_fill(sh, &s, disks); - /* Now we check to see if any write operations have recently * completed */ @@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh) s.dec_preread_active = 1; } + /* + * might be able to return some write requests if the parity blocks + * are safe, or on a failed drive + */ + pdev = &sh->dev[sh->pd_idx]; + s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); + qdev = &sh->dev[sh->qd_idx]; + s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) + || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) + || conf->level < 6; + + if (s.written && + (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) + && !test_bit(R5_LOCKED, &pdev->flags) + && (test_bit(R5_UPTODATE, &pdev->flags) || + test_bit(R5_Discard, &pdev->flags))))) && + (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) + && !test_bit(R5_LOCKED, &qdev->flags) + && (test_bit(R5_UPTODATE, &qdev->flags) || + test_bit(R5_Discard, &qdev->flags)))))) + handle_stripe_clean_event(conf, sh, disks, &s.return_bi); + + /* Now we might consider reading some blocks, either to check/generate + * parity, or to satisfy requests + * or to load a block that is being partially written. + */ + if (s.to_read || s.non_overwrite + || (conf->level == 6 && s.to_write && s.failed) + || (s.syncing && (s.uptodate + s.compute < disks)) + || s.replacing + || s.expanding) + handle_stripe_fill(sh, &s, disks); + /* Now to consider new write requests and what else, if anything * should be read. We do not handle new writes when: * 1/ A 'write' operation (copy+xor) is already in flight. @@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev) * discard data disk but write parity disk */ stripe = stripe * PAGE_SIZE; + /* Round up to power of 2, as discard handling + * currently assumes that */ + while ((stripe-1) & stripe) + stripe = (stripe | (stripe-1)) + 1; mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; /* diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8f52fc858e4..5a5cd2ace4a 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index ec6841d8e95..1a03b7f673c 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip, /* * Field definitions are in the following datasheets: * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) - * New style (6 byte ID): Samsung K9GAG08U0F (p.44) + * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22) * - * Check for ID length, cell type, and Hynix/Samsung ID to decide what - * to do. + * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung + * ID to decide what to do. 
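The raid5 run() hunk above rounds the discard stripe size up to a power of two with a classic bit trick: x is a power of two exactly when (x - 1) & x == 0, and x | (x - 1) fills the low bits below the lowest set bit, so adding one merges that bit upward; repeating until the test clears leaves a single set bit. A standalone check of the idiom in plain C:

#include <assert.h>

/* Same idiom as the discard-granularity rounding above; assumes v > 0. */
static unsigned long roundup_pow2(unsigned long v)
{
	while ((v - 1) & v)		/* non-zero while v has more than one set bit */
		v = (v | (v - 1)) + 1;	/* merge the lowest set bit upward */
	return v;
}

int main(void)
{
	assert(roundup_pow2(4096) == 4096);
	assert(roundup_pow2(4097) == 8192);
	assert(roundup_pow2(24576) == 32768);	/* e.g. 3 data disks, 8 KiB chunk */
	return 0;
}

(In kernel code <linux/log2.h> also offers roundup_pow_of_two() for the same computation.)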
*/ - if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) { + if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && + id_data[5] != 0x00) { /* Calc pagesize */ mtd->writesize = 2048 << (extid & 0x03); extid >>= 2; diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 64be8f0848b..d9127e2ed80 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c @@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, nr_parts = plen / sizeof(part[0]); *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); - if (!pparts) + if (!*pparts) return -ENOMEM; names = of_get_property(dp, "partition-names", &plen); diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 7153e0d2710..b3f41f20062 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int * flexonenand_set_boundary - Writes the SLC boundary * @param mtd - mtd info structure */ -int flexonenand_set_boundary(struct mtd_info *mtd, int die, +static int flexonenand_set_boundary(struct mtd_info *mtd, int die, int boundary, int lock) { struct onenand_chip *this = mtd->priv; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2530b00212..5f5b69f37d2 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; netdev_features_t vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; @@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond) dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE; bond_dev->priv_flags = flags | dst_release_flag; diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index d04911d33b6..47618e50535 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index bd1fd3d87c2..01611b33a93 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) */ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) { - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. 
Clearing"); - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + if (!CHIP_IS_E1x(bp)) { + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_FUNC(bp)); + } } } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 92317e9c0f7..60ac46f4ac0 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev) jme_clear_pm(jme); JME_NAPI_ENABLE(jme); - tasklet_enable(&jme->linkch_task); - tasklet_enable(&jme->txclean_task); - tasklet_hi_enable(&jme->rxclean_task); - tasklet_hi_enable(&jme->rxempty_task); + tasklet_init(&jme->linkch_task, jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, + (unsigned long) jme); rc = jme_request_irq(jme); if (rc) @@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev, tasklet_init(&jme->pcc_task, jme_pcc_tasklet, (unsigned long) jme); - tasklet_init(&jme->linkch_task, - jme_link_change_tasklet, - (unsigned long) jme); - tasklet_init(&jme->txclean_task, - jme_tx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxclean_task, - jme_rx_clean_tasklet, - (unsigned long) jme); - tasklet_init(&jme->rxempty_task, - jme_rx_empty_tasklet, - (unsigned long) jme); - tasklet_disable_nosync(&jme->linkch_task); - tasklet_disable_nosync(&jme->txclean_task); - tasklet_disable_nosync(&jme->rxclean_task); - tasklet_disable_nosync(&jme->rxempty_task); jme->dpi.cur = PCC_P1; jme->reg_ghc = 0; diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index e558edd1cb6..69e01977a1d 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev) rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); if (rc) return rc; - tasklet_enable(&hw_priv->rx_tasklet); - tasklet_enable(&hw_priv->tx_tasklet); + tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, + (unsigned long) hw_priv); + tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, + (unsigned long) hw_priv); hw->promiscuous = 0; hw->all_multi = 0; @@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev, spin_lock_init(&hw_priv->hwlock); mutex_init(&hw_priv->lock); - /* tasklet is enabled. */ - tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, - (unsigned long) hw_priv); - tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, - (unsigned long) hw_priv); - - /* tasklet_enable will decrement the atomic counter. 
*/ - tasklet_disable(&hw_priv->rx_tasklet); - tasklet_disable(&hw_priv->tx_tasklet); - for (i = 0; i < TOTAL_PORT_NUM; i++) init_waitqueue_head(&hw_priv->counter[i].counter); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 1c818254b7b..b01f83a044c 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); - cpw32_f(HiTxRingAddr, 0); - cpw32_f(HiTxRingAddr + 4, 0); - - ring_dma = cp->ring_dma; - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); - - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); - cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) cpw8(Config5, cpr8(Config5) & PMEStatus); + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + cpw16(MultiIntr, 0); cpw8_f(Cfg9346, Cfg9346_Lock); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fb9f6b38511..edf5edb1314 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev) netif_start_queue(net_dev); /* Workaround for EDB */ - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); /* Enable all known interrupts by setting the interrupt mask. */ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 62d1baf111e..c53c0f4e2ce 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev) static int __devinit smsc911x_init(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - unsigned int byte_test; + unsigned int byte_test, mask; unsigned int to = 100; SMSC_TRACE(pdata, probe, "Driver Parameters:"); @@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev) /* * poll the READY bit in PMT_CTRL. Any other access to the device is * forbidden while this bit isn't set. Try for 100ms + * + * Note that this test is done before the WORD_SWAP register is + * programmed. So in some configurations the READY bit is at 16 before + * WORD_SWAP is written to. This issue is worked around by waiting + * until either bit 0 or bit 16 gets set in PMT_CTRL. + * + * SMSC has confirmed that checking bit 16 (marked as reserved in + * the datasheet) is fine since these bits "will either never be set + * or can only go high after READY does (so also indicate the device + * is ready)". 
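The widened poll in this smsc911x hunk works because swahw32(), from <linux/swab.h>, exchanges the two 16-bit halves of a 32-bit value: OR-ing the READY bit with its halfword-swapped copy yields a mask that matches the flag at bit 0 and at bit 16, covering the register layout both before and after WORD_SWAP is programmed. For instance:

#include <linux/types.h>
#include <linux/swab.h>

#define READY_BIT	0x00000001U	/* stands in for PMT_CTRL_READY_ */

static inline u32 ready_poll_mask(void)
{
	/* swahw32(0x00000001) == 0x00010000, so the mask is 0x00010001 */
	return READY_BIT | swahw32(READY_BIT);
}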
*/ - while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + + mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_); + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to) udelay(1000); + if (to == 0) { pr_err("Device not READY in 100ms aborting\n"); return -ENODEV; diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 4e981001385..66e025ad5df 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) ingress_irq = rc; tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, - 0, NULL, NULL); + 0, "tile_net", NULL); if (rc != 0) { netdev_err(dev, "request_irq failed: %d\n", rc); destroy_irq(ingress_irq); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 1d04754a663..a788501e978 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -894,6 +894,8 @@ out: return IRQ_HANDLED; } +static void axienet_dma_err_handler(unsigned long data); + /** * axienet_open - Driver open routine. * @ndev: Pointer to net_device structure @@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev) phy_start(lp->phy_dev); } + /* Enable tasklets for Axi DMA error handling */ + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, + (unsigned long) lp); + /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); if (ret) @@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev) ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); if (ret) goto err_rx_irq; - /* Enable tasklets for Axi DMA error handling */ - tasklet_enable(&lp->dma_err_tasklet); + return 0; err_rx_irq: @@ -960,6 +965,7 @@ err_tx_irq: if (lp->phy_dev) phy_disconnect(lp->phy_dev); lp->phy_dev = NULL; + tasklet_kill(&lp->dma_err_tasklet); dev_err(lp->dev, "request_irq() failed\n"); return ret; } @@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op) goto err_iounmap_2; } - tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, - (unsigned long) lp); - tasklet_disable(&lp->dma_err_tasklet); - return 0; err_iounmap_2: diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 98934bdf6ac..477d6729b17 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1102,10 +1102,12 @@ static int init_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 5039f08f5a5..43e9ab4f4d7 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 6428fcbbdd4..daec9b05d16 
100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus) struct mdiobb_ctrl *ctrl = bus->priv; module_put(ctrl->ops->owner); - mdiobus_unregister(bus); mdiobus_free(bus); } EXPORT_SYMBOL(free_mdio_bitbang); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 899274f2f9b..2ed1140df3e 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) { struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; - int ret; + int ret, bus_id; - if (pdev->dev.of_node) + if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); - else + bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + } else { pdata = pdev->dev.platform_data; + bus_id = pdev->id; + } if (!pdata) return -ENODEV; - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id); + new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); if (!new_bus) return -ENODEV; diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index 9db0171e936..c5db428e73f 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) if (last) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { - ret = team_dev_queue_xmit(team, last, - skb2); + ret = !team_dev_queue_xmit(team, last, + skb2); if (!sum_ret) sum_ret = ret; } @@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) } } if (last) { - ret = team_dev_queue_xmit(team, last, skb); + ret = !team_dev_queue_xmit(team, last, skb); if (!sum_ret) sum_ret = ret; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 4cd582a4f62..74fab1a4015 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -540,10 +540,12 @@ advance: (ctx->ether_desc == NULL) || (ctx->control != intf)) goto error; - /* claim interfaces, if any */ - temp = usb_driver_claim_interface(driver, ctx->data, dev); - if (temp) - goto error; + /* claim data interface, if different from control */ + if (ctx->data != ctx->control) { + temp = usb_driver_claim_interface(driver, ctx->data, dev); + if (temp) + goto error; + } iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; @@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) tasklet_kill(&ctx->bh); + /* handle devices with combined control and data interface */ + if (ctx->control == ctx->data) + ctx->data = NULL; + /* disconnect master --> disconnect slave */ if (intf == ctx->control && ctx->data) { usb_set_intfdata(ctx->data, NULL); @@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Huawei NCM devices disguised as vendor specific */ + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16), + .driver_info = (unsigned long)&wwan_info, + }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46), + .driver_info = (unsigned long)&wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 3286166415b..362cb8cfeb9 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx) /* set the 
address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = (phy_id << 11) | (idx << 6) | MII_READ_; + addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); @@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; - addr = (phy_id << 11) | (idx << 6) | MII_WRITE_; + addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; ret = smsc95xx_write_reg(dev, MII_ADDR, addr); check_warn_goto_done(ret, "Error writing MII_ADDR"); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7b4adde93c0..8b5c6191707 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1,5 +1,5 @@ /* - * VXLAN: Virtual eXtensiable Local Area Network + * VXLAN: Virtual eXtensible Local Area Network * * Copyright (c) 2012 Vyatta Inc. * @@ -50,8 +50,8 @@ #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) -/* VLAN + IP header + UDP + VXLAN */ -#define VXLAN_HEADROOM (4 + 20 + 8 + 8) +/* IP header + UDP + VXLAN + Ethernet header */ +#define VXLAN_HEADROOM (20 + 8 + 8 + 14) #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ @@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu - VXLAN_HEADROOM; + + /* update header length based on lower device */ + dev->hard_header_len = lowerdev->hard_header_len + + VXLAN_HEADROOM; } if (data[IFLA_VXLAN_TOS]) diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 3f575afd8cf..e9a3da588e9 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port) { int i; - if (!ports_open) - if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, - POOL_ALLOC_SIZE, 32, 0))) + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) return -ENOMEM; + } if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys))) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8e1559aba49..1829b445d0b 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) switch (type) { case ATH9K_RESET_POWER_ON: ret = ath9k_hw_set_reset_power_on(ah); - if (!ret) + if (ret) ah->reset_power_on = true; break; case ATH9K_RESET_WARM: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index a6f1e816600..481345c23de 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode) static void brcmf_wiphy_pno_params(struct wiphy *wiphy) { -#ifndef CONFIG_BRCMFISCAN +#ifndef CONFIG_BRCMISCAN /* scheduled scan settings */ wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index ff8162d4c45..2d9eee93c74 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ 
b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); if (iwlagn_tx_skb(priv, control->sta, skb)) - dev_kfree_skb_any(skb); + ieee80211_free_txskb(hw, skb); } static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, @@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, vif_priv->ctx = ctx; ctx->vif = vif; + /* + * In SNIFFER device type, the firmware reports the FCS to + * the host, rather than snipping it off. Unfortunately, + * mac80211 doesn't (yet) provide a per-packet flag for + * this, so that we have to set the hardware flag based + * on the interfaces added. As the monitor interface can + * only be present by itself, and will be removed before + * other interfaces are added, this is safe. + */ + if (vif->type == NL80211_IFTYPE_MONITOR) + priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + err = iwl_setup_interface(priv, ctx); if (!err || reset) goto out; diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 7ff3f143067..408132cf83c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); - dev_kfree_skb_any(skb); + ieee80211_free_txskb(priv->hw, skb); } static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 17c8e5d8268..bb69f8f90b3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) dma_map_page(trans->dev, page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + spin_lock_irqsave(&rxq->lock, flags); + list_add(&rxb->list, &rxq->rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, trans_pcie->rx_page_order); + return; + } /* dma address must be no more than 36 bits */ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); /* and also 256 byte aligned! 
*/ @@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, dma_map_page(trans->dev, rxb->page, 0, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); - list_add_tail(&rxb->list, &rxq->rx_free); - rxq->free_count++; + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + /* + * free the page(s) as well to not break + * the invariant that the items on the used + * list have no page(s) + */ + __free_pages(rxb->page, trans_pcie->rx_page_order); + rxb->page = NULL; + list_add_tail(&rxb->list, &rxq->rx_used); + } else { + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } } else list_add_tail(&rxb->list, &rxq->rx_used); spin_unlock_irqrestore(&rxq->lock, flags); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 105e3af3c62..79a4ddc002d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 rd_ptr, wr_ptr; - int n_bd = trans_pcie->txq[txq_id].q.n_bd; if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { WARN_ONCE(1, "queue %d not used", txq_id); return; } - rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); - wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); - - WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", - txq_id, rd_ptr, wr_ptr); - iwl_txq_set_inactive(trans, txq_id); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 8d465107f52..ae9010ed58d 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) return; } cmd_node = adapter->curr_cmd; - if (cmd_node->wait_q_enabled) - adapter->cmd_wait_q.status = -ETIMEDOUT; - if (cmd_node) { adapter->dbg.timeout_cmd_id = adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; @@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context) dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", adapter->ps_mode, adapter->ps_state); + + if (cmd_node->wait_q_enabled) { + adapter->cmd_wait_q.status = -ETIMEDOUT; + wake_up_interruptible(&adapter->cmd_wait_q.wait); + mwifiex_cancel_pending_ioctl(adapter); + /* reset cmd_sent flag to unblock new commands */ + adapter->cmd_sent = false; + } } if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index fc8a9bfa124..82cf0fa2d9f 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int ret = 0; @@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: 
suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 9970c2b1b19..b7e6607e6b6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index caa011008cd..fc24eb9b394 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, /* Grant backend access to each skb fragment page. */ for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct page *page = skb_frag_page(frag); - tx->flags |= XEN_NETTXF_more_data; + len = skb_frag_size(frag); + offset = frag->page_offset; - id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); - np->tx_skbs[id].skb = skb_get(skb); - tx = RING_GET_REQUEST(&np->tx, prod++); - tx->id = id; - ref = gnttab_claim_grant_reference(&np->gref_tx_head); - BUG_ON((signed short)ref < 0); + /* Data must not cross a page boundary. */ + BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); - mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag))); - gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, - mfn, GNTMAP_readonly); + /* Skip unused frames from start of page */ + page += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; - tx->gref = np->grant_tx_ref[id] = ref; - tx->offset = frag->page_offset; - tx->size = skb_frag_size(frag); - tx->flags = 0; + while (len > 0) { + unsigned long bytes; + + BUG_ON(offset >= PAGE_SIZE); + + bytes = PAGE_SIZE - offset; + if (bytes > len) + bytes = len; + + tx->flags |= XEN_NETTXF_more_data; + + id = get_id_from_freelist(&np->tx_skb_freelist, + np->tx_skbs); + np->tx_skbs[id].skb = skb_get(skb); + tx = RING_GET_REQUEST(&np->tx, prod++); + tx->id = id; + ref = gnttab_claim_grant_reference(&np->gref_tx_head); + BUG_ON((signed short)ref < 0); + + mfn = pfn_to_mfn(page_to_pfn(page)); + gnttab_grant_foreign_access_ref(ref, + np->xbdev->otherend_id, + mfn, GNTMAP_readonly); + + tx->gref = np->grant_tx_ref[id] = ref; + tx->offset = offset; + tx->size = bytes; + tx->flags = 0; + + offset += bytes; + len -= bytes; + + /* Next frame */ + if (offset == PAGE_SIZE && len) { + BUG_ON(!PageCompound(page)); + page++; + offset = 0; + } + } } np->tx.req_prod_pvt = prod; } +/* + * Count how many ring slots are required to send the frags of this + * skb. Each frag might be a compound page. 
+ */ +static int xennet_count_skb_frag_slots(struct sk_buff *skb) +{ + int i, frags = skb_shinfo(skb)->nr_frags; + int pages = 0; + + for (i = 0; i < frags; i++) { + skb_frag_t *frag = skb_shinfo(skb)->frags + i; + unsigned long size = skb_frag_size(frag); + unsigned long offset = frag->page_offset; + + /* Skip unused frames from start of page */ + offset &= ~PAGE_MASK; + + pages += PFN_UP(offset + size); + } + + return pages; +} + static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; @@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) grant_ref_t ref; unsigned long mfn; int notify; - int frags = skb_shinfo(skb)->nr_frags; + int slots; unsigned int offset = offset_in_page(data); unsigned int len = skb_headlen(skb); unsigned long flags; - frags += DIV_ROUND_UP(offset + len, PAGE_SIZE); - if (unlikely(frags > MAX_SKB_FRAGS + 1)) { - printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", - frags); - dump_stack(); + slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + + xennet_count_skb_frag_slots(skb); + if (unlikely(slots > MAX_SKB_FRAGS + 1)) { + net_alert_ratelimited( + "xennet: skb rides the rocket: %d slots\n", slots); goto drop; } spin_lock_irqsave(&np->tx_lock, flags); if (unlikely(!netif_carrier_ok(dev) || - (frags > 1 && !xennet_can_sg(dev)) || + (slots > 1 && !xennet_can_sg(dev)) || netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irqrestore(&np->tx_lock, flags); goto drop; diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 97c440a8cd6..30ae18a03a9 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work) cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); + list_del(&cmd->queue); + mutex_unlock(&dev->cmd_lock); __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame, cmd->in_frame_len, cmd->cmd_complete, cmd->arg, cmd->flags); - list_del(&cmd->queue); kfree(cmd); } @@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { - struct pn533_cmd_jump_dep *cmd; struct pn533_cmd_jump_dep_response *resp; struct nfc_target nfc_target; u8 target_gt_len; int rc; + struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg; + u8 active = cmd->active; + + kfree(arg); if (params_len == -ENOENT) { nfc_dev_dbg(&dev->interface->dev, ""); @@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, } resp = (struct pn533_cmd_jump_dep_response *) params; - cmd = (struct pn533_cmd_jump_dep *) arg; rc = resp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_dev_err(&dev->interface->dev, @@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, if (rc == 0) rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, - !cmd->active, NFC_RF_INITIATOR); + !active, NFC_RF_INITIATOR); return 0; } @@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame, dev->in_maxlen, pn533_in_dep_link_up_complete, cmd, GFP_KERNEL); - if (rc) - goto out; - - -out: - kfree(cmd); + if (rc < 0) + kfree(cmd); return rc; } @@ -2078,8 +2077,12 @@ error: static int pn533_tm_send_complete(struct pn533 *dev, void *arg, u8 *params, int params_len) { + struct sk_buff *skb_out = arg; + 
nfc_dev_dbg(&dev->interface->dev, "%s", __func__); + dev_kfree_skb(skb_out); + if (params_len < 0) { nfc_dev_err(&dev->interface->dev, "Error %d when sending data", @@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame, dev->in_maxlen, pn533_tm_send_complete, - NULL, GFP_KERNEL); + skb, GFP_KERNEL); if (rc) { nfc_dev_err(&dev->interface->dev, "Error %d when trying to send data", rc); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index d96caefd914..aeecf0f72ca 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -178,7 +178,7 @@ config PINCTRL_COH901 ports of 8 GPIO pins each. config PINCTRL_SAMSUNG - bool "Samsung pinctrl driver" + bool depends on OF && GPIOLIB select PINMUX select PINCONF diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 653d5637a37..72e822e17d4 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ - if (!('@' <= (c) || (c) <= 'Z')) \ + if (!('A' <= (c) && (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { @@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) return -ENODEV; } + if (WARN_ON_ONCE(acpi_dev != dev->data)) + dev->data = acpi_dev; + ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c17ae22567e..0c6fcb461fa 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); /** * rio_map_inb_region -- Map inbound memory region. * @mport: Master port. - * @lstart: physical address of memory region to be mapped + * @local: physical address of memory region to be mapped * @rbase: RIO base address assigned to this window * @size: Size of the memory region * @rflags: Flags for mapping. diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 5c4829cba6a..e872c8be080 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id) } EXPORT_SYMBOL_GPL(regulator_get_exclusive); -/** - * regulator_put - "free" the regulator source - * @regulator: regulator source - * - * Note: drivers must ensure that all regulator_enable calls made on this - * regulator source are balanced by regulator_disable calls prior to calling - * this function. - */ -void regulator_put(struct regulator *regulator) +/* Locks held by regulator_put() */ +static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; if (regulator == NULL || IS_ERR(regulator)) return; - mutex_lock(®ulator_list_mutex); rdev = regulator->rdev; debugfs_remove_recursive(regulator->debugfs); @@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator) rdev->exclusive = 0; module_put(rdev->owner); +} + +/** + * regulator_put - "free" the regulator source + * @regulator: regulator source + * + * Note: drivers must ensure that all regulator_enable calls made on this + * regulator source are balanced by regulator_disable calls prior to calling + * this function. 
+ */ +void regulator_put(struct regulator *regulator) +{ + mutex_lock(®ulator_list_mutex); + _regulator_put(regulator); mutex_unlock(®ulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_put); @@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator, if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { ret = regulator_get_voltage(regulator); if (ret >= 0) - return (min_uV >= ret && ret <= max_uV); + return (min_uV <= ret && ret <= max_uV); else return ret; } @@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc, if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", config->ena_gpio, ret); - goto clean; + goto wash; } rdev->ena_gpio = config->ena_gpio; @@ -3445,10 +3451,11 @@ unset_supplies: scrub: if (rdev->supply) - regulator_put(rdev->supply); + _regulator_put(rdev->supply); if (rdev->ena_gpio) gpio_free(rdev->ena_gpio); kfree(rdev->constraints); +wash: device_unregister(&rdev->dev); /* device core frees rdev */ rdev = ERR_PTR(ret); diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9ffb6d5f17a..4ed343e4eb4 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -44,7 +44,6 @@ #define RAW3215_NR_CCWS 3 #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ -#define RAW3215_FIXED 1 /* 3215 console device is not be freed */ #define RAW3215_WORKING 4 /* set if a request is being worked on */ #define RAW3215_THROTTLED 8 /* set if reading is disabled */ #define RAW3215_STOPPED 16 /* set if writing is disabled */ @@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data) struct tty_struct *tty; tty = tty_port_tty_get(&raw->port); - tty_wakeup(tty); - tty_kref_put(tty); + if (tty) { + tty_wakeup(tty); + tty_kref_put(tty); + } } /* @@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw) DECLARE_WAITQUEUE(wait, current); unsigned long flags; - if (!(raw->port.flags & ASYNC_INITIALIZED) || - (raw->flags & RAW3215_FIXED)) + if (!(raw->port.flags & ASYNC_INITIALIZED)) return; /* Wait for outstanding requests, then free irq */ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); @@ -926,8 +926,6 @@ static int __init con3215_init(void) dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; - raw->flags |= RAW3215_FIXED; - /* Request the console irq */ if (raw3215_startup(raw) != 0) { raw3215_free_info(raw); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3e25d315045..4d6ba00d004 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2942,13 +2942,33 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, QETH_DBF_TEXT(SETUP, 2, "qipasscb"); cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_NOTSUPP: + case IPA_RC_L2_UNSUPPORTED_CMD: + QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); + card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; + return -0; + default: + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " + "rc=%d\n", + dev_name(&card->gdev->dev), + cmd->hdr.return_code); + return 0; + } + } + if (cmd->hdr.prot_version == QETH_PROT_IPV4) { card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - } else { + } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa6.enabled_funcs 
= cmd->hdr.ipa_enabled; - } + } else + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" + "\n", dev_name(&card->gdev->dev)); QETH_DBF_TEXT(SETUP, 2, "suppenbl"); QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported); QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index e67e0258aec..fddb62654b6 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -626,10 +626,13 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "doL2init"); QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter parameters on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc) { + QETH_DBF_MESSAGE(2, "could not query adapter " + "parameters on device %s: x%x\n", + CARD_BUS_ID(card), rc); + } } if (card->info.type == QETH_CARD_TYPE_IQD || @@ -676,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -ERESTARTSYS; } rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc) + if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) rc = qeth_l2_send_setmac(card, addr->sa_data); return rc ? -EINVAL : 0; } diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index c1bafc3f3fb..9594ab62702 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2936b447cae..2c0d0ec8150 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -55,6 +55,7 @@ #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/async.h> +#include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -1062,6 +1063,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** + * scsi_report_opcode - Find out if a given command opcode is supported + * @sdev: scsi device to query + * @buffer: scratch buffer (must be at least 20 bytes long) + * @len: length of buffer + * @opcode: opcode for command to look up + * + * Uses the REPORT SUPPORTED OPERATION CODES to look up the given + * opcode. Returns 0 if RSOC fails or if the command opcode is + * unsupported. Returns 1 if the device claims to support the command. 
+ */ +int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode) +{ + unsigned char cmd[16]; + struct scsi_sense_hdr sshdr; + int result; + + if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) + return 0; + + memset(cmd, 0, 16); + cmd[0] = MAINTENANCE_IN; + cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; + cmd[2] = 1; /* One command format */ + cmd[3] = opcode; + put_unaligned_be32(len, &cmd[6]); + memset(buffer, 0, len); + + result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, + &sshdr, 30 * HZ, 3, NULL); + + if (result && scsi_sense_valid(&sshdr) && + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) + return 0; + + if ((buffer[1] & 3) == 3) /* Command supported */ + return 1; + + return 0; +} +EXPORT_SYMBOL(scsi_report_opcode); + +/** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to * diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index da36a3a81a9..9032e910bca 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; error = -EILSEQ; /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (cmd->cmnd[0] == UNMAP || - cmd->cmnd[0] == WRITE_SAME_16 || - cmd->cmnd[0] == WRITE_SAME)) { - description = "Discard failure"; + } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (cmd->cmnd[0]) { + case UNMAP: + description = "Discard failure"; + break; + case WRITE_SAME: + case WRITE_SAME_16: + if (cmd->cmnd[1] & 0x8) + description = "Discard failure"; + else + description = + "Write same failure"; + break; + default: + description = "Invalid command failure"; + break; + } action = ACTION_FAIL; error = -EREMOTEIO; } else diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 12f6fdfc114..352bc77b7c8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #endif static void sd_config_discard(struct scsi_disk *, unsigned int); +static void sd_config_write_same(struct scsi_disk *); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); @@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev, return err ? 
err : count; } +static ssize_t +sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks); +} + +static ssize_t +sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned long max; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + err = kstrtoul(buf, 10, &max); + + if (err) + return err; + + if (max == 0) + sdp->no_write_same = 1; + else if (max <= SD_MAX_WS16_BLOCKS) + sdkp->max_ws_blocks = max; + + sd_config_write_same(sdkp); + + return count; +} + static struct device_attribute sd_disk_attrs[] = { __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, sd_store_cache_type), @@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, sd_store_provisioning_mode), + __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, + sd_show_write_same_blocks, sd_store_write_same_blocks), __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, sd_show_max_medium_access_timeouts, sd_store_max_medium_access_timeouts), @@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) return; case SD_LBP_UNMAP: - max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_unmap_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS16: - max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); break; case SD_LBP_WS10: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); break; case SD_LBP_ZERO: - max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + max_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); q->limits.discard_zeroes_data = 1; break; } @@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) } /** - * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device + * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device * @sdp: scsi device to operate one * @rq: Request to prepare * * Will issue either UNMAP or WRITE SAME(16) depending on preference * indicated by target device. 
**/ -static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) +static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) { struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - struct bio *bio = rq->bio; - sector_t sector = bio->bi_sector; - unsigned int nr_sectors = bio_sectors(bio); + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); unsigned int len; int ret; char *buf; struct page *page; - if (sdkp->device->sector_size == 4096) { - sector >>= 3; - nr_sectors >>= 3; - } - + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; rq->timeout = SD_TIMEOUT; memset(rq->cmd, 0, rq->cmd_len); @@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); rq->buffer = page_address(page); + rq->__data_len = nr_bytes; out: if (ret != BLKPREP_OK) { @@ -669,6 +713,83 @@ out: return ret; } +static void sd_config_write_same(struct scsi_disk *sdkp) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int blocks = 0; + + if (sdkp->device->no_write_same) { + sdkp->max_ws_blocks = 0; + goto out; + } + + /* Some devices can not handle block counts above 0xffff despite + * supporting WRITE SAME(16). Consequently we default to 64k + * blocks per I/O unless the device explicitly advertises a + * bigger limit. + */ + if (sdkp->max_ws_blocks == 0) + sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS; + + if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); + else + blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + +out: + blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9)); +} + +/** + * sd_setup_write_same_cmnd - write the same data to multiple blocks + * @sdp: scsi device to operate one + * @rq: Request to prepare + * + * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on + * preference indicated by target device. + **/ +static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) +{ + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + struct bio *bio = rq->bio; + sector_t sector = blk_rq_pos(rq); + unsigned int nr_sectors = blk_rq_sectors(rq); + unsigned int nr_bytes = blk_rq_bytes(rq); + int ret; + + if (sdkp->device->no_write_same) + return BLKPREP_KILL; + + BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); + + sector >>= ilog2(sdp->sector_size) - 9; + nr_sectors >>= ilog2(sdp->sector_size) - 9; + + rq->__data_len = sdp->sector_size; + rq->timeout = SD_WRITE_SAME_TIMEOUT; + memset(rq->cmd, 0, rq->cmd_len); + + if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { + rq->cmd_len = 16; + rq->cmd[0] = WRITE_SAME_16; + put_unaligned_be64(sector, &rq->cmd[2]); + put_unaligned_be32(nr_sectors, &rq->cmd[10]); + } else { + rq->cmd_len = 10; + rq->cmd[0] = WRITE_SAME; + put_unaligned_be32(sector, &rq->cmd[2]); + put_unaligned_be16(nr_sectors, &rq->cmd[7]); + } + + ret = scsi_setup_blk_pc_cmnd(sdp, rq); + rq->__data_len = nr_bytes; + + return ret; +} + static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { rq->timeout = SD_FLUSH_TIMEOUT; @@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * block PC requests to make life easier. 
*/ if (rq->cmd_flags & REQ_DISCARD) { - ret = scsi_setup_discard_cmnd(sdp, rq); + ret = sd_setup_discard_cmnd(sdp, rq); + goto out; + } else if (rq->cmd_flags & REQ_WRITE_SAME) { + ret = sd_setup_write_same_cmnd(sdp, rq); goto out; } else if (rq->cmd_flags & REQ_FLUSH) { ret = scsi_setup_flush_cmnd(sdp, rq); @@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt); struct scsi_sense_hdr sshdr; struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); + struct request *req = SCpnt->request; int sense_valid = 0; int sense_deferred = 0; unsigned char op = SCpnt->cmnd[0]; + unsigned char unmap = SCpnt->cmnd[1] & 8; - if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) - scsi_set_resid(SCpnt, 0); + if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) { + if (!result) { + good_bytes = blk_rq_bytes(req); + scsi_set_resid(SCpnt, 0); + } else { + good_bytes = 0; + scsi_set_resid(SCpnt, blk_rq_bytes(req)); + } + } if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); @@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt) if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) - sd_config_discard(sdkp, SD_LBP_DISABLE); + if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (op) { + case UNMAP: + sd_config_discard(sdkp, SD_LBP_DISABLE); + break; + case WRITE_SAME_16: + case WRITE_SAME: + if (unmap) + sd_config_discard(sdkp, SD_LBP_DISABLE); + else { + sdkp->device->no_write_same = 1; + sd_config_write_same(sdkp); + + good_bytes = 0; + req->__data_len = blk_rq_bytes(req); + req->cmd_flags |= REQ_QUIET; + } + } + } break; default: break; @@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) if (buffer[3] == 0x3c) { unsigned int lba_count, desc_count; - sdkp->max_ws_blocks = - (u32) min_not_zero(get_unaligned_be64(&buffer[36]), - (u64)0xffffffff); + sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]); if (!sdkp->lbpme) goto out; @@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp) kfree(buffer); } +static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) +{ + if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE, + WRITE_SAME_16)) + sdkp->ws16 = 1; +} + static int sd_try_extended_inquiry(struct scsi_device *sdp) { /* @@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_write_protect_flag(sdkp, buffer); sd_read_cache_type(sdkp, buffer); sd_read_app_tag_own(sdkp, buffer); + sd_read_write_same(sdkp, buffer); } sdkp->first_scan = 0; @@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk) blk_queue_flush(sdkp->disk->queue, flush); set_capacity(disk, sdkp->capacity); + sd_config_write_same(sdkp); kfree(buffer); out: diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 47c52a6d733..74a1e4ca540 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -14,6 +14,7 @@ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) #define SD_FLUSH_TIMEOUT (60 * HZ) +#define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* * Number of allowed retries @@ -39,6 +40,11 @@ enum { }; enum { + SD_MAX_WS10_BLOCKS = 0xffff, + SD_MAX_WS16_BLOCKS = 0x7fffff, +}; + +enum { SD_LBP_FULL = 0, /* Full logical block provisioning */ SD_LBP_UNMAP, /* Use UNMAP command */ 
SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ @@ -77,6 +83,7 @@ struct scsi_disk { unsigned lbpws : 1; unsigned lbpws10 : 1; unsigned lbpvpd : 1; + unsigned ws16 : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h index f2ffd963f1c..d0cafd63719 100644 --- a/drivers/staging/android/android_alarm.h +++ b/drivers/staging/android/android_alarm.h @@ -51,12 +51,10 @@ enum android_alarm_return_flags { #define ANDROID_ALARM_WAIT _IO('a', 1) #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) -#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size) - /* Set alarm */ #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) -#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index a5dec1ca1b8..13ee53bd0bf 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -424,7 +424,6 @@ static void hvc_hangup(struct tty_struct *tty) { struct hvc_struct *hp = tty->driver_data; unsigned long flags; - int temp_open_count; if (!hp) return; @@ -444,7 +443,6 @@ static void hvc_hangup(struct tty_struct *tty) return; } - temp_open_count = hp->port.count; hp->port.count = 0; spin_unlock_irqrestore(&hp->port.lock, flags); tty_port_tty_set(&hp->port, NULL); @@ -453,11 +451,6 @@ static void hvc_hangup(struct tty_struct *tty) if (hp->ops->notifier_hangup) hp->ops->notifier_hangup(hp, hp->data); - - while(temp_open_count) { - --temp_open_count; - tty_port_put(&hp->port); - } } /* diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 2bc28a59d38..1ab1d2c66de 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -1239,6 +1239,7 @@ static int __devexit max310x_remove(struct spi_device *spi) static const struct spi_device_id max310x_id_table[] = { { "max3107", MAX310X_TYPE_MAX3107 }, { "max3108", MAX310X_TYPE_MAX3108 }, + { } }; MODULE_DEVICE_TABLE(spi, max310x_id_table); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1e741bca026..f034716190f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2151,8 +2151,15 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum); irqreturn_t usb_hcd_irq (int irq, void *__hcd) { struct usb_hcd *hcd = __hcd; + unsigned long flags; irqreturn_t rc; + /* IRQF_DISABLED doesn't work correctly with shared IRQs + * when the first handler doesn't use it. So let's just + * assume it's never used. + */ + local_irq_save(flags); + if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) rc = IRQ_NONE; else if (hcd->driver->irq(hcd) == IRQ_NONE) @@ -2160,6 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd) else rc = IRQ_HANDLED; + local_irq_restore(flags); return rc; } EXPORT_SYMBOL_GPL(usb_hcd_irq); @@ -2347,6 +2355,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd, int retval; if (hcd->driver->irq) { + + /* IRQF_DISABLED doesn't work as advertised when used together + * with IRQF_SHARED. As usb_hcd_irq() will always disable + * interrupts we can remove it here. 
+ */ + if (irqflags & IRQF_SHARED) + irqflags &= ~IRQF_DISABLED; + snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", hcd->driver->description, hcd->self.busnum); retval = request_irq(irqnum, &usb_hcd_irq, irqflags, diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index e426ad626d7..4bfa78af379 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -20,6 +20,7 @@ #include <linux/usb/ehci_def.h> #include <linux/delay.h> #include <linux/serial_core.h> +#include <linux/kconfig.h> #include <linux/kgdb.h> #include <linux/kthread.h> #include <asm/io.h> @@ -614,12 +615,6 @@ err: return -ENODEV; } -int dbgp_external_startup(struct usb_hcd *hcd) -{ - return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); -} -EXPORT_SYMBOL_GPL(dbgp_external_startup); - static int ehci_reset_port(int port) { u32 portsc; @@ -979,6 +974,7 @@ struct console early_dbgp_console = { .index = -1, }; +#if IS_ENABLED(CONFIG_USB_EHCI_HCD) int dbgp_reset_prep(struct usb_hcd *hcd) { int ret = xen_dbgp_reset_prep(hcd); @@ -1007,6 +1003,13 @@ int dbgp_reset_prep(struct usb_hcd *hcd) } EXPORT_SYMBOL_GPL(dbgp_reset_prep); +int dbgp_external_startup(struct usb_hcd *hcd) +{ + return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup(); +} +EXPORT_SYMBOL_GPL(dbgp_external_startup); +#endif /* USB_EHCI_HCD */ + #ifdef CONFIG_KGDB static char kgdbdbgp_buf[DBGP_MAX_PACKET]; diff --git a/drivers/usb/host/ehci-ls1x.c b/drivers/usb/host/ehci-ls1x.c index ca759652626..aa0f328922d 100644 --- a/drivers/usb/host/ehci-ls1x.c +++ b/drivers/usb/host/ehci-ls1x.c @@ -113,7 +113,7 @@ static int ehci_hcd_ls1x_probe(struct platform_device *pdev) goto err_put_hcd; } - ret = usb_add_hcd(hcd, irq, IRQF_SHARED); + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (ret) goto err_put_hcd; diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c index 84201cd1a47..41e378f17c6 100644 --- a/drivers/usb/host/ohci-xls.c +++ b/drivers/usb/host/ohci-xls.c @@ -56,7 +56,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver, goto err3; } - retval = usb_add_hcd(hcd, irq, IRQF_SHARED); + retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (retval != 0) goto err4; return retval; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index d0b87e7b4ab..b6b84dacc79 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -707,11 +707,12 @@ static void rxstate(struct musb *musb, struct musb_request *req) fifo_count = musb_readw(epio, MUSB_RXCOUNT); /* - * use mode 1 only if we expect data of at least ep packet_sz - * and have not yet received a short packet + * Enable Mode 1 on RX transfers only when short_not_ok flag + * is set. Currently short_not_ok flag is set only from + * file_storage and f_mass_storage drivers */ - if ((request->length - request->actual >= musb_ep->packet_sz) && - (fifo_count >= musb_ep->packet_sz)) + + if (request->short_not_ok && fifo_count == musb_ep->packet_sz) use_mode_1 = 1; else use_mode_1 = 0; @@ -727,6 +728,27 @@ static void rxstate(struct musb *musb, struct musb_request *req) c = musb->dma_controller; channel = musb_ep->dma; + /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in + * mode 0 only. So we do not get endpoint interrupts due to DMA + * completion. We only get interrupts from DMA controller. + * + * We could operate in DMA mode 1 if we knew the size of the transfer + * in advance. 
For mass storage class, request->length = what the host + * sends, so that'd work. But for pretty much everything else, + * request->length is routinely more than what the host sends. For + * most of these gadgets, the end of a transfer is signified either by a short packet, + * or by filling the last byte of the buffer. (Sending extra data in + * that last packet should trigger an overflow fault.) But in mode 1, + * we don't get DMA completion interrupt for short packets. + * + * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), + * to get endpoint interrupt on every DMA req, but that didn't seem + * to work reliably. + * + * REVISIT an updated g_file_storage can set req->short_not_ok, which + * then becomes usable as a runtime "use mode 1" hint... + */ + /* Experimental: Mode1 works with mass storage use cases */ if (use_mode_1) { csr |= MUSB_RXCSR_AUTOCLEAR; diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index d62a91fedc2..0e62f504410 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -65,7 +65,7 @@ static int __devinit ux500_probe(struct platform_device *pdev) struct platform_device *musb; struct ux500_glue *glue; struct clk *clk; - + int musbid; int ret = -ENOMEM; glue = kzalloc(sizeof(*glue), GFP_KERNEL); diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index d8c8a42bff3..6223062d5d1 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -58,7 +58,7 @@ config USB_ULPI_VIEWPORT config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" - depends on TWL4030_CORE && REGULATOR_TWL4030 + depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS select USB_OTG_UTILS help Enable this to support the USB OTG transceiver on TWL4030 @@ -68,7 +68,7 @@ config TWL4030_USB config TWL6030_USB tristate "TWL6030 USB Transceiver Driver" - depends on TWL4030_CORE && OMAP_USB2 + depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS select USB_OTG_UTILS help Enable this to support the USB OTG transceiver on TWL6030 diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 7179b0c5f81..cff8dd5b462 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -2430,7 +2430,7 @@ static void keyspan_release(struct usb_serial *serial) static int keyspan_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; - struct keyspan_port_private *s_priv; + struct keyspan_serial_private *s_priv; struct keyspan_port_private *p_priv; const struct keyspan_device_details *d_details; struct callbacks *cback; @@ -2445,7 +2445,6 @@ static int keyspan_port_probe(struct usb_serial_port *port) if (!p_priv) return -ENOMEM; - s_priv = usb_get_serial_data(port->serial); p_priv->device_details = d_details; /* Setup values for the various callback routines */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5dee7d61241..edc64bb6f45 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -158,6 +158,7 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 +#define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_G1 0xA001 #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 @@ -193,6 +194,9 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define 
DELL_PRODUCT_5730_MINICARD_VZW 0x8182 +#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ +#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -283,6 +287,7 @@ static void option_instat_callback(struct urb *urb); /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 +#define ALCATEL_PRODUCT_X220_X500D 0x0017 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -706,6 +711,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -728,6 +734,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1157,6 +1165,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 61a73ad1a18..a3e9c095f0d 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c @@ -455,9 +455,6 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port, struct usb_serial *serial = port->serial; struct urb *urb; - if (endpoint == -1) - return NULL; /* endpoint not needed */ - urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ if (urb == NULL) { dev_dbg(&serial->interface->dev, @@ -489,6 +486,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port) init_usb_anchor(&portdata->delayed); for (i = 0; i < N_IN_URB; i++) { + if (!port->bulk_in_size) + break; + buffer = (u8 *)__get_free_page(GFP_KERNEL); if (!buffer) goto bail_out_error; @@ -502,8 +502,8 @@ int usb_wwan_port_probe(struct usb_serial_port *port) } for (i = 0; i < N_OUT_URB; i++) { - if (port->bulk_out_endpointAddress == -1) - continue; + if (!port->bulk_out_size) + break; buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a3d54366afc..92f35abee92 100644 --- 
a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev) /* Some devices don't handle VPD pages correctly */ sdev->skip_vpd_pages = 1; + /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */ + sdev->no_report_opcodes = 1; + + /* Do not attempt to use WRITE SAME */ + sdev->no_write_same = 1; + /* Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. * If this device makes that mistake, tell the sd driver. */ diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index d64ac384288..bee92846cfa 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module) struct omap_dss_output *out; enum omap_dss_output_id id; - id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + switch (module) { + case 0: + id = OMAP_DSS_OUTPUT_DSI1; + break; + case 1: + id = OMAP_DSS_OUTPUT_DSI2; + break; + default: + return NULL; + } out = omap_dss_get_output(id); - return out->pdev; + return out ? out->pdev : NULL; } static inline void dsi_write_reg(struct platform_device *dsidev, diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 2ab1c3e9655..5f6eea801b0 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -697,11 +697,15 @@ static int dss_get_clocks(void) dss.dss_clk = clk; - clk = clk_get(NULL, dss.feat->clk_name); - if (IS_ERR(clk)) { - DSSERR("Failed to get %s\n", dss.feat->clk_name); - r = PTR_ERR(clk); - goto err; + if (dss.feat->clk_name) { + clk = clk_get(NULL, dss.feat->clk_name); + if (IS_ERR(clk)) { + DSSERR("Failed to get %s\n", dss.feat->clk_name); + r = PTR_ERR(clk); + goto err; + } + } else { + clk = NULL; } dss.dpll4_m4_ck = clk; @@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev) if (cpu_is_omap24xx()) src = &omap24xx_dss_feats; - else if (cpu_is_omap34xx()) - src = &omap34xx_dss_feats; else if (cpu_is_omap3630()) src = &omap3630_dss_feats; + else if (cpu_is_omap34xx()) + src = &omap34xx_dss_feats; else if (cpu_is_omap44xx()) src = &omap44xx_dss_feats; else if (soc_is_omap54xx()) diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index a48a7dd75b3..8c9b8b3b7f7 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s) { mutex_lock(&hdmi.lock); - if (hdmi_runtime_get()) + if (hdmi_runtime_get()) { + mutex_unlock(&hdmi.lock); return; + } hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s); hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s); diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index 606b89f1235..d630b26a005 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) case OMAPFB_WAITFORVSYNC: DBG("ioctl WAITFORVSYNC\n"); - if (!display && !display->output && !display->output->manager) { + if (!display || !display->output || !display->output->manager) { r = -EINVAL; break; } diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 8adb9cc267f..71f5c459b08 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) down_write(&mm->mmap_sem); 
vma = find_vma(mm, m.addr); - ret = -EINVAL; if (!vma || vma->vm_ops != &privcmd_vm_ops || (m.addr != vma->vm_start) || ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) || !privcmd_enforce_singleshot_mapping(vma)) { up_write(&mm->mmap_sem); + ret = -EINVAL; goto out; } @@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version) up_write(&mm->mmap_sem); - if (state.global_error && (version == 1)) { - /* Write back errors in second pass. */ - state.user_mfn = (xen_pfn_t *)m.arr; - state.err = err_array; - ret = traverse_pages(m.num, sizeof(xen_pfn_t), - &pagelist, mmap_return_errors_v1, &state); + if (version == 1) { + if (state.global_error) { + /* Write back errors in second pass. */ + state.user_mfn = (xen_pfn_t *)m.arr; + state.err = err_array; + ret = traverse_pages(m.num, sizeof(xen_pfn_t), + &pagelist, mmap_return_errors_v1, &state); + } else + ret = 0; + } else if (version == 2) { ret = __copy_to_user(m.err, err_array, m.num * sizeof(int)); if (ret) diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 7320a66e958..22548f56197 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = range->minlen >> sb->s_blocksize_bits; - if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) || - unlikely(start >= max_blks)) + if (minlen > EXT3_BLOCKS_PER_GROUP(sb) || + start >= max_blks || + range->len < sb->s_blocksize) return -EINVAL; if (end >= max_blks) end = max_blks - 1; diff --git a/fs/file.c b/fs/file.c index 708d997a774..7cb71b99260 100644 --- a/fs/file.c +++ b/fs/file.c @@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files) struct fdtable *fdt; /* exec unshares first */ - BUG_ON(atomic_read(&files->count) != 1); spin_lock(&files->file_lock); for (i = 0; ; i++) { unsigned long set; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 60ef3fb707f..1506673c087 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_raw_inode ri; + uint32_t alloc_len = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; + jffs2_dbg(1, "%s()\n", __func__); + + if (pageofs > inode->i_size) { + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); + if (ret) + return ret; + } + + mutex_lock(&f->sem); pg = grab_cache_page_write_begin(mapping, index, flags); - if (!pg) + if (!pg) { + if (alloc_len) + jffs2_complete_reservation(c); + mutex_unlock(&f->sem); return -ENOMEM; + } *pagep = pg; - jffs2_dbg(1, "%s()\n", __func__); - - if (pageofs > inode->i_size) { + if (alloc_len) { /* Make new hole frag from old EOF to new page */ - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); - struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; - uint32_t alloc_len; jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", (unsigned int)inode->i_size, pageofs); - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); - if (ret) - goto out_page; - - mutex_lock(&f->sem); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file 
*filp, struct address_space *mapping, if (IS_ERR(fn)) { ret = PTR_ERR(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); @@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); - mutex_unlock(&f->sem); goto out_page; } jffs2_complete_reservation(c); inode->i_size = pageofs; - mutex_unlock(&f->sem); } /* @@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, * case of a short-copy. */ if (!PageUptodate(pg)) { - mutex_lock(&f->sem); ret = jffs2_do_readpage_nolock(inode, pg); - mutex_unlock(&f->sem); if (ret) goto out_page; } + mutex_unlock(&f->sem); jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); return ret; out_page: unlock_page(pg); page_cache_release(pg); + mutex_unlock(&f->sem); return ret; } diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 721d692fa8d..6fcaeb8c902 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (ret) goto out_close_fd; - fd_install(fd, f); + if (fd != FAN_NOFD) + fd_install(fd, f); return fanotify_event_metadata.event_len; out_close_fd: diff --git a/fs/proc/base.c b/fs/proc/base.c index 144a96732dd..3c231adf845 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -873,6 +873,113 @@ static const struct file_operations proc_environ_operations = { .release = mem_release, }; +static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); + char buffer[PROC_NUMBUF]; + int oom_adj = OOM_ADJUST_MIN; + size_t len; + unsigned long flags; + + if (!task) + return -ESRCH; + if (lock_task_sighand(task, &flags)) { + if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) + oom_adj = OOM_ADJUST_MAX; + else + oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / + OOM_SCORE_ADJ_MAX; + unlock_task_sighand(task, &flags); + } + put_task_struct(task); + len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); + return simple_read_from_buffer(buf, count, ppos, buffer, len); +} + +static ssize_t oom_adj_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + char buffer[PROC_NUMBUF]; + int oom_adj; + unsigned long flags; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoint(strstrip(buffer), 0, &oom_adj); + if (err) + goto out; + if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) && + oom_adj != OOM_DISABLE) { + err = -EINVAL; + goto out; + } + + task = get_proc_task(file->f_path.dentry->d_inode); + if (!task) { + err = -ESRCH; + goto out; + } + + task_lock(task); + if (!task->mm) { + err = -EINVAL; + goto err_task_lock; + } + + if (!lock_task_sighand(task, &flags)) { + err = -ESRCH; + goto err_task_lock; + } + + /* + * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum + * value is always attainable. 
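
The oom_adj read path above, and the write path that continues just below, map the legacy /proc/<pid>/oom_adj range of [-17, 15] onto oom_score_adj's [-1000, 1000], special-casing the maximum so it stays reachable after integer division. The constants match the uapi oom.h hunk later in this section. A small userspace-style sketch of both directions (main() added only to show sample values):

#include <stdio.h>

#define OOM_DISABLE        (-17)
#define OOM_ADJUST_MAX       15
#define OOM_SCORE_ADJ_MAX  1000

/* Legacy oom_adj -> oom_score_adj, the same scaling as the write path. */
static int adj_to_score_adj(int oom_adj)
{
	if (oom_adj == OOM_ADJUST_MAX)
		return OOM_SCORE_ADJ_MAX;	/* keep the maximum reachable */
	return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
}

/* oom_score_adj -> legacy oom_adj, the same scaling as the read path. */
static int score_adj_to_adj(int oom_score_adj)
{
	if (oom_score_adj == OOM_SCORE_ADJ_MAX)
		return OOM_ADJUST_MAX;
	return (oom_score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX;
}

int main(void)
{
	printf("%d\n", adj_to_score_adj(8));		/* 8 * 1000 / 17  = 470  */
	printf("%d\n", adj_to_score_adj(OOM_DISABLE));	/* -17 * 1000 / 17 = -1000 */
	printf("%d\n", score_adj_to_adj(470));		/* 470 * 17 / 1000 = 7, so
							   the round trip is not exact */
	return 0;
}
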
+ */ + if (oom_adj == OOM_ADJUST_MAX) + oom_adj = OOM_SCORE_ADJ_MAX; + else + oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; + + if (oom_adj < task->signal->oom_score_adj && + !capable(CAP_SYS_RESOURCE)) { + err = -EACCES; + goto err_sighand; + } + + /* + * /proc/pid/oom_adj is provided for legacy purposes, ask users to use + * /proc/pid/oom_score_adj instead. + */ + printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", + current->comm, task_pid_nr(current), task_pid_nr(task), + task_pid_nr(task)); + + task->signal->oom_score_adj = oom_adj; + trace_oom_score_adj_update(task); +err_sighand: + unlock_task_sighand(task, &flags); +err_task_lock: + task_unlock(task); + put_task_struct(task); +out: + return err < 0 ? err : count; +} + +static const struct file_operations proc_oom_adj_operations = { + .read = oom_adj_read, + .write = oom_adj_write, + .llseek = generic_file_llseek, +}; + static ssize_t oom_score_adj_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -2598,6 +2705,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif INF("oom_score", S_IRUGO, proc_oom_score), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), @@ -2964,6 +3072,7 @@ static const struct pid_entry tid_base_stuff[] = { REG("cgroup", S_IRUGO, proc_cgroup_operations), #endif INF("oom_score", S_IRUGO, proc_oom_score), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index a40da07e93d..947fbe06c3b 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -161,6 +161,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) while (s < e) { unsigned long flags; + u64 id; if (c > psinfo->bufsize) c = psinfo->bufsize; @@ -172,7 +173,7 @@ static void pstore_console_write(struct console *con, const char *s, unsigned c) spin_lock_irqsave(&psinfo->buf_lock, flags); } memcpy(psinfo->buf, s, c); - psinfo->write(PSTORE_TYPE_CONSOLE, 0, NULL, 0, c, psinfo); + psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, c, psinfo); spin_unlock_irqrestore(&psinfo->buf_lock, flags); s += c; c = e - s; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index f27f01a98aa..d83736fbc26 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, BUG_ON(!th->t_trans_id); - dquot_initialize(inode); + reiserfs_write_unlock(inode->i_sb); err = dquot_alloc_inode(inode); + reiserfs_write_lock(inode->i_sb); if (err) goto out_end_trans; if (!dir->i_nlink) { @@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, out_end_trans: journal_end(th, th->t_super, th->t_blocks_allocated); + reiserfs_write_unlock(inode->i_sb); /* Drop can be outside and it needs more credits so it's better to have it outside */ dquot_drop(inode); + reiserfs_write_lock(inode->i_sb); inode->i_flags |= S_NOQUOTA; make_bad_inode(inode); @@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) /* must be turned off for recursive notify_change calls */ ia_valid = attr->ia_valid &= 
~(ATTR_KILL_SUID|ATTR_KILL_SGID); - depth = reiserfs_write_lock_once(inode->i_sb); if (is_quota_modification(inode, attr)) dquot_initialize(inode); - + depth = reiserfs_write_lock_once(inode->i_sb); if (attr->ia_valid & ATTR_SIZE) { /* version 2 items will be caught by the s_maxbytes check ** done for us in vmtruncate @@ -3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) error = journal_begin(&th, inode->i_sb, jbegin_count); if (error) goto out; + reiserfs_write_unlock_once(inode->i_sb, depth); error = dquot_transfer(inode, attr); + depth = reiserfs_write_lock_once(inode->i_sb); if (error) { journal_end(&th, inode->i_sb, jbegin_count); goto out; diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index f8afa4b162b..2f40a4c70a4 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree key2type(&(key->on_disk_key))); #endif + reiserfs_write_unlock(inode->i_sb); retval = dquot_alloc_space_nodirty(inode, pasted_size); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(search_path); return retval; @@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, "reiserquota insert_item(): allocating %u id=%u type=%c", quota_bytes, inode->i_uid, head2type(ih)); #endif + reiserfs_write_unlock(inode->i_sb); /* We can't dirty inode here. It would be immediately written but * appropriate stat item isn't inserted yet... */ retval = dquot_alloc_space_nodirty(inode, quota_bytes); + reiserfs_write_lock(inode->i_sb); if (retval) { pathrelse(path); return retval; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 1078ae17999..418bdc3a57d 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s) retval = remove_save_link_only(s, &save_link_key, 0); continue; } + reiserfs_write_unlock(s); dquot_initialize(inode); + reiserfs_write_lock(s); if (truncate && S_ISDIR(inode->i_mode)) { /* We got a truncate request for a dir which is impossible. @@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) kfree(qf_names[i]); #endif err = -EINVAL; - goto out_err; + goto out_unlock; } #ifdef CONFIG_QUOTA handle_quota_files(s, qf_names, &qfmt); @@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (blocks) { err = reiserfs_resize(s, blocks); if (err != 0) - goto out_err; + goto out_unlock; } if (*mount_flags & MS_RDONLY) { @@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) /* it is read-only already */ goto out_ok; + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_suspend() without it. + */ + reiserfs_write_unlock(s); err = dquot_suspend(s, -1); if (err < 0) goto out_err; + reiserfs_write_lock(s); /* try to remount file system with read-only permissions */ if (sb_umount_state(rs) == REISERFS_VALID_FS @@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mounting a rw partition read-only. 
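
The reiserfs hunks above and below all follow one pattern: the per-superblock write lock is dropped around dquot_commit(), dquot_suspend() and friends because, per the added comments, lock ordering forbids holding it while the quota code runs, and it is re-taken immediately afterwards. A generic pthread sketch of that drop/call/re-take shape, with invented lock and function names (it does not model reiserfs itself):

#include <pthread.h>

static pthread_mutex_t fs_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t quota_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the quota code: it takes its own lock and, on other paths,
 * may also take fs_lock, which is why callers must not hold fs_lock when
 * calling in. */
static void quota_operation(void)
{
	pthread_mutex_lock(&quota_lock);
	/* ... quota bookkeeping ... */
	pthread_mutex_unlock(&quota_lock);
}

static void fs_operation(void)
{
	pthread_mutex_lock(&fs_lock);
	/* ... filesystem work that needs fs_lock ... */

	pthread_mutex_unlock(&fs_lock);	/* drop it, as the hunks above do */
	quota_operation();
	pthread_mutex_lock(&fs_lock);	/* re-take it and carry on */

	/* ... more filesystem work ... */
	pthread_mutex_unlock(&fs_lock);
}

int main(void)
{
	fs_operation();
	return 0;
}
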
*/ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (reiserfs_is_journal_aborted(journal)) { err = journal->j_errno; - goto out_err; + goto out_unlock; } handle_data_mode(s, mount_options); @@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ err = journal_begin(&th, s, 10); if (err) - goto out_err; + goto out_unlock; /* Mount a partition which is read-only, read-write */ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); @@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) SB_JOURNAL(s)->j_must_wait = 1; err = journal_end(&th, s, 10); if (err) - goto out_err; + goto out_unlock; if (!(*mount_flags & MS_RDONLY)) { + /* + * Drop write lock. Quota will retake it when needed and lock + * ordering requires calling dquot_resume() without it. + */ + reiserfs_write_unlock(s); dquot_resume(s, -1); + reiserfs_write_lock(s); finish_unfinished(s); reiserfs_xattr_init(s, *mount_flags); } @@ -1455,9 +1469,10 @@ out_ok: reiserfs_write_unlock(s); return 0; +out_unlock: + reiserfs_write_unlock(s); out_err: kfree(new_opts); - reiserfs_write_unlock(s); return err; } @@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot) REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_commit(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot) REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (ret) goto out; + reiserfs_write_unlock(dquot->dq_sb); ret = dquot_acquire(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(dquot->dq_sb); return ret; } @@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot) ret = journal_begin(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + reiserfs_write_unlock(dquot->dq_sb); if (ret) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); goto out; } ret = dquot_release(dquot); + reiserfs_write_lock(dquot->dq_sb); err = journal_end(&th, dquot->dq_sb, REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (!ret && err) ret = err; - out: reiserfs_write_unlock(dquot->dq_sb); +out: return ret; } @@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type) ret = journal_begin(&th, sb, 2); if (ret) goto out; + reiserfs_write_unlock(sb); ret = dquot_commit_info(sb, type); + reiserfs_write_lock(sb); err = journal_end(&th, sb, 2); if (!ret && err) ret = err; - out: +out: reiserfs_write_unlock(sb); return ret; } @@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, struct reiserfs_transaction_handle th; int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA; - if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) - return -EINVAL; + reiserfs_write_lock(sb); + if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) { + err = -EINVAL; + goto out; + } /* Quotafile not on the same filesystem? 
*/ if (path->dentry->d_sb != sb) { @@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (err) goto out; } - err = dquot_quota_on(sb, type, format_id, path); + reiserfs_write_unlock(sb); + return dquot_quota_on(sb, type, format_id, path); out: + reiserfs_write_unlock(sb); return err; } @@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; + reiserfs_write_lock(sb); err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); + reiserfs_write_unlock(sb); if (err) goto out; if (offset || tocopy != sb->s_blocksize) @@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); unlock_buffer(bh); + reiserfs_write_lock(sb); reiserfs_prepare_for_journal(sb, bh, 1); journal_mark_dirty(current->journal_info, sb, bh); if (!journal_quota) reiserfs_add_ordered_list(inode, bh); + reiserfs_write_unlock(sb); brelse(bh); offset = 0; towrite -= tocopy; diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index 28ec13af28d..2dcf3d473fe 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c @@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) if (!lprops) { lprops = ubifs_fast_find_freeable(c); if (!lprops) { - ubifs_assert(c->freeable_cnt == 0); - if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + /* + * The first condition means the following: go scan the + * LPT if there are uncategorized lprops, which means + * there may be freeable LEBs there (UBIFS does not + * store the information about freeable LEBs in the + * master node). + */ + if (c->in_a_category_cnt != c->main_lebs || + c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { + ubifs_assert(c->freeable_cnt == 0); lprops = scan_for_leb_for_idx(c); if (IS_ERR(lprops)) { err = PTR_ERR(lprops); diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index e5a2a35a46d..46190a7c42a 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, default: ubifs_assert(0); } + lprops->flags &= ~LPROPS_CAT_MASK; lprops->flags |= cat; + c->in_a_category_cnt += 1; + ubifs_assert(c->in_a_category_cnt <= c->main_lebs); } /** @@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, default: ubifs_assert(0); } + + c->in_a_category_cnt -= 1; + ubifs_assert(c->in_a_category_cnt >= 0); } /** diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 5486346d0a3..d133c276fe0 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1183,6 +1183,8 @@ struct ubifs_debug_info; * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) * @freeable_cnt: number of freeable LEBs in @freeable_list + * @in_a_category_cnt: count of lprops which are in a certain category, which + * basically meants that they were loaded from the flash * * @ltab_lnum: LEB number of LPT's own lprops table * @ltab_offs: offset of LPT's own lprops table @@ -1412,6 +1414,7 @@ struct ubifs_info { struct list_head freeable_list; struct list_head frdi_idx_list; int freeable_cnt; + int in_a_category_cnt; int ltab_lnum; int ltab_offs; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index e562dd43f41..e57e2daa357 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -481,11 +481,17 @@ static inline int 
bio_add_buffer(struct bio *bio, struct buffer_head *bh) * * The fix is two passes across the ioend list - one to start writeback on the * buffer_heads, and then submit them for I/O on the second pass. + * + * If @fail is non-zero, it means that we have a situation where some part of + * the submission process has failed after we have marked paged for writeback + * and unlocked them. In this situation, we need to fail the ioend chain rather + * than submit it to IO. This typically only happens on a filesystem shutdown. */ STATIC void xfs_submit_ioend( struct writeback_control *wbc, - xfs_ioend_t *ioend) + xfs_ioend_t *ioend, + int fail) { xfs_ioend_t *head = ioend; xfs_ioend_t *next; @@ -506,6 +512,18 @@ xfs_submit_ioend( next = ioend->io_list; bio = NULL; + /* + * If we are failing the IO now, just mark the ioend with an + * error and finish it. This will run IO completion immediately + * as there is only one reference to the ioend at this point in + * time. + */ + if (fail) { + ioend->io_error = -fail; + xfs_finish_ioend(ioend); + continue; + } + for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { if (!bio) { @@ -1060,7 +1078,18 @@ xfs_vm_writepage( xfs_start_page_writeback(page, 1, count); - if (ioend && imap_valid) { + /* if there is no IO to be submitted for this page, we are done */ + if (!ioend) + return 0; + + ASSERT(iohead); + + /* + * Any errors from this point onwards need tobe reported through the IO + * completion path as we have marked the initial page as under writeback + * and unlocked it. + */ + if (imap_valid) { xfs_off_t end_index; end_index = imap.br_startoff + imap.br_blockcount; @@ -1079,20 +1108,15 @@ xfs_vm_writepage( wbc, end_index); } - if (iohead) { - /* - * Reserve log space if we might write beyond the on-disk - * inode size. - */ - if (ioend->io_type != XFS_IO_UNWRITTEN && - xfs_ioend_is_append(ioend)) { - err = xfs_setfilesize_trans_alloc(ioend); - if (err) - goto error; - } - xfs_submit_ioend(wbc, iohead); - } + /* + * Reserve log space if we might write beyond the on-disk inode size. + */ + err = 0; + if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend)) + err = xfs_setfilesize_trans_alloc(ioend); + + xfs_submit_ioend(wbc, iohead, err); return 0; diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index d330111ca73..70eec182977 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, leaf2 = blk2->bp->b_addr; ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf2->hdr.count == 0); args = state->args; trace_xfs_attr_leaf_rebalance(args); @@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * I assert that since all callers pass in an empty * second buffer, this code should never execute. */ + ASSERT(0); /* * Figure the total bytes to be added to the destination leaf. @@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, args->index2 = 0; args->blkno2 = blk2->blkno; } else { + /* + * On a double leaf split, the original attr location + * is already stored in blkno2/index2, so don't + * overwrite it overwise we corrupt the tree. 
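
The xfs_submit_ioend() change above adds a fail argument because, once pages have been marked for writeback and unlocked, an error can no longer be returned from writepage; each pending ioend has to be completed with the error instead of being submitted. The reduced sketch below shows only that chain walk, with stand-in types and helpers rather than the XFS ones:

#include <stddef.h>

struct ioend {
	struct ioend *next;
	int error;
};

static void finish_ioend(struct ioend *io)
{
	/* run the completion path, which reports io->error to the waiter */
}

static void submit_ioend_io(struct ioend *io)
{
	/* build and queue the bios for this ioend */
}

/* Walk the chain once: on failure, mark and complete each ioend so the
 * error travels through the completion path; otherwise submit it. */
static void submit_chain(struct ioend *head, int fail)
{
	struct ioend *io, *next;

	for (io = head; io; io = next) {
		next = io->next;
		if (fail) {
			io->error = -fail;	/* fail is a positive errno here */
			finish_ioend(io);
			continue;
		}
		submit_ioend_io(io);
	}
}
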
+ */ blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); - args->index = args->index2 = blk2->index; - args->blkno = args->blkno2 = blk2->blkno; + args->index = blk2->index; + args->blkno = blk2->blkno; + if (!state->extravalid) { + /* + * set the new attr location to match the old + * one and let the higher level split code + * decide where in the leaf to place it. + */ + args->index2 = blk2->index; + args->blkno2 = blk2->blkno; + } } } else { ASSERT(state->inleaf == 1); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 933b7930b86..4b0b8dd1b7b 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io( { xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; - xfs_buf_ioerror(bp, -error); + /* + * don't overwrite existing errors - otherwise we can lose errors on + * buffers that require multiple bios to complete. + */ + if (!bp->b_error) + xfs_buf_ioerror(bp, -error); - if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); _xfs_buf_ioend(bp, 1); @@ -1279,6 +1284,11 @@ next_chunk: if (size) goto next_chunk; } else { + /* + * This is guaranteed not to be the last io reference count + * because the caller (xfs_buf_iorequest) holds a count itself. + */ + atomic_dec(&bp->b_io_remaining); xfs_buf_ioerror(bp, EIO); bio_put(bio); } diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index 03f14856bd0..0943457e0fa 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h @@ -241,6 +241,7 @@ *****************************************************************************/ #define ACPI_DEBUGGER_MAX_ARGS 8 /* Must be max method args + 1 */ +#define ACPI_DB_LINE_BUFFER_SIZE 512 #define ACPI_DEBUGGER_COMMAND_PROMPT '-' #define ACPI_DEBUGGER_EXECUTE_PROMPT '%' diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h index 19503449814..6c3890e0214 100644 --- a/include/acpi/acexcep.h +++ b/include/acpi/acexcep.h @@ -122,7 +122,7 @@ #define AE_CODE_TBL_MAX 0x0005 /* - * AML exceptions. These are caused by problems with + * AML exceptions. 
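
The xfs_buf.c hunk above stops a later bio completion from overwriting an error recorded by an earlier one, which matters when a single buffer needs several bios. The same "first error wins" bookkeeping in isolation, using an invented structure rather than xfs_buf:

struct split_io {
	int first_error;	/* 0 until some fragment fails */
	int remaining;		/* fragments still in flight */
};

/* Called once per completed fragment; only the first failure is kept,
 * mirroring the !bp->b_error check added above. */
static void fragment_done(struct split_io *io, int error)
{
	if (error && !io->first_error)
		io->first_error = error;
	io->remaining--;
	/* when remaining reaches 0, the owner completes io with first_error */
}
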
These are caused by problems with * the actual AML byte stream */ #define AE_AML_BAD_OPCODE (acpi_status) (0x0001 | AE_CODE_AML) diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h index 745dd24e3cb..7665df66328 100644 --- a/include/acpi/acnames.h +++ b/include/acpi/acnames.h @@ -50,6 +50,7 @@ #define METHOD_NAME__HID "_HID" #define METHOD_NAME__CID "_CID" #define METHOD_NAME__UID "_UID" +#define METHOD_NAME__SUB "_SUB" #define METHOD_NAME__ADR "_ADR" #define METHOD_NAME__INI "_INI" #define METHOD_NAME__STA "_STA" diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c4ed4c2389c..7ced5dc20dd 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -144,12 +144,11 @@ struct acpi_device_flags { u32 bus_address:1; u32 removable:1; u32 ejectable:1; - u32 lockable:1; u32 suprise_removal_ok:1; u32 power_manageable:1; u32 performance_manageable:1; u32 eject_pending:1; - u32 reserved:23; + u32 reserved:24; }; /* File System */ @@ -180,6 +179,7 @@ struct acpi_device_pnp { acpi_device_name device_name; /* Driver-determined */ acpi_device_class device_class; /* " */ union acpi_object *str_obj; /* unicode string for _STR method */ + unsigned long sun; /* _SUN */ }; #define acpi_device_bid(d) ((d)->pnp.bus_id) diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 1222ba93d80..43152742b46 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -1,7 +1,6 @@ - /****************************************************************************** * - * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These + * Name: acpiosxf.h - All interfaces to the OS Services Layer (OSL). These * interfaces must be implemented by OSL to interface the * ACPI components to the host operating system. * diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 8b891dbead6..3d88395d4d6 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -1,4 +1,3 @@ - /****************************************************************************** * * Name: acpixf.h - External interfaces to the ACPI subsystem @@ -47,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20120913 +#define ACPI_CA_VERSION 0x20121018 #include <acpi/acconfig.h> #include <acpi/actypes.h> @@ -178,8 +177,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id); acpi_status acpi_get_table_header(acpi_string signature, - u32 instance, - struct acpi_table_header *out_table_header); + u32 instance, struct acpi_table_header *out_table_header); acpi_status acpi_get_table_with_size(acpi_string signature, @@ -190,8 +188,7 @@ acpi_get_table(acpi_string signature, u32 instance, struct acpi_table_header **out_table); acpi_status -acpi_get_table_by_index(u32 table_index, - struct acpi_table_header **out_table); +acpi_get_table_by_index(u32 table_index, struct acpi_table_header **out_table); acpi_status acpi_install_table_handler(acpi_tbl_handler handler, void *context); @@ -274,7 +271,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function); ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_install_global_event_handler - (ACPI_GBL_EVENT_HANDLER handler, void *context)) + (acpi_gbl_event_handler handler, void *context)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_install_fixed_event_handler(u32 @@ -300,10 +297,9 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status u32 gpe_number, acpi_gpe_handler address)) -acpi_status -acpi_install_notify_handler(acpi_handle device, - u32 handler_type, - acpi_notify_handler handler, void 
*context); +acpi_status acpi_install_notify_handler(acpi_handle device, u32 handler_type, + acpi_notify_handler handler, + void *context); acpi_status acpi_remove_notify_handler(acpi_handle device, diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 8c61b5fe42a..6585141e4b9 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -277,10 +277,10 @@ struct acpi_table_gtdt { ******************************************************************************/ #define ACPI_MPST_CHANNEL_INFO \ - u16 reserved1; \ u8 channel_id; \ - u8 reserved2; \ - u16 power_node_count; + u8 reserved1[3]; \ + u16 power_node_count; \ + u16 reserved2; /* Main table */ @@ -304,9 +304,8 @@ struct acpi_mpst_power_node { u32 length; u64 range_address; u64 range_length; - u8 num_power_states; - u8 num_physical_components; - u16 reserved2; + u32 num_power_states; + u32 num_physical_components; }; /* Values for Flags field above */ @@ -332,10 +331,11 @@ struct acpi_mpst_component { struct acpi_mpst_data_hdr { u16 characteristics_count; + u16 reserved; }; struct acpi_mpst_power_data { - u8 revision; + u8 structure_id; u8 flags; u16 reserved1; u32 average_power; @@ -356,10 +356,10 @@ struct acpi_mpst_shared { u32 signature; u16 pcc_command; u16 pcc_status; - u16 command_register; - u16 status_register; - u16 power_state_id; - u16 power_node_id; + u32 command_register; + u32 status_register; + u32 power_state_id; + u32 power_node_id; u64 energy_consumed; u64 average_power; }; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index a85bae96826..4f43f1fba13 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -453,10 +453,14 @@ typedef u64 acpi_integer; #define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i) #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) +/* Optimizations for 4-character (32-bit) acpi_name manipulation */ + #ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED #define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b))) +#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src))) #else #define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE)) +#define ACPI_MOVE_NAME(dest,src) (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE)) #endif /******************************************************************************* @@ -796,11 +800,11 @@ typedef u8 acpi_adr_space_type; /* Sleep function dispatch */ -typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state); +typedef acpi_status(*acpi_sleep_function) (u8 sleep_state); struct acpi_sleep_functions { - ACPI_SLEEP_FUNCTION legacy_function; - ACPI_SLEEP_FUNCTION extended_function; + acpi_sleep_function legacy_function; + acpi_sleep_function extended_function; }; /* @@ -922,7 +926,8 @@ struct acpi_system_info { /* * Types specific to the OS service interfaces */ -typedef u32(ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); +typedef u32 + (ACPI_SYSTEM_XFACE * acpi_osd_handler) (void *context); typedef void (ACPI_SYSTEM_XFACE * acpi_osd_exec_callback) (void *context); @@ -931,14 +936,15 @@ typedef void * Various handlers and callback procedures */ typedef -void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, +void (*acpi_gbl_event_handler) (u32 event_type, acpi_handle device, u32 event_number, void *context); #define ACPI_EVENT_TYPE_GPE 0 #define ACPI_EVENT_TYPE_FIXED 1 -typedef u32(*acpi_event_handler) (void *context); +typedef +u32(*acpi_event_handler) (void *context); typedef u32 
(*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); @@ -1018,17 +1024,17 @@ u32 (*acpi_interface_handler) (acpi_string interface_name, u32 supported); #define ACPI_UUID_LENGTH 16 -/* Structures used for device/processor HID, UID, CID */ +/* Structures used for device/processor HID, UID, CID, and SUB */ -struct acpica_device_id { +struct acpi_pnp_device_id { u32 length; /* Length of string + null */ char *string; }; -struct acpica_device_id_list { +struct acpi_pnp_device_id_list { u32 count; /* Number of IDs in Ids array */ u32 list_size; /* Size of list, including ID strings */ - struct acpica_device_id ids[1]; /* ID array */ + struct acpi_pnp_device_id ids[1]; /* ID array */ }; /* @@ -1046,9 +1052,10 @@ struct acpi_device_info { u8 lowest_dstates[5]; /* _sx_w values: 0xFF indicates not valid */ u32 current_status; /* _STA value */ u64 address; /* _ADR value */ - struct acpica_device_id hardware_id; /* _HID value */ - struct acpica_device_id unique_id; /* _UID value */ - struct acpica_device_id_list compatible_id_list; /* _CID list <must be last> */ + struct acpi_pnp_device_id hardware_id; /* _HID value */ + struct acpi_pnp_device_id unique_id; /* _UID value */ + struct acpi_pnp_device_id subsystem_id; /* _SUB value */ + struct acpi_pnp_device_id_list compatible_id_list; /* _CID list <must be last> */ }; /* Values for Flags field above (acpi_get_object_info) */ @@ -1061,11 +1068,12 @@ struct acpi_device_info { #define ACPI_VALID_ADR 0x02 #define ACPI_VALID_HID 0x04 #define ACPI_VALID_UID 0x08 -#define ACPI_VALID_CID 0x10 -#define ACPI_VALID_SXDS 0x20 -#define ACPI_VALID_SXWS 0x40 +#define ACPI_VALID_SUB 0x10 +#define ACPI_VALID_CID 0x20 +#define ACPI_VALID_SXDS 0x40 +#define ACPI_VALID_SXWS 0x80 -/* Flags for _STA method */ +/* Flags for _STA return value (current_status above) */ #define ACPI_STA_DEVICE_PRESENT 0x01 #define ACPI_STA_DEVICE_ENABLED 0x02 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index af1cbaf535e..c5c35e62942 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -210,6 +210,7 @@ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3574e4a2bf1..c33fa3ce9b7 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -279,10 +279,14 @@ int acpi_check_region(resource_size_t start, resource_size_t n, int acpi_resources_are_enforced(void); -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_HIBERNATION void __init acpi_no_s4_hw_signature(void); +#endif + +#ifdef CONFIG_PM_SLEEP void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); +void __init acpi_nvs_nosave_s3(void); #endif /* CONFIG_PM_SLEEP */ struct acpi_osc_context { @@ -507,7 +511,7 @@ static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } #if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_pm_attach(struct device *dev, bool power_on); -int acpi_dev_pm_detach(struct device *dev, bool power_off); +void 
acpi_dev_pm_detach(struct device *dev, bool power_off); #else static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { @@ -516,4 +520,47 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} #endif +#ifdef CONFIG_ACPI +__printf(3, 4) +void acpi_handle_printk(const char *level, acpi_handle handle, + const char *fmt, ...); +#else /* !CONFIG_ACPI */ +static inline __printf(3, 4) void +acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} +#endif /* !CONFIG_ACPI */ + +/* + * acpi_handle_<level>: Print message with ACPI prefix and object path + * + * These interfaces acquire the global namespace mutex to obtain an object + * path. In interrupt context, it shows the object path as <n/a>. + */ +#define acpi_handle_emerg(handle, fmt, ...) \ + acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_alert(handle, fmt, ...) \ + acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_crit(handle, fmt, ...) \ + acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_err(handle, fmt, ...) \ + acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_warn(handle, fmt, ...) \ + acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_notice(handle, fmt, ...) \ + acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) +#define acpi_handle_info(handle, fmt, ...) \ + acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) + +/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */ +#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +#define acpi_handle_debug(handle, fmt, ...) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) +#else +#define acpi_handle_debug(handle, fmt, ...) \ +({ \ + if (0) \ + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ + 0; \ +}) +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index c1273158292..f9f5e9eeb9d 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -335,8 +335,8 @@ const char *__clk_get_name(struct clk *clk); struct clk_hw *__clk_get_hw(struct clk *clk); u8 __clk_get_num_parents(struct clk *clk); struct clk *__clk_get_parent(struct clk *clk); -inline int __clk_get_enable_count(struct clk *clk); -inline int __clk_get_prepare_count(struct clk *clk); +int __clk_get_enable_count(struct clk *clk); +int __clk_get_prepare_count(struct clk *clk); unsigned long __clk_get_rate(struct clk *clk); unsigned long __clk_get_flags(struct clk *clk); int __clk_is_enabled(struct clk *clk); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index b60f6ba01d0..a55b88eaf96 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -11,6 +11,7 @@ #ifndef _LINUX_CPUFREQ_H #define _LINUX_CPUFREQ_H +#include <asm/cputime.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/threads.h> @@ -22,6 +23,8 @@ #include <asm/div64.h> #define CPUFREQ_NAME_LEN 16 +/* Print length for names. 
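
The acpi_handle_debug() fallback above uses the common trick of wrapping the call in "if (0)" inside a statement expression: the arguments and format string are still type-checked by the compiler, but no code is generated when debugging is off. The same shape in plain C (do/while form instead of a statement expression, names invented):

#include <stdio.h>

#define MY_DEBUG_ON 0

#if MY_DEBUG_ON
#define my_debug(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define my_debug(fmt, ...)				\
	do {						\
		if (0)					\
			printf(fmt, ##__VA_ARGS__);	\
	} while (0)
#endif

int main(void)
{
	int handle = 42;

	my_debug("handle = %d\n", handle);	/* checked, then compiled away */
	return 0;
}
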
Extra 1 space for accomodating '\n' in prints */ +#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) /********************************************************************* @@ -404,6 +407,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu); - - #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 279b1eaa8b7..3711b34dc4f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -82,13 +82,6 @@ cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data) st_usage->driver_data = data; } -struct cpuidle_state_kobj { - struct cpuidle_state *state; - struct cpuidle_state_usage *state_usage; - struct completion kobj_unregister; - struct kobject kobj; -}; - struct cpuidle_device { unsigned int registered:1; unsigned int enabled:1; @@ -98,7 +91,7 @@ struct cpuidle_device { int state_count; struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; - + struct cpuidle_driver_kobj *kobj_driver; struct list_head device_list; struct kobject kobj; struct completion kobj_unregister; @@ -131,6 +124,7 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) struct cpuidle_driver { const char *name; struct module *owner; + int refcnt; unsigned int power_specified:1; /* set to 1 to use the core cpuidle time keeping (for all states). */ @@ -163,6 +157,10 @@ extern int cpuidle_wrap_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)); extern int cpuidle_play_dead(void); +extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); +extern int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu); +extern void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu); + #else static inline void disable_cpuidle(void) { } static inline int cpuidle_idle_call(void) { return -ENODEV; } @@ -189,7 +187,6 @@ static inline int cpuidle_wrap_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index)) { return -ENODEV; } static inline int cpuidle_play_dead(void) {return -ENODEV; } - #endif #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 281c72a3b9d..e83ef39b3be 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -25,12 +25,12 @@ struct devfreq; * struct devfreq_dev_status - Data given from devfreq user device to * governors. Represents the performance * statistics. - * @total_time The total time represented by this instance of + * @total_time: The total time represented by this instance of * devfreq_dev_status - * @busy_time The time that the device was working among the + * @busy_time: The time that the device was working among the * total_time. - * @current_frequency The operating frequency. - * @private_data An entry not specified by the devfreq framework. + * @current_frequency: The operating frequency. + * @private_data: An entry not specified by the devfreq framework. * A device and a specific governor may have their * own protocol with private_data. However, because * this is governor-specific, a governor using this @@ -54,23 +54,27 @@ struct devfreq_dev_status { /** * struct devfreq_dev_profile - Devfreq's user device profile - * @initial_freq The operating frequency when devfreq_add_device() is + * @initial_freq: The operating frequency when devfreq_add_device() is * called. - * @polling_ms The polling interval in ms. 
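
The devfreq_dev_status kerneldoc above is what governors work from: the driver reports busy_time and total_time for the last interval and the governor derives a load from their ratio (the devfreq_governor comment further below states it as load = busy_time / total_time). A stripped-down sketch of that calculation, with a plain struct instead of the kernel one:

struct dev_status {
	unsigned long total_time;
	unsigned long busy_time;
	unsigned long current_frequency;
};

/* Load of the last interval in percent; an empty interval counts as idle. */
static unsigned int status_to_load_pct(const struct dev_status *st)
{
	if (st->total_time == 0)
		return 0;
	return (unsigned int)((st->busy_time * 100UL) / st->total_time);
}
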
0 disables polling. - * @target The device should set its operating frequency at + * @polling_ms: The polling interval in ms. 0 disables polling. + * @target: The device should set its operating frequency at * freq or lowest-upper-than-freq value. If freq is * higher than any operable frequency, set maximum. * Before returning, target function should set * freq at the current frequency. * The "flags" parameter's possible values are * explained above with "DEVFREQ_FLAG_*" macros. - * @get_dev_status The device should provide the current performance + * @get_dev_status: The device should provide the current performance * status to devfreq, which is used by governors. - * @exit An optional callback that is called when devfreq + * @get_cur_freq: The device should provide the current frequency + * at which it is operating. + * @exit: An optional callback that is called when devfreq * is removing the devfreq object due to error or * from devfreq_remove_device() call. If the user * has registered devfreq->nb at a notifier-head, * this is the time to unregister it. + * @freq_table: Optional list of frequencies to support statistics. + * @max_state: The size of freq_table. */ struct devfreq_dev_profile { unsigned long initial_freq; @@ -79,63 +83,63 @@ struct devfreq_dev_profile { int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, struct devfreq_dev_status *stat); + int (*get_cur_freq)(struct device *dev, unsigned long *freq); void (*exit)(struct device *dev); + + unsigned int *freq_table; + unsigned int max_state; }; /** * struct devfreq_governor - Devfreq policy governor - * @name Governor's name - * @get_target_freq Returns desired operating frequency for the device. + * @node: list node - contains registered devfreq governors + * @name: Governor's name + * @get_target_freq: Returns desired operating frequency for the device. * Basically, get_target_freq will run * devfreq_dev_profile.get_dev_status() to get the * status of the device (load = busy_time / total_time). * If no_central_polling is set, this callback is called * only with update_devfreq() notified by OPP. - * @init Called when the devfreq is being attached to a device - * @exit Called when the devfreq is being removed from a - * device. Governor should stop any internal routines - * before return because related data may be - * freed after exit(). - * @no_central_polling Do not use devfreq's central polling mechanism. - * When this is set, devfreq will not call - * get_target_freq with devfreq_monitor(). However, - * devfreq will call get_target_freq with - * devfreq_update() notified by OPP framework. + * @event_handler: Callback for devfreq core framework to notify events + * to governors. Events include per device governor + * init and exit, opp changes out of devfreq, suspend + * and resume of per device devfreq during device idle. * * Note that the callbacks are called with devfreq->lock locked by devfreq. */ struct devfreq_governor { + struct list_head node; + const char name[DEVFREQ_NAME_LEN]; int (*get_target_freq)(struct devfreq *this, unsigned long *freq); - int (*init)(struct devfreq *this); - void (*exit)(struct devfreq *this); - const bool no_central_polling; + int (*event_handler)(struct devfreq *devfreq, + unsigned int event, void *data); }; /** * struct devfreq - Device devfreq structure - * @node list node - contains the devices with devfreq that have been + * @node: list node - contains the devices with devfreq that have been * registered. 
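
The profile additions above (get_cur_freq, plus the optional freq_table/max_state pair used for statistics) are filled in by the device driver. A sketch of how a driver-side profile might look with the new fields; the callbacks, frequencies and names are invented, so treat it as a shape rather than a real driver:

#include <linux/kernel.h>
#include <linux/devfreq.h>

static unsigned int my_freq_table[] = { 100000, 200000, 400000 };	/* kHz, made up */

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* program the closest supported rate and write the chosen one back */
	return 0;
}

static int my_get_cur_freq(struct device *dev, unsigned long *freq)
{
	*freq = 200000;		/* a real driver reads this from the hardware */
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000,
	.polling_ms	= 100,
	.target		= my_target,
	.get_cur_freq	= my_get_cur_freq,
	.freq_table	= my_freq_table,
	.max_state	= ARRAY_SIZE(my_freq_table),
};
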
- * @lock a mutex to protect accessing devfreq. - * @dev device registered by devfreq class. dev.parent is the device + * @lock: a mutex to protect accessing devfreq. + * @dev: device registered by devfreq class. dev.parent is the device * using devfreq. - * @profile device-specific devfreq profile - * @governor method how to choose frequency based on the usage. - * @nb notifier block used to notify devfreq object that it should + * @profile: device-specific devfreq profile + * @governor: method how to choose frequency based on the usage. + * @governor_name: devfreq governor name for use with this devfreq + * @nb: notifier block used to notify devfreq object that it should * reevaluate operable frequencies. Devfreq users may use * devfreq.nb to the corresponding register notifier call chain. - * @polling_jiffies interval in jiffies. - * @previous_freq previously configured frequency value. - * @next_polling the number of remaining jiffies to poll with - * "devfreq_monitor" executions to reevaluate - * frequency/voltage of the device. Set by - * profile's polling_ms interval. - * @data Private data of the governor. The devfreq framework does not + * @work: delayed work for load monitoring. + * @previous_freq: previously configured frequency value. + * @data: Private data of the governor. The devfreq framework does not * touch this. - * @being_removed a flag to mark that this object is being removed in - * order to prevent trying to remove the object multiple times. - * @min_freq Limit minimum frequency requested by user (0: none) - * @max_freq Limit maximum frequency requested by user (0: none) + * @min_freq: Limit minimum frequency requested by user (0: none) + * @max_freq: Limit maximum frequency requested by user (0: none) + * @stop_polling: devfreq polling status of a device. + * @total_trans: Number of devfreq transitions + * @trans_table: Statistics of devfreq transitions + * @time_in_state: Statistics of devfreq states + * @last_stat_updated: The last time stat updated * * This structure stores the devfreq information for a give device. * @@ -152,26 +156,33 @@ struct devfreq { struct device dev; struct devfreq_dev_profile *profile; const struct devfreq_governor *governor; + char governor_name[DEVFREQ_NAME_LEN]; struct notifier_block nb; + struct delayed_work work; - unsigned long polling_jiffies; unsigned long previous_freq; - unsigned int next_polling; void *data; /* private data for governors */ - bool being_removed; - unsigned long min_freq; unsigned long max_freq; + bool stop_polling; + + /* information for device freqeuncy transition */ + unsigned int total_trans; + unsigned int *trans_table; + unsigned long *time_in_state; + unsigned long last_stat_updated; }; #if defined(CONFIG_PM_DEVFREQ) extern struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, - const struct devfreq_governor *governor, + const char *governor_name, void *data); extern int devfreq_remove_device(struct devfreq *devfreq); +extern int devfreq_suspend_device(struct devfreq *devfreq); +extern int devfreq_resume_device(struct devfreq *devfreq); /* Helper functions for devfreq user device driver with OPP. 
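
With the reworked prototype above, a driver now names its governor as a string instead of passing a struct devfreq_governor pointer. A probe-time fragment using the profile sketched a little earlier; the governor name "simple_ondemand" and the IS_ERR()-based error handling are assumptions about how callers typically use this interface, not something this hunk spells out:

#include <linux/devfreq.h>
#include <linux/err.h>

static int my_devfreq_setup(struct device *dev)
{
	struct devfreq *df;

	df = devfreq_add_device(dev, &my_profile, "simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	return 0;
}
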
*/ extern struct opp *devfreq_recommended_opp(struct device *dev, @@ -181,23 +192,13 @@ extern int devfreq_register_opp_notifier(struct device *dev, extern int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); -#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE -extern const struct devfreq_governor devfreq_powersave; -#endif -#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE -extern const struct devfreq_governor devfreq_performance; -#endif -#ifdef CONFIG_DEVFREQ_GOV_USERSPACE -extern const struct devfreq_governor devfreq_userspace; -#endif -#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND -extern const struct devfreq_governor devfreq_simple_ondemand; +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq * and devfreq_add_device - * @ upthreshold If the load is over this value, the frequency jumps. + * @upthreshold: If the load is over this value, the frequency jumps. * Specify 0 to use the default. Valid value = 0 to 100. - * @ downdifferential If the load is under upthreshold - downdifferential, + * @downdifferential: If the load is under upthreshold - downdifferential, * the governor may consider slowing the frequency down. * Specify 0 to use the default. Valid value = 0 to 100. * downdifferential < upthreshold must hold. @@ -214,7 +215,7 @@ struct devfreq_simple_ondemand_data { #else /* !CONFIG_PM_DEVFREQ */ static struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, - struct devfreq_governor *governor, + const char *governor_name, void *data) { return NULL; @@ -225,6 +226,16 @@ static int devfreq_remove_device(struct devfreq *devfreq) return 0; } +static int devfreq_suspend_device(struct devfreq *devfreq) +{ + return 0; +} + +static int devfreq_resume_device(struct devfreq *devfreq) +{ + return 0; +} + static struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { @@ -243,11 +254,6 @@ static int devfreq_unregister_opp_notifier(struct device *dev, return -EINVAL; } -#define devfreq_powersave NULL -#define devfreq_performance NULL -#define devfreq_userspace NULL -#define devfreq_simple_ondemand NULL - #endif /* CONFIG_PM_DEVFREQ */ #endif /* __LINUX_DEVFREQ_H__ */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h index d09af4b67cf..b90091af579 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -177,6 +177,7 @@ static inline int freeze_kernel_threads(void) { return -ENOSYS; } static inline void thaw_processes(void) {} static inline void thaw_kernel_threads(void) {} +static inline bool try_to_freeze_nowarn(void) { return false; } static inline bool try_to_freeze(void) { return false; } static inline void freezer_do_not_count(void) {} diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h index df804ba73e0..92a0dc75bc7 100644 --- a/include/linux/i2c-omap.h +++ b/include/linux/i2c-omap.h @@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data { u32 clkrate; u32 rev; u32 flags; + void (*set_mpu_wkup_lat)(struct device *dev, long set); }; #endif diff --git a/include/linux/mm.h b/include/linux/mm.h index fa068040273..bcaab4e6fe9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1684,9 +1684,5 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } static inline bool page_is_guard(struct page *page) { return false; } #endif /* CONFIG_DEBUG_PAGEALLOC */ -extern void reset_zone_present_pages(void); -extern void fixup_zone_present_pages(int nid, unsigned long start_pfn, - unsigned long 
end_pfn); - #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 50aaca81f63..a23923ba826 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size, enum memmap_context context); -extern void lruvec_init(struct lruvec *lruvec, struct zone *zone); +extern void lruvec_init(struct lruvec *lruvec); static inline struct zone *lruvec_zone(struct lruvec *lruvec) { diff --git a/include/linux/of_address.h b/include/linux/of_address.h index e20e3af68fb..0506eb53519 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -42,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address( { return NULL; } +#ifndef of_iomap static inline void __iomem *of_iomap(struct device_node *device, int index) { return NULL; } +#endif static inline const __be32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags) { diff --git a/include/linux/platform_data/omap_ocp2scp.h b/include/linux/platform_data/omap_ocp2scp.h new file mode 100644 index 00000000000..5c6c3939355 --- /dev/null +++ b/include/linux/platform_data/omap_ocp2scp.h @@ -0,0 +1,31 @@ +/* + * omap_ocp2scp.h -- ocp2scp header file + * + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
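
Wrapping the of_iomap() stub in #ifndef above lets an architecture header that is included first provide its own definition and silently win over the generic fallback. The same guard pattern in miniature, with made-up names:

#include <stddef.h>

/* an arch header (optional), included first, could have done:
 *	static inline void *arch_map_io(unsigned long addr);
 *	#define map_io(addr) arch_map_io(addr)
 */

/* generic header: supply the fallback only if nothing defined map_io yet */
#ifndef map_io
static inline void *map_io(unsigned long addr)
{
	(void)addr;
	return NULL;	/* generic stub, like the !CONFIG_OF case above */
}
#endif
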
+ * + */ + +#ifndef __DRIVERS_OMAP_OCP2SCP_H +#define __DRIVERS_OMAP_OCP2SCP_H + +struct omap_ocp2scp_dev { + const char *drv_name; + struct resource *res; +}; + +struct omap_ocp2scp_platform_data { + int dev_cnt; + struct omap_ocp2scp_dev **devices; +}; +#endif /* __DRIVERS_OMAP_OCP2SCP_H */ diff --git a/include/linux/rio.h b/include/linux/rio.h index 4187da51100..a3e78427866 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -275,9 +275,11 @@ struct rio_id_table { * struct rio_net - RIO network info * @node: Node in global list of RIO networks * @devices: List of devices in this network + * @switches: List of switches in this netowrk * @mports: List of master ports accessing this network * @hport: Default port for accessing this network * @id: RIO network ID + * @destid_table: destID allocation table */ struct rio_net { struct list_head node; /* node in list of networks */ diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index c64de9dd763..2f694f3846a 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -46,8 +46,9 @@ struct ads7846_platform_data { u16 debounce_rep; /* additional consecutive good readings * required after the first two */ int gpio_pendown; /* the GPIO used to decide the pendown - * state if get_pendown_state == NULL - */ + * state if get_pendown_state == NULL */ + int gpio_pendown_debounce; /* platform specific debounce time for + * the gpio_pendown */ int (*get_pendown_state)(void); int (*filter_init) (const struct ads7846_platform_data *pdata, void **filter_data); diff --git a/include/linux/tick.h b/include/linux/tick.h index f37fceb69b7..1a6567b4849 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -142,4 +142,10 @@ static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } # endif /* !NO_HZ */ +# ifdef CONFIG_CPU_IDLE_GOV_MENU +extern void menu_hrtimer_cancel(void); +# else +static inline void menu_hrtimer_cancel(void) {} +# endif /* CONFIG_CPU_IDLE_GOV_MENU */ + #endif diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 6f0ba01afe7..63445ede48b 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1351,7 +1351,7 @@ struct xfrm6_tunnel { }; extern void xfrm_init(void); -extern void xfrm4_init(int rt_hash_size); +extern void xfrm4_init(void); extern int xfrm_state_init(struct net *net); extern void xfrm_state_fini(struct net *net); extern void xfrm4_state_init(void); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 88fae8d2015..55367b04dc9 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -135,6 +135,8 @@ struct scsi_device { * because we did a bus reset. 
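
The tick.h hunk above uses the stock pattern for optional kernel facilities: declare the real menu_hrtimer_cancel() when CONFIG_CPU_IDLE_GOV_MENU is set, otherwise provide an empty static inline so callers (the tick-sched changes later in this section) need no #ifdef of their own. The generic shape, with an invented option and function name:

#ifdef CONFIG_MY_FEATURE
extern void my_feature_kick(void);
#else
static inline void my_feature_kick(void) {}
#endif
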
*/ unsigned use_10_for_rw:1; /* first try 10-byte read / write */ unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ + unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ + unsigned no_write_same:1; /* no WRITE SAME command */ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ unsigned skip_vpd_pages:1; /* do not read VPD pages */ @@ -362,6 +364,8 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, struct scsi_sense_hdr *sshdr); extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, int buf_len); +extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode); extern int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state); extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, diff --git a/include/uapi/linux/oom.h b/include/uapi/linux/oom.h index a49c4afc706..b29272d621c 100644 --- a/include/uapi/linux/oom.h +++ b/include/uapi/linux/oom.h @@ -8,4 +8,13 @@ #define OOM_SCORE_ADJ_MIN (-1000) #define OOM_SCORE_ADJ_MAX 1000 +/* + * /proc/<pid>/oom_adj set to -17 protects from the oom killer for legacy + * purposes. + */ +#define OOM_DISABLE (-17) +/* inclusive */ +#define OOM_ADJUST_MIN (-16) +#define OOM_ADJUST_MAX 15 + #endif /* _UAPI__INCLUDE_LINUX_OOM_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 42bd331ee0a..f45657f1eb8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -348,11 +348,13 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; struct task_struct *idle; - if (cpu_online(cpu) || !cpu_present(cpu)) - return -EINVAL; - cpu_hotplug_begin(); + if (cpu_online(cpu) || !cpu_present(cpu)) { + ret = -EINVAL; + goto out; + } + idle = idle_thread_get(cpu); if (IS_ERR(idle)) { ret = PTR_ERR(idle); diff --git a/kernel/futex.c b/kernel/futex.c index 3717e7b306e..20ef219bbe9 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct futex_pi_state **ps, struct task_struct *task, int set_waiters) { - int lock_taken, ret, ownerdied = 0; + int lock_taken, ret, force_take = 0; u32 uval, newval, curval, vpid = task_pid_vnr(task); retry: @@ -755,17 +755,15 @@ retry: newval = curval | FUTEX_WAITERS; /* - * There are two cases, where a futex might have no owner (the - * owner TID is 0): OWNER_DIED. We take over the futex in this - * case. We also do an unconditional take over, when the owner - * of the futex died. - * - * This is safe as we are protected by the hash bucket lock ! + * Should we force take the futex? See below. */ - if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { - /* Keep the OWNER_DIED bit */ + if (unlikely(force_take)) { + /* + * Keep the OWNER_DIED and the WAITERS bit and set the + * new TID value. + */ newval = (curval & ~FUTEX_TID_MASK) | vpid; - ownerdied = 0; + force_take = 0; lock_taken = 1; } @@ -775,7 +773,7 @@ retry: goto retry; /* - * We took the lock due to owner died take over. + * We took the lock due to forced take over. */ if (unlikely(lock_taken)) return 1; @@ -790,20 +788,25 @@ retry: switch (ret) { case -ESRCH: /* - * No owner found for this futex. Check if the - * OWNER_DIED bit is set to figure out whether - * this is a robust futex or not. + * We failed to find an owner for this + * futex. 
So we have no pi_state to block + * on. This can happen in two cases: + * + * 1) The owner died + * 2) A stale FUTEX_WAITERS bit + * + * Re-read the futex value. */ if (get_futex_value_locked(&curval, uaddr)) return -EFAULT; /* - * We simply start over in case of a robust - * futex. The code above will take the futex - * and return happy. + * If the owner died or we have a stale + * WAITERS bit the owner TID in the user space + * futex is 0. */ - if (curval & FUTEX_OWNER_DIED) { - ownerdied = 1; + if (!(curval & FUTEX_TID_MASK)) { + force_take = 1; goto retry; } default: diff --git a/kernel/power/main.c b/kernel/power/main.c index f458238109c..1c16f9167de 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -59,7 +59,7 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr, { unsigned long val; - if (strict_strtoul(buf, 10, &val)) + if (kstrtoul(buf, 10, &val)) return -EINVAL; if (val > 1) diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 2ab2819aee6..9322ff7eaad 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -563,7 +563,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, } else { ascii_value[count] = '\0'; } - ret = strict_strtoul(ascii_value, 16, &ulval); + ret = kstrtoul(ascii_value, 16, &ulval); if (ret) { pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); return -EINVAL; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 3c9d764eb0d..7c33ed20041 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -126,7 +126,7 @@ static int swsusp_extents_insert(unsigned long swap_offset) /* Figure out where to put the new node */ while (*new) { - ext = container_of(*new, struct swsusp_extent, node); + ext = rb_entry(*new, struct swsusp_extent, node); parent = *new; if (swap_offset < ext->start) { /* Try to merge */ diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a4026088526..6f337068dc4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -526,6 +526,8 @@ void tick_nohz_irq_exit(void) if (!ts->inidle) return; + /* Cancel the timer because CPU already waken up from the C-states*/ + menu_hrtimer_cancel(); __tick_nohz_idle_enter(ts); } @@ -621,6 +623,8 @@ void tick_nohz_idle_exit(void) ts->inidle = 0; + /* Cancel the timer because CPU already waken up from the C-states*/ + menu_hrtimer_cancel(); if (ts->idle_active || ts->tick_stopped) now = ktime_get(); diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 678ce4f1e12..095ab157a52 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -641,7 +641,14 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + UDItype __ll = (UDItype)(u) * (v); \ + w1 = __ll >> 32; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("multu %2,%3" \ : "=l" ((USItype)(w0)), \ @@ -666,7 +673,15 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7 +#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#define umul_ppmm(w1, w0, u, v) \ +do { \ + typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ + __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \ + w1 = __ll >> 64; \ + w0 = __ll; \ +} while (0) +#elif __GNUC__ > 2 || 
__GNUC_MINOR__ >= 7 #define umul_ppmm(w1, w0, u, v) \ __asm__ ("dmultu %2,%3" \ : "=l" ((UDItype)(w0)), \ diff --git a/mm/bootmem.c b/mm/bootmem.c index 434be4ae7a0..f468185b3b2 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) int order = ilog2(BITS_PER_LONG); __free_pages_bootmem(pfn_to_page(start), order); - fixup_zone_present_pages(page_to_nid(pfn_to_page(start)), - start, start + BITS_PER_LONG); count += BITS_PER_LONG; start += BITS_PER_LONG; } else { @@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) if (vec & 1) { page = pfn_to_page(start + off); __free_pages_bootmem(page, 0); - fixup_zone_present_pages( - page_to_nid(page), - start + off, start + off + 1); count++; } vec >>= 1; @@ -226,11 +221,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) pages = bdata->node_low_pfn - bdata->node_min_pfn; pages = bootmem_bootmap_pages(pages); count += pages; - while (pages--) { - fixup_zone_present_pages(page_to_nid(page), - page_to_pfn(page), page_to_pfn(page) + 1); + while (pages--) __free_pages_bootmem(page++, 0); - } bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); diff --git a/mm/highmem.c b/mm/highmem.c index d517cd16a6e..2da13a5c50e 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -98,7 +98,7 @@ struct page *kmap_to_page(void *vaddr) { unsigned long addr = (unsigned long)vaddr; - if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) { + if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT; return pte_page(pkmap_page_table[i]); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7acf43bf04a..dd39ba000b3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone, struct mem_cgroup *memcg) { struct mem_cgroup_per_zone *mz; + struct lruvec *lruvec; - if (mem_cgroup_disabled()) - return &zone->lruvec; + if (mem_cgroup_disabled()) { + lruvec = &zone->lruvec; + goto out; + } mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone)); - return &mz->lruvec; + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->zone here; + * and if offlined then reonlined, we need to reinitialize it. + */ + if (unlikely(lruvec->zone != zone)) + lruvec->zone = zone; + return lruvec; } /* @@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) struct mem_cgroup_per_zone *mz; struct mem_cgroup *memcg; struct page_cgroup *pc; + struct lruvec *lruvec; - if (mem_cgroup_disabled()) - return &zone->lruvec; + if (mem_cgroup_disabled()) { + lruvec = &zone->lruvec; + goto out; + } pc = lookup_page_cgroup(page); memcg = pc->mem_cgroup; @@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) pc->mem_cgroup = memcg = root_mem_cgroup; mz = page_cgroup_zoneinfo(memcg, page); - return &mz->lruvec; + lruvec = &mz->lruvec; +out: + /* + * Since a node can be onlined after the mem_cgroup was created, + * we have to be prepared to initialize lruvec->zone here; + * and if offlined then reonlined, we need to reinitialize it. 
+ */ + if (unlikely(lruvec->zone != zone)) + lruvec->zone = zone; + return lruvec; } /** @@ -1452,17 +1476,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) { u64 limit; - u64 memsw; limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - limit += total_swap_pages << PAGE_SHIFT; - memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); /* - * If memsw is finite and limits the amount of swap space available - * to this memcg, return that limit. + * Do not consider swap space if we cannot swap due to swappiness */ - return min(limit, memsw); + if (mem_cgroup_swappiness(memcg)) { + u64 memsw; + + limit += total_swap_pages << PAGE_SHIFT; + memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + + /* + * If memsw is finite and limits the amount of swap space + * available to this memcg, return that limit. + */ + limit = min(limit, memsw); + } + + return limit; } void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, @@ -3688,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg, int node, int zid, enum lru_list lru) { - struct mem_cgroup_per_zone *mz; + struct lruvec *lruvec; unsigned long flags, loop; struct list_head *list; struct page *busy; struct zone *zone; zone = &NODE_DATA(node)->node_zones[zid]; - mz = mem_cgroup_zoneinfo(memcg, node, zid); - list = &mz->lruvec.lists[lru]; + lruvec = mem_cgroup_zone_lruvec(zone, memcg); + list = &lruvec->lists[lru]; - loop = mz->lru_size[lru]; + loop = mem_cgroup_get_lru_size(lruvec, lru); /* give some margin against EBUSY etc...*/ loop += 256; busy = NULL; @@ -4736,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; - lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]); + lruvec_init(&mz->lruvec); mz->usage_in_excess = 0; mz->on_tree = false; mz->memcg = memcg; diff --git a/mm/memory.c b/mm/memory.c index fb135ba4aba..221fc9ffcab 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2527,9 +2527,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, int ret = 0; int page_mkwrite = 0; struct page *dirty_page = NULL; - unsigned long mmun_start; /* For mmu_notifiers */ - unsigned long mmun_end; /* For mmu_notifiers */ - bool mmun_called = false; /* For mmu_notifiers */ + unsigned long mmun_start = 0; /* For mmu_notifiers */ + unsigned long mmun_end = 0; /* For mmu_notifiers */ old_page = vm_normal_page(vma, address, orig_pte); if (!old_page) { @@ -2708,8 +2707,7 @@ gotten: goto oom_free_new; mmun_start = address & PAGE_MASK; - mmun_end = (address & PAGE_MASK) + PAGE_SIZE; - mmun_called = true; + mmun_end = mmun_start + PAGE_SIZE; mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); /* @@ -2778,7 +2776,7 @@ gotten: page_cache_release(new_page); unlock: pte_unmap_unlock(page_table, ptl); - if (mmun_called) + if (mmun_end > mmun_start) mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (old_page) { /* diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 56b758ae57d..e4eeacae2b9 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -106,7 +106,6 @@ static void get_page_bootmem(unsigned long info, struct page *page, void __ref put_page_bootmem(struct page *page) { unsigned long type; - struct zone *zone; type = (unsigned long) page->lru.next; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || @@ 
-117,12 +116,6 @@ void __ref put_page_bootmem(struct page *page) set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); __free_pages_bootmem(page, 0); - - zone = page_zone(page); - zone_span_writelock(zone); - zone->present_pages++; - zone_span_writeunlock(zone); - totalram_pages++; } } diff --git a/mm/mmap.c b/mm/mmap.c index 2d942353d68..9a796c41e7d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -334,8 +334,10 @@ void validate_mm(struct mm_struct *mm) struct vm_area_struct *vma = mm->mmap; while (vma) { struct anon_vma_chain *avc; + vma_lock_anon_vma(vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); + vma_unlock_anon_vma(vma); vma = vma->vm_next; i++; } diff --git a/mm/mmzone.c b/mm/mmzone.c index 3cef80f6ac7..4596d81b89b 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -87,7 +87,7 @@ int memmap_valid_within(unsigned long pfn, } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ -void lruvec_init(struct lruvec *lruvec, struct zone *zone) +void lruvec_init(struct lruvec *lruvec) { enum lru_list lru; @@ -95,8 +95,4 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone) for_each_lru(lru) INIT_LIST_HEAD(&lruvec->lists[lru]); - -#ifdef CONFIG_MEMCG - lruvec->zone = zone; -#endif } diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 714d5d65047..bd82f6b3141 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -116,8 +116,6 @@ static unsigned long __init __free_memory_core(phys_addr_t start, return 0; __free_pages_memory(start_pfn, end_pfn); - fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT), - start_pfn, end_pfn); return end_pfn - start_pfn; } @@ -128,7 +126,6 @@ unsigned long __init free_low_memory_core_early(int nodeid) phys_addr_t start, end, size; u64 i; - reset_zone_present_pages(); for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) count += __free_memory_core(start, end); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b74de6702e..bcb72c6e2b2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype) mt = get_pageblock_migratetype(page); if (unlikely(mt != MIGRATE_ISOLATE)) - __mod_zone_freepage_state(zone, -(1UL << order), mt); + __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt); if (alloc_order != order) expand(zone, page, alloc_order, order, @@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, zone->zone_pgdat = pgdat; zone_pcp_init(zone); - lruvec_init(&zone->lruvec, zone); + lruvec_init(&zone->lruvec); if (!size) continue; @@ -6098,37 +6098,3 @@ void dump_page(struct page *page) dump_page_flags(page->flags); mem_cgroup_print_bad_page(page); } - -/* reset zone->present_pages */ -void reset_zone_present_pages(void) -{ - struct zone *z; - int i, nid; - - for_each_node_state(nid, N_HIGH_MEMORY) { - for (i = 0; i < MAX_NR_ZONES; i++) { - z = NODE_DATA(nid)->node_zones + i; - z->present_pages = 0; - } - } -} - -/* calculate zone's present pages in buddy system */ -void fixup_zone_present_pages(int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - struct zone *z; - unsigned long zone_start_pfn, zone_end_pfn; - int i; - - for (i = 0; i < MAX_NR_ZONES; i++) { - z = NODE_DATA(nid)->node_zones + i; - zone_start_pfn = z->zone_start_pfn; - zone_end_pfn = zone_start_pfn + z->spanned_pages; - - /* if the two regions intersect */ - if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn)) - z->present_pages += min(end_pfn, zone_end_pfn) - - max(start_pfn, zone_start_pfn); - } -} 
diff --git a/mm/shmem.c b/mm/shmem.c index 67afba5117f..89341b658bd 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -643,7 +643,7 @@ static void shmem_evict_inode(struct inode *inode) kfree(info->symlink); simple_xattrs_free(&info->xattrs); - BUG_ON(inode->i_blocks); + WARN_ON(inode->i_blocks); shmem_free_inode(inode->i_sb); clear_inode(inode); } @@ -1145,8 +1145,20 @@ repeat: if (!error) { error = shmem_add_to_page_cache(page, mapping, index, gfp, swp_to_radix_entry(swap)); - /* We already confirmed swap, and make no allocation */ - VM_BUG_ON(error); + /* + * We already confirmed swap under page lock, and make + * no memory allocation here, so usually no possibility + * of error; but free_swap_and_cache() only trylocks a + * page, so it is just possible that the entry has been + * truncated or holepunched since swap was confirmed. + * shmem_undo_range() will have done some of the + * unaccounting, now delete_from_swap_cache() will do + * the rest (including mem_cgroup_uncharge_swapcache). + * Reset swap.val? No, leave it so "failed" goes back to + * "repeat": reading a hole and writing should succeed. + */ + if (error) + delete_from_swap_cache(page); } if (error) goto failed; diff --git a/mm/swapfile.c b/mm/swapfile.c index 71cd288b200..f91a25547ff 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1494,9 +1494,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) BUG_ON(!current->mm); pathname = getname(specialfile); - err = PTR_ERR(pathname); if (IS_ERR(pathname)) - goto out; + return PTR_ERR(pathname); victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); err = PTR_ERR(victim); @@ -1608,6 +1607,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) out_dput: filp_close(victim, NULL); out: + putname(pathname); return err; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 8b055e9379b..48550c66f1f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc) return false; } -#ifdef CONFIG_COMPACTION -/* - * If compaction is deferred for sc->order then scale the number of pages - * reclaimed based on the number of consecutive allocation failures - */ -static unsigned long scale_for_compaction(unsigned long pages_for_compaction, - struct lruvec *lruvec, struct scan_control *sc) -{ - struct zone *zone = lruvec_zone(lruvec); - - if (zone->compact_order_failed <= sc->order) - pages_for_compaction <<= zone->compact_defer_shift; - return pages_for_compaction; -} -#else -static unsigned long scale_for_compaction(unsigned long pages_for_compaction, - struct lruvec *lruvec, struct scan_control *sc) -{ - return pages_for_compaction; -} -#endif - /* * Reclaim/compaction is used for high-order allocation requests. It reclaims * order-0 pages before compacting the zone. 
should_continue_reclaim() returns @@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec, * inactive lists are large enough, continue reclaiming */ pages_for_compaction = (2UL << sc->order); - - pages_for_compaction = scale_for_compaction(pages_for_compaction, - lruvec, sc); inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); if (nr_swap_pages > 0) inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b9a28d2dd3e..ce0684a1fc8 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -325,6 +325,12 @@ void batadv_interface_rx(struct net_device *soft_iface, soft_iface->last_rx = jiffies; + /* Let the bridge loop avoidance check the packet. If will + * not handle it, we can safely push it up. + */ + if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + goto out; + if (orig_node) batadv_tt_add_temporary_global_entry(bat_priv, orig_node, ethhdr->h_source); @@ -332,12 +338,6 @@ void batadv_interface_rx(struct net_device *soft_iface, if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; - /* Let the bridge loop avoidance check the packet. If will - * not handle it, we can safely push it up. - */ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) - goto out; - netif_rx(skb); goto out; diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 112edd371b2..baae7158580 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -769,6 +769,12 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv, */ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; + /* the change can carry possible "attribute" flags like the + * TT_CLIENT_WIFI, therefore they have to be copied in the + * client entry + */ + tt_global_entry->common.flags |= flags; + /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a * delete + roaming change for this originator. 
@@ -1496,7 +1502,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, memcpy(tt_change->addr, tt_common_entry->addr, ETH_ALEN); - tt_change->flags = BATADV_NO_FLAGS; + tt_change->flags = tt_common_entry->flags; tt_count++; tt_change++; @@ -2450,6 +2456,13 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, { bool ret = false; + /* if the originator is a backbone node (meaning it belongs to the same + * LAN of this node) the temporary client must not be added because to + * reach such destination the node must use the LAN instead of the mesh + */ + if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + goto out; + if (!batadv_tt_global_add(bat_priv, orig_node, addr, BATADV_TT_CLIENT_TEMP, atomic_read(&orig_node->last_ttvn))) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a0ce706aeb..a0a2f97b9c6 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1754,11 +1754,11 @@ int hci_register_dev(struct hci_dev *hdev) if (hdev->dev_type != HCI_AMP) set_bit(HCI_AUTO_OFF, &hdev->dev_flags); - schedule_work(&hdev->power_on); - hci_notify(hdev, HCI_DEV_REG); hci_dev_hold(hdev); + schedule_work(&hdev->power_on); + return id; err_wqueue: diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index aa2ea0a8142..91de4239da6 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -326,7 +326,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, struct hci_dev *d; size_t rp_len; u16 count; - int i, err; + int err; BT_DBG("sock %p", sk); @@ -347,9 +347,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, return -ENOMEM; } - rp->num_controllers = cpu_to_le16(count); - - i = 0; + count = 0; list_for_each_entry(d, &hci_dev_list, list) { if (test_bit(HCI_SETUP, &d->dev_flags)) continue; @@ -357,10 +355,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, if (!mgmt_valid_hdev(d)) continue; - rp->index[i++] = cpu_to_le16(d->id); + rp->index[count++] = cpu_to_le16(d->id); BT_DBG("Added hci%u", d->id); } + rp->num_controllers = cpu_to_le16(count); + rp_len = sizeof(*rp) + (2 * count); + read_unlock(&hci_dev_list_lock); err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp, @@ -1366,6 +1367,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, continue; list_del(&match->list); + kfree(match); found++; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 2ac8d50861e..a5923378bdf 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -267,7 +267,7 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags); mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, - hcon->dst_type, reason); + hcon->dst_type, HCI_ERROR_AUTH_FAILURE); cancel_delayed_work_sync(&conn->security_timer); diff --git a/net/core/dev.c b/net/core/dev.c index bda6d004f9f..c0946cb2b35 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2818,8 +2818,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (unlikely(tcpu != next_cpu) && (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - - rflow->last_qtail)) >= 0)) + rflow->last_qtail)) >= 0)) { + tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); + } if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { *rflowp = rflow; diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 87cc17db2d5..b079c7bbc15 
100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -319,7 +319,8 @@ int dev_addr_del(struct net_device *dev, const unsigned char *addr, */ ha = list_first_entry(&dev->dev_addrs.list, struct netdev_hw_addr, list); - if (ha->addr == dev->dev_addr && ha->refcount == 1) + if (!memcmp(ha->addr, addr, dev->addr_len) && + ha->type == addr_type && ha->refcount == 1) return -ENOENT; err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index bcf02f608cb..017a8bacfb2 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -429,6 +429,17 @@ static struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, }; + +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) +static struct attribute *wireless_attrs[] = { + NULL +}; + +static struct attribute_group wireless_group = { + .name = "wireless", + .attrs = wireless_attrs, +}; +#endif #endif /* CONFIG_SYSFS */ #ifdef CONFIG_RPS @@ -1409,6 +1420,15 @@ int netdev_register_kobject(struct net_device *net) groups++; *groups++ = &netstat_group; + +#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211) + if (net->ieee80211_ptr) + *groups++ = &wireless_group; +#if IS_ENABLED(CONFIG_WIRELESS_EXT) + else if (net->wireless_handlers) + *groups++ = &wireless_group; +#endif +#endif #endif /* CONFIG_SYSFS */ error = device_add(dev); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5eea4a81104..14bbfcf717a 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -457,19 +457,28 @@ static int do_ip_setsockopt(struct sock *sk, int level, struct inet_sock *inet = inet_sk(sk); int val = 0, err; - if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | - (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | - (1<<IP_RETOPTS) | (1<<IP_TOS) | - (1<<IP_TTL) | (1<<IP_HDRINCL) | - (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | - (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | - (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | - (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || - optname == IP_UNICAST_IF || - optname == IP_MULTICAST_TTL || - optname == IP_MULTICAST_ALL || - optname == IP_MULTICAST_LOOP || - optname == IP_RECVORIGDSTADDR) { + switch (optname) { + case IP_PKTINFO: + case IP_RECVTTL: + case IP_RECVOPTS: + case IP_RECVTOS: + case IP_RETOPTS: + case IP_TOS: + case IP_TTL: + case IP_HDRINCL: + case IP_MTU_DISCOVER: + case IP_RECVERR: + case IP_ROUTER_ALERT: + case IP_FREEBIND: + case IP_PASSSEC: + case IP_TRANSPARENT: + case IP_MINTTL: + case IP_NODEFRAG: + case IP_UNICAST_IF: + case IP_MULTICAST_TTL: + case IP_MULTICAST_ALL: + case IP_MULTICAST_LOOP: + case IP_RECVORIGDSTADDR: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 1831092f999..858fddf6482 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -338,12 +338,17 @@ static int vti_rcv(struct sk_buff *skb) if (tunnel != NULL) { struct pcpu_tstats *tstats; + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) + return -1; + tstats = this_cpu_ptr(tunnel->dev->tstats); u64_stats_update_begin(&tstats->syncp); tstats->rx_packets++; tstats->rx_bytes += skb->len; u64_stats_update_end(&tstats->syncp); + skb->mark = 0; + secpath_reset(skb); skb->dev = tunnel->dev; return 1; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a8c651216fa..df251424d81 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res, if 
(dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; + do_cache = true; if (type == RTN_BROADCAST) { flags |= RTCF_BROADCAST | RTCF_LOCAL; fi = NULL; @@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr, fl4->flowi4_proto)) flags &= ~RTCF_LOCAL; + else + do_cache = false; /* If multicast route do not exist use * default one, but do not gateway in this case. * Yes, it is hack. @@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, } fnhe = NULL; - do_cache = fi != NULL; - if (fi) { + do_cache &= fi != NULL; + if (do_cache) { struct rtable __rcu **prth; struct fib_nh *nh = &FIB_RES_NH(*res); @@ -2597,7 +2600,7 @@ int __init ip_rt_init(void) pr_err("Unable to create route proc files\n"); #ifdef CONFIG_XFRM xfrm_init(); - xfrm4_init(ip_rt_max_size); + xfrm4_init(); #endif rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 197c0008503..083092e3aed 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1212,7 +1212,7 @@ new_segment: wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - if (copied && likely(!tp->repair)) + if (copied) tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) @@ -1223,7 +1223,7 @@ wait_for_memory: } out: - if (copied && likely(!tp->repair)) + if (copied) tcp_push(sk, flags, mss_now, tp->nonagle); release_sock(sk); return copied + copied_syn; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2c2b13a999e..609ff98aeb4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5313,11 +5313,6 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, goto discard; } - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN @@ -5552,6 +5547,11 @@ step5: if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -6130,6 +6130,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else goto discard; + /* ts_recent update must be made after we are sure that the packet + * is in window. 
+ */ + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 53bc5847bfa..f696d7c2e9f 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -1,7 +1,6 @@ #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/jiffies.h> -#include <linux/bootmem.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/slab.h> @@ -9,6 +8,7 @@ #include <linux/tcp.h> #include <linux/hash.h> #include <linux/tcp_metrics.h> +#include <linux/vmalloc.h> #include <net/inet_connection_sock.h> #include <net/net_namespace.h> @@ -1034,7 +1034,10 @@ static int __net_init tcp_net_metrics_init(struct net *net) net->ipv4.tcp_metrics_hash_log = order_base_2(slots); size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; - net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL); + net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!net->ipv4.tcp_metrics_hash) + net->ipv4.tcp_metrics_hash = vzalloc(size); + if (!net->ipv4.tcp_metrics_hash) return -ENOMEM; @@ -1055,7 +1058,10 @@ static void __net_exit tcp_net_metrics_exit(struct net *net) tm = next; } } - kfree(net->ipv4.tcp_metrics_hash); + if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) + vfree(net->ipv4.tcp_metrics_hash); + else + kfree(net->ipv4.tcp_metrics_hash); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cfe6ffe1c17..2798706cb06 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1986,6 +1986,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, tso_segs = tcp_init_tso_segs(sk, skb, mss_now); BUG_ON(!tso_segs); + if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) + goto repair; /* Skip network transmission */ + cwnd_quota = tcp_cwnd_test(tp, skb); if (!cwnd_quota) break; @@ -2026,6 +2029,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; +repair: /* Advance the send_head. This one is sent out. * This call will increment packets_out. */ diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 05c5ab8d983..3be0ac2c192 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void) xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo); } -void __init xfrm4_init(int rt_max_size) +void __init xfrm4_init(void) { - /* - * Select a default value for the gc_thresh based on the main route - * table hash size. It seems to me the worst case scenario is when - * we have ipsec operating in transport mode, in which we create a - * dst_entry per socket. The xfrm gc algorithm starts trying to remove - * entries at gc_thresh, and prevents new allocations as 2*gc_thresh - * so lets set an initial xfrm gc_thresh value at the rt_max_size/2. 
- * That will let us store an ipsec connection per route table entry, - * and start cleaning when were 1/2 full - */ - xfrm4_dst_ops.gc_thresh = rt_max_size/2; dst_entries_init(&xfrm4_dst_ops); xfrm4_state_init(); diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index c4f934176ca..30647857a37 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu) return NULL; dst->ops->update_pmtu(dst, sk, NULL, mtu); - return inet6_csk_route_socket(sk, &fl6); + dst = inet6_csk_route_socket(sk, &fl6); + return IS_ERR(dst) ? NULL : dst; } EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ba6d13d1f1e..e02faed6d17 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -827,6 +827,7 @@ pref_skip_coa: if (val < 0 || val > 255) goto e_inval; np->min_hopcount = val; + retv = 0; break; case IPV6_DONTFRAG: np->dontfrag = valbool; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 05f3a313db8..7371f676cf4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2594,6 +2594,9 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, else local->probe_req_reg--; + if (!local->open_count) + break; + ieee80211_queue_work(&local->hw, &local->reconfig_filter); break; default: diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index bf87c70ac6c..c21e33d1abd 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1151,10 +1151,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->u.ibss.mtx); - sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; - memset(sdata->u.ibss.bssid, 0, ETH_ALEN); - sdata->u.ibss.ssid_len = 0; - active_ibss = ieee80211_sta_active_ibss(sdata); if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { @@ -1175,6 +1171,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) } } + ifibss->state = IEEE80211_IBSS_MLME_SEARCH; + memset(ifibss->bssid, 0, ETH_ALEN); + ifibss->ssid_len = 0; + sta_info_flush(sdata->local, sdata); spin_lock_bh(&ifibss->incomplete_lock); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8c804550465..156e5835e37 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1314,6 +1314,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs); /* HT */ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/main.c b/net/mac80211/main.c index c80c4490351..f57f597972f 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -871,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) local->hw.wiphy->cipher_suites, sizeof(u32) * local->hw.wiphy->n_cipher_suites, GFP_KERNEL); - if (!suites) - return -ENOMEM; + if (!suites) { + result = -ENOMEM; + goto fail_wiphy_register; + } for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) { u32 suite = local->hw.wiphy->cipher_suites[r]; if (suite == WLAN_CIPHER_SUITE_WEP40 || diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index c4cdbde24fd..43e60b5a754 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -917,7 +917,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request 
*req) { struct ieee80211_local *local = sdata->local; - struct ieee80211_sched_scan_ies sched_scan_ies; + struct ieee80211_sched_scan_ies sched_scan_ies = {}; int ret, i; mutex_lock(&local->mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 0a4e4c04db8..d2eb64e1235 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -117,8 +117,8 @@ static void free_sta_work(struct work_struct *wk) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]); - __skb_queue_purge(&sta->ps_tx_buf[ac]); - __skb_queue_purge(&sta->tx_filtered[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]); + ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]); } #ifdef CONFIG_MAC80211_MESH @@ -141,7 +141,7 @@ static void free_sta_work(struct work_struct *wk) tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]); if (!tid_tx) continue; - __skb_queue_purge(&tid_tx->pending); + ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } @@ -961,6 +961,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) struct ieee80211_local *local = sdata->local; struct sk_buff_head pending; int filtered = 0, buffered = 0, ac; + unsigned long flags; clear_sta_flag(sta, WLAN_STA_SP); @@ -976,12 +977,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; + spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); + spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); tmp = skb_queue_len(&pending); filtered += tmp - count; count = tmp; + spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); + spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); tmp = skb_queue_len(&pending); buffered += tmp - count; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 3af0cc4130f..101eb88a2b7 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -668,3 +668,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ieee80211_free_txskb); + +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skbs))) + ieee80211_free_txskb(hw, skb); +} diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c9bf83f3665..b858ebe41fd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1358,7 +1358,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) if (tx->skb) ieee80211_free_txskb(&tx->local->hw, tx->skb); else - __skb_queue_purge(&tx->skbs); + ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -2120,10 +2120,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, */ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { + struct sk_buff *skb; int i; - for (i = 0; i < local->hw.queues; i++) - skb_queue_purge(&local->pending[i]); + for (i = 0; i < local->hw.queues; i++) { + while ((skb = skb_dequeue(&local->pending[i])) != NULL) + ieee80211_free_txskb(&local->hw, skb); + } } /* diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 239391807ca..0151ae33c4c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1491,6 +1491,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) 
list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; + if (!sdata->u.mgd.associated) + continue; ieee80211_send_nullfunc(local, sdata, 0); } diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index ec3dba5dcd6..5c0b78528e5 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], return adtfn(set, &nip, timeout, flags); } + ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 0171f7502fa..6283351f4ee 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipport4_elem data = { }; - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; @@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 0 : ret; } - ip = ntohl(data.ip); + ip_to = ip = ntohl(data.ip); if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } port_to = port = ntohs(data.port); if (with_ports && tb[IPSET_ATTR_PORT_TO]) { diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 6344ef551ec..6a21271c8d5 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem data = { }; - u32 ip, ip_to = 0, p = 0, port, port_to; + u32 ip, ip_to, p = 0, port, port_to; u32 timeout = h->timeout; bool with_ports = false; int ret; @@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 
0 : ret; } - ip = ntohl(data.ip); + ip_to = ip = ntohl(data.ip); if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); - } else - ip_to = ip; + } port_to = port = ntohs(data.port); if (with_ports && tb[IPSET_ATTR_PORT_TO]) { diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index cb71f9a774e..2d5cd4ee30e 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], const struct ip_set_hash *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 }; - u32 ip, ip_to = 0, p = 0, port, port_to; - u32 ip2_from = 0, ip2_to, ip2_last, ip2; + u32 ip, ip_to, p = 0, port, port_to; + u32 ip2_from, ip2_to, ip2_last, ip2; u32 timeout = h->timeout; bool with_ports = false; u8 cidr; @@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ip_set_eexist(ret, flags) ? 0 : ret; } + ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) @@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], if (port > port_to) swap(port, port_to); } + + ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); if (ret) diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 8847b4d8be0..701c88a20fe 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu static LIST_HEAD(cttimeout_list); static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { - [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING }, + [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING, + .len = CTNL_TIMEOUT_NAME_MAX - 1}, [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 }, [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 }, [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED }, diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index cc10d073c33..9e8f4b2801f 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -1210,7 +1210,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) local->remote_miu = LLCP_DEFAULT_MIU; local->remote_lto = LLCP_DEFAULT_LTO; - list_add(&llcp_devices, &local->list); + list_add(&local->list, &llcp_devices); return 0; } diff --git a/net/sctp/proc.c b/net/sctp/proc.c index c3bea269faf..9966e7b1645 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -102,7 +102,7 @@ static const struct file_operations sctp_snmp_seq_fops = { .open = sctp_snmp_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = single_release, + .release = single_release_net, }; /* Set up the proc fs entry for 'snmp' object. */ @@ -251,7 +251,7 @@ static const struct file_operations sctp_eps_seq_fops = { .open = sctp_eps_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; /* Set up the proc fs entry for 'eps' object. 
*/ @@ -372,7 +372,7 @@ static const struct file_operations sctp_assocs_seq_fops = { .open = sctp_assocs_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; /* Set up the proc fs entry for 'assocs' object. */ @@ -517,7 +517,7 @@ static const struct file_operations sctp_remaddr_seq_fops = { .open = sctp_remaddr_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_net, }; int __net_init sctp_remaddr_proc_init(struct net *net) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index bcc7d7ee5a5..b75756b05af 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -141,9 +141,8 @@ static const struct ieee80211_regdomain world_regdom = { .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), - /* IEEE 802.11b/g, channels 12..13. No HT40 - * channel fits here. */ - REG_RULE(2467-10, 2472+10, 20, 6, 20, + /* IEEE 802.11b/g, channels 12..13. */ + REG_RULE(2467-10, 2472+10, 40, 6, 20, NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS), /* IEEE 802.11 channel 14 - Only JP enables diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index bd2e0989555..cdd48600e02 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -12,7 +12,7 @@ extern "C" { #include <assert.h> #include <stdio.h> -#include <sys/queue.h> +#include "list.h" #ifndef __cplusplus #include <stdbool.h> #endif @@ -175,12 +175,11 @@ struct menu { #define MENU_ROOT 0x0002 struct jump_key { - CIRCLEQ_ENTRY(jump_key) entries; + struct list_head entries; size_t offset; struct menu *target; int index; }; -CIRCLEQ_HEAD(jk_head, jump_key); #define JUMP_NB 9 diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h new file mode 100644 index 00000000000..0ae730be5f4 --- /dev/null +++ b/scripts/kconfig/list.h @@ -0,0 +1,91 @@ +#ifndef LIST_H +#define LIST_H + +/* + * Copied from include/linux/... + */ + +#undef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + + +struct list_head { + struct list_head *next, *prev; +}; + + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. 
+ */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *_new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = _new; + _new->next = next; + _new->prev = prev; + prev->next = _new; +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *_new, struct list_head *head) +{ + __list_add(_new, head->prev, head); +} + +#endif diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h index 1d1c08537f1..ef1a7381f95 100644 --- a/scripts/kconfig/lkc_proto.h +++ b/scripts/kconfig/lkc_proto.h @@ -21,9 +21,9 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu)); P(menu_get_parent_menu,struct menu *,(struct menu *menu)); P(menu_has_help,bool,(struct menu *menu)); P(menu_get_help,const char *,(struct menu *menu)); -P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct jk_head +P(get_symbol_str, void, (struct gstr *r, struct symbol *sym, struct list_head *head)); -P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct jk_head +P(get_relations_str, struct gstr, (struct symbol **sym_arr, struct list_head *head)); P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help)); diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 48f67448af7..53975cf8760 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -312,7 +312,7 @@ static void set_config_filename(const char *config_filename) struct search_data { - struct jk_head *head; + struct list_head *head; struct menu **targets; int *keys; }; @@ -323,7 +323,7 @@ static void update_text(char *buf, size_t start, size_t end, void *_data) struct jump_key *pos; int k = 0; - CIRCLEQ_FOREACH(pos, data->head, entries) { + list_for_each_entry(pos, data->head, entries) { if (pos->offset >= start && pos->offset < end) { char header[4]; @@ -375,7 +375,7 @@ again: sym_arr = sym_re_search(dialog_input); do { - struct jk_head head = CIRCLEQ_HEAD_INITIALIZER(head); + LIST_HEAD(head); struct menu *targets[JUMP_NB]; int keys[JUMP_NB + 1], i; struct search_data data = { diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index a3cade659f8..e98a05c8e50 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -508,7 +508,7 @@ const char *menu_get_help(struct menu *menu) } static void get_prompt_str(struct gstr *r, struct property *prop, - struct jk_head *head) + struct list_head *head) { int i, j; struct menu *submenu[8], *menu, *location = NULL; @@ -544,12 +544,13 @@ static void get_prompt_str(struct gstr *r, struct property *prop, } else jump->target = location; - if (CIRCLEQ_EMPTY(head)) + if (list_empty(head)) jump->index = 0; else - jump->index = CIRCLEQ_LAST(head)->index + 1; + jump->index = list_entry(head->prev, struct jump_key, + entries)->index + 1; - CIRCLEQ_INSERT_TAIL(head, jump, entries); + list_add_tail(&jump->entries, head); } if (i > 0) { @@ -573,7 +574,8 @@ static void get_prompt_str(struct gstr *r, struct property *prop, /* * head is optional and may be NULL */ -void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) +void get_symbol_str(struct gstr *r, 
struct symbol *sym, + struct list_head *head) { bool hit; struct property *prop; @@ -612,7 +614,7 @@ void get_symbol_str(struct gstr *r, struct symbol *sym, struct jk_head *head) str_append(r, "\n\n"); } -struct gstr get_relations_str(struct symbol **sym_arr, struct jk_head *head) +struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) { struct symbol *sym; struct gstr res = str_new(); diff --git a/scripts/sign-file b/scripts/sign-file index 87ca59d36e7..974a20b661b 100755 --- a/scripts/sign-file +++ b/scripts/sign-file @@ -156,12 +156,12 @@ sub asn1_extract($$@) if ($l == 0x1) { $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)); - } elsif ($l = 0x2) { + } elsif ($l == 0x2) { $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2)); - } elsif ($l = 0x3) { + } elsif ($l == 0x3) { $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16; $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2)); - } elsif ($l = 0x4) { + } elsif ($l == 0x4) { $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4)); } else { die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n"; diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 842c254396d..b08d20c66c2 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -164,8 +164,8 @@ static void dev_exception_clean(struct dev_cgroup *dev_cgroup) struct dev_exception_item *ex, *tmp; list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { - list_del(&ex->list); - kfree(ex); + list_del_rcu(&ex->list); + kfree_rcu(ex, rcu); } } @@ -298,7 +298,7 @@ static int may_access(struct dev_cgroup *dev_cgroup, struct dev_exception_item *ex; bool match = false; - list_for_each_entry(ex, &dev_cgroup->exceptions, list) { + list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) { if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) continue; if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) @@ -352,6 +352,8 @@ static int parent_has_perm(struct dev_cgroup *childcg, */ static inline int may_allow_all(struct dev_cgroup *parent) { + if (!parent) + return 1; return parent->behavior == DEVCG_DEFAULT_ALLOW; } @@ -376,11 +378,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int count, rc; struct dev_exception_item ex; struct cgroup *p = devcgroup->css.cgroup; - struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent); + struct dev_cgroup *parent = NULL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (p->parent) + parent = cgroup_to_devcgroup(p->parent); + memset(&ex, 0, sizeof(ex)); b = buffer; @@ -391,11 +396,14 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, if (!may_allow_all(parent)) return -EPERM; dev_exception_clean(devcgroup); + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + if (!parent) + break; + rc = dev_exceptions_copy(&devcgroup->exceptions, &parent->exceptions); if (rc) return rc; - devcgroup->behavior = DEVCG_DEFAULT_ALLOW; break; case DEVCG_DENY: dev_exception_clean(devcgroup); diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index 28f911cdd7c..c5454c0477c 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c @@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node) if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { struct sel_netnode *tail; tail = list_entry( - rcu_dereference(sel_netnode_hash[idx].list.prev), + rcu_dereference_protected(sel_netnode_hash[idx].list.prev, + lockdep_is_held(&sel_netnode_lock)), struct sel_netnode, 
list); list_del_rcu(&tail->list); kfree_rcu(tail, rcu); diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index 50169bcfd90..7266020c16c 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -2581,9 +2581,14 @@ static u8 snd_es1968_tea575x_get_pins(struct snd_tea575x *tea) struct es1968 *chip = tea->private_data; unsigned long io = chip->io_port + GPIO_DATA; u16 val = inw(io); - - return (val & STR_DATA) ? TEA575X_DATA : 0 | - (val & STR_MOST) ? TEA575X_MOST : 0; + u8 ret; + + ret = 0; + if (val & STR_DATA) + ret |= TEA575X_DATA; + if (val & STR_MOST) + ret |= TEA575X_MOST; + return ret; } static void snd_es1968_tea575x_set_direction(struct snd_tea575x *tea, bool output) diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c index cc2e91d1553..c5806f89be1 100644 --- a/sound/pci/fm801.c +++ b/sound/pci/fm801.c @@ -767,9 +767,14 @@ static u8 snd_fm801_tea575x_get_pins(struct snd_tea575x *tea) struct fm801 *chip = tea->private_data; unsigned short reg = inw(FM801_REG(chip, GPIO_CTRL)); struct snd_fm801_tea575x_gpio gpio = *get_tea575x_gpio(chip); - - return (reg & FM801_GPIO_GP(gpio.data)) ? TEA575X_DATA : 0 | - (reg & FM801_GPIO_GP(gpio.most)) ? TEA575X_MOST : 0; + u8 ret; + + ret = 0; + if (reg & FM801_GPIO_GP(gpio.data)) + ret |= TEA575X_DATA; + if (reg & FM801_GPIO_GP(gpio.most)) + ret |= TEA575X_MOST; + return ret; } static void snd_fm801_tea575x_set_direction(struct snd_tea575x *tea, bool output) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 70d4848b5cd..d010de12335 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -95,6 +95,7 @@ int snd_hda_delete_codec_preset(struct hda_codec_preset_list *preset) EXPORT_SYMBOL_HDA(snd_hda_delete_codec_preset); #ifdef CONFIG_PM +#define codec_in_pm(codec) ((codec)->in_pm) static void hda_power_work(struct work_struct *work); static void hda_keep_power_on(struct hda_codec *codec); #define hda_codec_is_power_on(codec) ((codec)->power_on) @@ -104,6 +105,7 @@ static inline void hda_call_pm_notify(struct hda_bus *bus, bool power_up) bus->ops.pm_notify(bus, power_up); } #else +#define codec_in_pm(codec) 0 static inline void hda_keep_power_on(struct hda_codec *codec) {} #define hda_codec_is_power_on(codec) 1 #define hda_call_pm_notify(bus, state) {} @@ -228,7 +230,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, } mutex_unlock(&bus->cmd_mutex); snd_hda_power_down(codec); - if (res && *res == -1 && bus->rirb_error) { + if (!codec_in_pm(codec) && res && *res == -1 && bus->rirb_error) { if (bus->response_reset) { snd_printd("hda_codec: resetting BUS due to " "fatal communication error\n"); @@ -238,7 +240,7 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd, goto again; } /* clear reset-flag when the communication gets recovered */ - if (!err) + if (!err || codec_in_pm(codec)) bus->response_reset = 0; return err; } @@ -3616,6 +3618,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) { unsigned int state; + codec->in_pm = 1; + if (codec->patch_ops.suspend) codec->patch_ops.suspend(codec); hda_cleanup_all_streams(codec); @@ -3630,6 +3634,7 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) codec->power_transition = 0; codec->power_jiffies = jiffies; spin_unlock(&codec->power_lock); + codec->in_pm = 0; return state; } @@ -3638,6 +3643,8 @@ static unsigned int hda_call_codec_suspend(struct hda_codec *codec, bool in_wq) */ static void hda_call_codec_resume(struct hda_codec *codec) { + codec->in_pm = 1; 
+ /* set as if powered on for avoiding re-entering the resume * in the resume / power-save sequence */ @@ -3656,6 +3663,8 @@ static void hda_call_codec_resume(struct hda_codec *codec) snd_hda_codec_resume_cache(codec); } snd_hda_jack_report_sync(codec); + + codec->in_pm = 0; snd_hda_power_down(codec); /* flag down before returning */ } #endif /* CONFIG_PM */ diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 507fe8a917b..4f4e545c0f4 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -869,6 +869,7 @@ struct hda_codec { unsigned int power_on :1; /* current (global) power-state */ unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */ unsigned int pm_down_notified:1; /* PM notified to controller */ + unsigned int in_pm:1; /* suspend/resume being performed */ int power_transition; /* power-state in transition */ int power_count; /* current (global) power refcount */ struct delayed_work power_work; /* delayed task for powerdown */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index cd2dbaf1be7..f9d870e554d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -556,6 +556,12 @@ enum { #define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ +#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ + +/* quirks for Intel PCH */ +#define AZX_DCAPS_INTEL_PCH \ + (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ + AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME) /* quirks for ATI SB / AMD Hudson */ #define AZX_DCAPS_PRESET_ATI_SB \ @@ -2433,6 +2439,9 @@ static void azx_power_notify(struct hda_bus *bus, bool power_up) { struct azx *chip = bus->private_data; + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) + return; + if (power_up) pm_runtime_get_sync(&chip->pci->dev); else @@ -2548,7 +2557,8 @@ static int azx_runtime_suspend(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; - if (!power_save_controller) + if (!power_save_controller || + !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) return -EAGAIN; azx_stop_chip(chip); @@ -3429,39 +3439,30 @@ static void __devexit azx_remove(struct pci_dev *pci) static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { /* CPT */ { PCI_DEVICE(0x8086, 0x1c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* PBG */ { PCI_DEVICE(0x8086, 0x1d20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE}, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Panther Point */ { PCI_DEVICE(0x8086, 0x1e20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point */ { PCI_DEVICE(0x8086, 0x8c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point-LP */ { PCI_DEVICE(0x8086, 0x9c20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Lynx Point-LP */ { PCI_DEVICE(0x8086, 0x9c21), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY 
}, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0c0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, { PCI_DEVICE(0x8086, 0x0d0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | - AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, /* SCH */ { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index d5f3a26d608..3bcb6717235 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -466,6 +466,7 @@ static int parse_output(struct hda_codec *codec) memcpy(cfg->speaker_pins, cfg->line_out_pins, sizeof(cfg->speaker_pins)); cfg->line_outs = 0; + memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins)); } return 0; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c0ce3b1f04b..ad68d223f8a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5407,6 +5407,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO), + SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), @@ -7064,6 +7065,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, + { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", .patch = patch_alc861 }, { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index c03b65af305..054967d8bac 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(arizona_out_ev); static unsigned int arizona_sysclk_48k_rates[] = { 6144000, 12288000, - 22579200, + 24576000, 49152000, 73728000, 98304000, @@ -278,7 +278,7 @@ static unsigned int arizona_sysclk_48k_rates[] = { static unsigned int arizona_sysclk_44k1_rates[] = { 5644800, 11289600, - 24576000, + 22579200, 45158400, 67737600, 90316800, diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c index f994af34f55..e3f0a7f3131 100644 --- a/sound/soc/codecs/cs4271.c +++ b/sound/soc/codecs/cs4271.c @@ -485,7 +485,7 @@ static int cs4271_probe(struct snd_soc_codec *codec) gpio_nreset = cs4271plat->gpio_nreset; if (gpio_nreset >= 0) - if (gpio_request(gpio_nreset, "CS4271 Reset")) + if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset")) gpio_nreset = -EINVAL; if (gpio_nreset >= 0) { /* Reset codec */ @@ -535,15 +535,10 @@ static int cs4271_probe(struct 
snd_soc_codec *codec) static int cs4271_remove(struct snd_soc_codec *codec) { struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); - int gpio_nreset; - gpio_nreset = cs4271->gpio_nreset; - - if (gpio_is_valid(gpio_nreset)) { + if (gpio_is_valid(cs4271->gpio_nreset)) /* Set codec to the reset state */ - gpio_set_value(gpio_nreset, 0); - gpio_free(gpio_nreset); - } + gpio_set_value(cs4271->gpio_nreset, 0); return 0; }; diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 4d8db3685e9..97a81051e88 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c @@ -773,7 +773,6 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; struct cs42l52_private *cs42l52 = snd_soc_codec_get_drvdata(codec); - int ret = 0; u8 iface = 0; switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { @@ -822,7 +821,7 @@ static int cs42l52_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) case SND_SOC_DAIFMT_NB_IF: break; default: - ret = -EINVAL; + return -EINVAL; } cs42l52->config.format = iface; snd_soc_write(codec, CS42L52_IFACE_CTL1, cs42l52->config.format); diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index 1722b586bdb..7394e73fa43 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -42,6 +42,556 @@ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0); +static const struct reg_default wm5102_sysclk_reva_patch[] = { + { 0x3000, 0x2225 }, + { 0x3001, 0x3a03 }, + { 0x3002, 0x0225 }, + { 0x3003, 0x0801 }, + { 0x3004, 0x6249 }, + { 0x3005, 0x0c04 }, + { 0x3006, 0x0225 }, + { 0x3007, 0x5901 }, + { 0x3008, 0xe249 }, + { 0x3009, 0x030d }, + { 0x300a, 0x0249 }, + { 0x300b, 0x2c01 }, + { 0x300c, 0xe249 }, + { 0x300d, 0x4342 }, + { 0x300e, 0xe249 }, + { 0x300f, 0x73c0 }, + { 0x3010, 0x4249 }, + { 0x3011, 0x0c00 }, + { 0x3012, 0x0225 }, + { 0x3013, 0x1f01 }, + { 0x3014, 0x0225 }, + { 0x3015, 0x1e01 }, + { 0x3016, 0x0225 }, + { 0x3017, 0xfa00 }, + { 0x3018, 0x0000 }, + { 0x3019, 0xf000 }, + { 0x301a, 0x0000 }, + { 0x301b, 0xf000 }, + { 0x301c, 0x0000 }, + { 0x301d, 0xf000 }, + { 0x301e, 0x0000 }, + { 0x301f, 0xf000 }, + { 0x3020, 0x0000 }, + { 0x3021, 0xf000 }, + { 0x3022, 0x0000 }, + { 0x3023, 0xf000 }, + { 0x3024, 0x0000 }, + { 0x3025, 0xf000 }, + { 0x3026, 0x0000 }, + { 0x3027, 0xf000 }, + { 0x3028, 0x0000 }, + { 0x3029, 0xf000 }, + { 0x302a, 0x0000 }, + { 0x302b, 0xf000 }, + { 0x302c, 0x0000 }, + { 0x302d, 0xf000 }, + { 0x302e, 0x0000 }, + { 0x302f, 0xf000 }, + { 0x3030, 0x0225 }, + { 0x3031, 0x1a01 }, + { 0x3032, 0x0225 }, + { 0x3033, 0x1e00 }, + { 0x3034, 0x0225 }, + { 0x3035, 0x1f00 }, + { 0x3036, 0x6225 }, + { 0x3037, 0xf800 }, + { 0x3038, 0x0000 }, + { 0x3039, 0xf000 }, + { 0x303a, 0x0000 }, + { 0x303b, 0xf000 }, + { 0x303c, 0x0000 }, + { 0x303d, 0xf000 }, + { 0x303e, 0x0000 }, + { 0x303f, 0xf000 }, + { 0x3040, 0x2226 }, + { 0x3041, 0x3a03 }, + { 0x3042, 0x0226 }, + { 0x3043, 0x0801 }, + { 0x3044, 0x6249 }, + { 0x3045, 0x0c06 }, + { 0x3046, 0x0226 }, + { 0x3047, 0x5901 }, + { 0x3048, 0xe249 }, + { 0x3049, 0x030d }, + { 0x304a, 0x0249 }, + { 0x304b, 0x2c01 }, + { 0x304c, 0xe249 }, + { 0x304d, 0x4342 }, + { 0x304e, 0xe249 }, + { 0x304f, 0x73c0 }, + { 0x3050, 0x4249 }, + { 0x3051, 0x0c00 }, + { 0x3052, 0x0226 }, + { 0x3053, 0x1f01 }, + { 0x3054, 0x0226 }, + { 0x3055, 0x1e01 }, + { 0x3056, 0x0226 }, + { 0x3057, 0xfa00 }, + { 0x3058, 0x0000 }, + { 0x3059, 
0xf000 }, + { 0x305a, 0x0000 }, + { 0x305b, 0xf000 }, + { 0x305c, 0x0000 }, + { 0x305d, 0xf000 }, + { 0x305e, 0x0000 }, + { 0x305f, 0xf000 }, + { 0x3060, 0x0000 }, + { 0x3061, 0xf000 }, + { 0x3062, 0x0000 }, + { 0x3063, 0xf000 }, + { 0x3064, 0x0000 }, + { 0x3065, 0xf000 }, + { 0x3066, 0x0000 }, + { 0x3067, 0xf000 }, + { 0x3068, 0x0000 }, + { 0x3069, 0xf000 }, + { 0x306a, 0x0000 }, + { 0x306b, 0xf000 }, + { 0x306c, 0x0000 }, + { 0x306d, 0xf000 }, + { 0x306e, 0x0000 }, + { 0x306f, 0xf000 }, + { 0x3070, 0x0226 }, + { 0x3071, 0x1a01 }, + { 0x3072, 0x0226 }, + { 0x3073, 0x1e00 }, + { 0x3074, 0x0226 }, + { 0x3075, 0x1f00 }, + { 0x3076, 0x6226 }, + { 0x3077, 0xf800 }, + { 0x3078, 0x0000 }, + { 0x3079, 0xf000 }, + { 0x307a, 0x0000 }, + { 0x307b, 0xf000 }, + { 0x307c, 0x0000 }, + { 0x307d, 0xf000 }, + { 0x307e, 0x0000 }, + { 0x307f, 0xf000 }, + { 0x3080, 0x2227 }, + { 0x3081, 0x3a03 }, + { 0x3082, 0x0227 }, + { 0x3083, 0x0801 }, + { 0x3084, 0x6255 }, + { 0x3085, 0x0c04 }, + { 0x3086, 0x0227 }, + { 0x3087, 0x5901 }, + { 0x3088, 0xe255 }, + { 0x3089, 0x030d }, + { 0x308a, 0x0255 }, + { 0x308b, 0x2c01 }, + { 0x308c, 0xe255 }, + { 0x308d, 0x4342 }, + { 0x308e, 0xe255 }, + { 0x308f, 0x73c0 }, + { 0x3090, 0x4255 }, + { 0x3091, 0x0c00 }, + { 0x3092, 0x0227 }, + { 0x3093, 0x1f01 }, + { 0x3094, 0x0227 }, + { 0x3095, 0x1e01 }, + { 0x3096, 0x0227 }, + { 0x3097, 0xfa00 }, + { 0x3098, 0x0000 }, + { 0x3099, 0xf000 }, + { 0x309a, 0x0000 }, + { 0x309b, 0xf000 }, + { 0x309c, 0x0000 }, + { 0x309d, 0xf000 }, + { 0x309e, 0x0000 }, + { 0x309f, 0xf000 }, + { 0x30a0, 0x0000 }, + { 0x30a1, 0xf000 }, + { 0x30a2, 0x0000 }, + { 0x30a3, 0xf000 }, + { 0x30a4, 0x0000 }, + { 0x30a5, 0xf000 }, + { 0x30a6, 0x0000 }, + { 0x30a7, 0xf000 }, + { 0x30a8, 0x0000 }, + { 0x30a9, 0xf000 }, + { 0x30aa, 0x0000 }, + { 0x30ab, 0xf000 }, + { 0x30ac, 0x0000 }, + { 0x30ad, 0xf000 }, + { 0x30ae, 0x0000 }, + { 0x30af, 0xf000 }, + { 0x30b0, 0x0227 }, + { 0x30b1, 0x1a01 }, + { 0x30b2, 0x0227 }, + { 0x30b3, 0x1e00 }, + { 0x30b4, 0x0227 }, + { 0x30b5, 0x1f00 }, + { 0x30b6, 0x6227 }, + { 0x30b7, 0xf800 }, + { 0x30b8, 0x0000 }, + { 0x30b9, 0xf000 }, + { 0x30ba, 0x0000 }, + { 0x30bb, 0xf000 }, + { 0x30bc, 0x0000 }, + { 0x30bd, 0xf000 }, + { 0x30be, 0x0000 }, + { 0x30bf, 0xf000 }, + { 0x30c0, 0x2228 }, + { 0x30c1, 0x3a03 }, + { 0x30c2, 0x0228 }, + { 0x30c3, 0x0801 }, + { 0x30c4, 0x6255 }, + { 0x30c5, 0x0c06 }, + { 0x30c6, 0x0228 }, + { 0x30c7, 0x5901 }, + { 0x30c8, 0xe255 }, + { 0x30c9, 0x030d }, + { 0x30ca, 0x0255 }, + { 0x30cb, 0x2c01 }, + { 0x30cc, 0xe255 }, + { 0x30cd, 0x4342 }, + { 0x30ce, 0xe255 }, + { 0x30cf, 0x73c0 }, + { 0x30d0, 0x4255 }, + { 0x30d1, 0x0c00 }, + { 0x30d2, 0x0228 }, + { 0x30d3, 0x1f01 }, + { 0x30d4, 0x0228 }, + { 0x30d5, 0x1e01 }, + { 0x30d6, 0x0228 }, + { 0x30d7, 0xfa00 }, + { 0x30d8, 0x0000 }, + { 0x30d9, 0xf000 }, + { 0x30da, 0x0000 }, + { 0x30db, 0xf000 }, + { 0x30dc, 0x0000 }, + { 0x30dd, 0xf000 }, + { 0x30de, 0x0000 }, + { 0x30df, 0xf000 }, + { 0x30e0, 0x0000 }, + { 0x30e1, 0xf000 }, + { 0x30e2, 0x0000 }, + { 0x30e3, 0xf000 }, + { 0x30e4, 0x0000 }, + { 0x30e5, 0xf000 }, + { 0x30e6, 0x0000 }, + { 0x30e7, 0xf000 }, + { 0x30e8, 0x0000 }, + { 0x30e9, 0xf000 }, + { 0x30ea, 0x0000 }, + { 0x30eb, 0xf000 }, + { 0x30ec, 0x0000 }, + { 0x30ed, 0xf000 }, + { 0x30ee, 0x0000 }, + { 0x30ef, 0xf000 }, + { 0x30f0, 0x0228 }, + { 0x30f1, 0x1a01 }, + { 0x30f2, 0x0228 }, + { 0x30f3, 0x1e00 }, + { 0x30f4, 0x0228 }, + { 0x30f5, 0x1f00 }, + { 0x30f6, 0x6228 }, + { 0x30f7, 0xf800 }, + { 0x30f8, 0x0000 }, + { 0x30f9, 0xf000 }, + { 0x30fa, 0x0000 }, + 
{ 0x30fb, 0xf000 }, + { 0x30fc, 0x0000 }, + { 0x30fd, 0xf000 }, + { 0x30fe, 0x0000 }, + { 0x30ff, 0xf000 }, + { 0x3100, 0x222b }, + { 0x3101, 0x3a03 }, + { 0x3102, 0x222b }, + { 0x3103, 0x5803 }, + { 0x3104, 0xe26f }, + { 0x3105, 0x030d }, + { 0x3106, 0x626f }, + { 0x3107, 0x2c01 }, + { 0x3108, 0xe26f }, + { 0x3109, 0x4342 }, + { 0x310a, 0xe26f }, + { 0x310b, 0x73c0 }, + { 0x310c, 0x026f }, + { 0x310d, 0x0c00 }, + { 0x310e, 0x022b }, + { 0x310f, 0x1f01 }, + { 0x3110, 0x022b }, + { 0x3111, 0x1e01 }, + { 0x3112, 0x022b }, + { 0x3113, 0xfa00 }, + { 0x3114, 0x0000 }, + { 0x3115, 0xf000 }, + { 0x3116, 0x0000 }, + { 0x3117, 0xf000 }, + { 0x3118, 0x0000 }, + { 0x3119, 0xf000 }, + { 0x311a, 0x0000 }, + { 0x311b, 0xf000 }, + { 0x311c, 0x0000 }, + { 0x311d, 0xf000 }, + { 0x311e, 0x0000 }, + { 0x311f, 0xf000 }, + { 0x3120, 0x022b }, + { 0x3121, 0x0a01 }, + { 0x3122, 0x022b }, + { 0x3123, 0x1e00 }, + { 0x3124, 0x022b }, + { 0x3125, 0x1f00 }, + { 0x3126, 0x622b }, + { 0x3127, 0xf800 }, + { 0x3128, 0x0000 }, + { 0x3129, 0xf000 }, + { 0x312a, 0x0000 }, + { 0x312b, 0xf000 }, + { 0x312c, 0x0000 }, + { 0x312d, 0xf000 }, + { 0x312e, 0x0000 }, + { 0x312f, 0xf000 }, + { 0x3130, 0x0000 }, + { 0x3131, 0xf000 }, + { 0x3132, 0x0000 }, + { 0x3133, 0xf000 }, + { 0x3134, 0x0000 }, + { 0x3135, 0xf000 }, + { 0x3136, 0x0000 }, + { 0x3137, 0xf000 }, + { 0x3138, 0x0000 }, + { 0x3139, 0xf000 }, + { 0x313a, 0x0000 }, + { 0x313b, 0xf000 }, + { 0x313c, 0x0000 }, + { 0x313d, 0xf000 }, + { 0x313e, 0x0000 }, + { 0x313f, 0xf000 }, + { 0x3140, 0x0000 }, + { 0x3141, 0xf000 }, + { 0x3142, 0x0000 }, + { 0x3143, 0xf000 }, + { 0x3144, 0x0000 }, + { 0x3145, 0xf000 }, + { 0x3146, 0x0000 }, + { 0x3147, 0xf000 }, + { 0x3148, 0x0000 }, + { 0x3149, 0xf000 }, + { 0x314a, 0x0000 }, + { 0x314b, 0xf000 }, + { 0x314c, 0x0000 }, + { 0x314d, 0xf000 }, + { 0x314e, 0x0000 }, + { 0x314f, 0xf000 }, + { 0x3150, 0x0000 }, + { 0x3151, 0xf000 }, + { 0x3152, 0x0000 }, + { 0x3153, 0xf000 }, + { 0x3154, 0x0000 }, + { 0x3155, 0xf000 }, + { 0x3156, 0x0000 }, + { 0x3157, 0xf000 }, + { 0x3158, 0x0000 }, + { 0x3159, 0xf000 }, + { 0x315a, 0x0000 }, + { 0x315b, 0xf000 }, + { 0x315c, 0x0000 }, + { 0x315d, 0xf000 }, + { 0x315e, 0x0000 }, + { 0x315f, 0xf000 }, + { 0x3160, 0x0000 }, + { 0x3161, 0xf000 }, + { 0x3162, 0x0000 }, + { 0x3163, 0xf000 }, + { 0x3164, 0x0000 }, + { 0x3165, 0xf000 }, + { 0x3166, 0x0000 }, + { 0x3167, 0xf000 }, + { 0x3168, 0x0000 }, + { 0x3169, 0xf000 }, + { 0x316a, 0x0000 }, + { 0x316b, 0xf000 }, + { 0x316c, 0x0000 }, + { 0x316d, 0xf000 }, + { 0x316e, 0x0000 }, + { 0x316f, 0xf000 }, + { 0x3170, 0x0000 }, + { 0x3171, 0xf000 }, + { 0x3172, 0x0000 }, + { 0x3173, 0xf000 }, + { 0x3174, 0x0000 }, + { 0x3175, 0xf000 }, + { 0x3176, 0x0000 }, + { 0x3177, 0xf000 }, + { 0x3178, 0x0000 }, + { 0x3179, 0xf000 }, + { 0x317a, 0x0000 }, + { 0x317b, 0xf000 }, + { 0x317c, 0x0000 }, + { 0x317d, 0xf000 }, + { 0x317e, 0x0000 }, + { 0x317f, 0xf000 }, + { 0x3180, 0x2001 }, + { 0x3181, 0xf101 }, + { 0x3182, 0x0000 }, + { 0x3183, 0xf000 }, + { 0x3184, 0x0000 }, + { 0x3185, 0xf000 }, + { 0x3186, 0x0000 }, + { 0x3187, 0xf000 }, + { 0x3188, 0x0000 }, + { 0x3189, 0xf000 }, + { 0x318a, 0x0000 }, + { 0x318b, 0xf000 }, + { 0x318c, 0x0000 }, + { 0x318d, 0xf000 }, + { 0x318e, 0x0000 }, + { 0x318f, 0xf000 }, + { 0x3190, 0x0000 }, + { 0x3191, 0xf000 }, + { 0x3192, 0x0000 }, + { 0x3193, 0xf000 }, + { 0x3194, 0x0000 }, + { 0x3195, 0xf000 }, + { 0x3196, 0x0000 }, + { 0x3197, 0xf000 }, + { 0x3198, 0x0000 }, + { 0x3199, 0xf000 }, + { 0x319a, 0x0000 }, + { 0x319b, 0xf000 }, + { 0x319c, 
0x0000 }, + { 0x319d, 0xf000 }, + { 0x319e, 0x0000 }, + { 0x319f, 0xf000 }, + { 0x31a0, 0x0000 }, + { 0x31a1, 0xf000 }, + { 0x31a2, 0x0000 }, + { 0x31a3, 0xf000 }, + { 0x31a4, 0x0000 }, + { 0x31a5, 0xf000 }, + { 0x31a6, 0x0000 }, + { 0x31a7, 0xf000 }, + { 0x31a8, 0x0000 }, + { 0x31a9, 0xf000 }, + { 0x31aa, 0x0000 }, + { 0x31ab, 0xf000 }, + { 0x31ac, 0x0000 }, + { 0x31ad, 0xf000 }, + { 0x31ae, 0x0000 }, + { 0x31af, 0xf000 }, + { 0x31b0, 0x0000 }, + { 0x31b1, 0xf000 }, + { 0x31b2, 0x0000 }, + { 0x31b3, 0xf000 }, + { 0x31b4, 0x0000 }, + { 0x31b5, 0xf000 }, + { 0x31b6, 0x0000 }, + { 0x31b7, 0xf000 }, + { 0x31b8, 0x0000 }, + { 0x31b9, 0xf000 }, + { 0x31ba, 0x0000 }, + { 0x31bb, 0xf000 }, + { 0x31bc, 0x0000 }, + { 0x31bd, 0xf000 }, + { 0x31be, 0x0000 }, + { 0x31bf, 0xf000 }, + { 0x31c0, 0x0000 }, + { 0x31c1, 0xf000 }, + { 0x31c2, 0x0000 }, + { 0x31c3, 0xf000 }, + { 0x31c4, 0x0000 }, + { 0x31c5, 0xf000 }, + { 0x31c6, 0x0000 }, + { 0x31c7, 0xf000 }, + { 0x31c8, 0x0000 }, + { 0x31c9, 0xf000 }, + { 0x31ca, 0x0000 }, + { 0x31cb, 0xf000 }, + { 0x31cc, 0x0000 }, + { 0x31cd, 0xf000 }, + { 0x31ce, 0x0000 }, + { 0x31cf, 0xf000 }, + { 0x31d0, 0x0000 }, + { 0x31d1, 0xf000 }, + { 0x31d2, 0x0000 }, + { 0x31d3, 0xf000 }, + { 0x31d4, 0x0000 }, + { 0x31d5, 0xf000 }, + { 0x31d6, 0x0000 }, + { 0x31d7, 0xf000 }, + { 0x31d8, 0x0000 }, + { 0x31d9, 0xf000 }, + { 0x31da, 0x0000 }, + { 0x31db, 0xf000 }, + { 0x31dc, 0x0000 }, + { 0x31dd, 0xf000 }, + { 0x31de, 0x0000 }, + { 0x31df, 0xf000 }, + { 0x31e0, 0x0000 }, + { 0x31e1, 0xf000 }, + { 0x31e2, 0x0000 }, + { 0x31e3, 0xf000 }, + { 0x31e4, 0x0000 }, + { 0x31e5, 0xf000 }, + { 0x31e6, 0x0000 }, + { 0x31e7, 0xf000 }, + { 0x31e8, 0x0000 }, + { 0x31e9, 0xf000 }, + { 0x31ea, 0x0000 }, + { 0x31eb, 0xf000 }, + { 0x31ec, 0x0000 }, + { 0x31ed, 0xf000 }, + { 0x31ee, 0x0000 }, + { 0x31ef, 0xf000 }, + { 0x31f0, 0x0000 }, + { 0x31f1, 0xf000 }, + { 0x31f2, 0x0000 }, + { 0x31f3, 0xf000 }, + { 0x31f4, 0x0000 }, + { 0x31f5, 0xf000 }, + { 0x31f6, 0x0000 }, + { 0x31f7, 0xf000 }, + { 0x31f8, 0x0000 }, + { 0x31f9, 0xf000 }, + { 0x31fa, 0x0000 }, + { 0x31fb, 0xf000 }, + { 0x31fc, 0x0000 }, + { 0x31fd, 0xf000 }, + { 0x31fe, 0x0000 }, + { 0x31ff, 0xf000 }, + { 0x024d, 0xff50 }, + { 0x0252, 0xff50 }, + { 0x0259, 0x0112 }, + { 0x025e, 0x0112 }, +}; + +static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct arizona *arizona = dev_get_drvdata(codec->dev); + struct regmap *regmap = codec->control_data; + const struct reg_default *patch = NULL; + int i, patch_size; + + switch (arizona->rev) { + case 0: + patch = wm5102_sysclk_reva_patch; + patch_size = ARRAY_SIZE(wm5102_sysclk_reva_patch); + break; + } + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + if (patch) + for (i = 0; i < patch_size; i++) + regmap_write(regmap, patch[i].reg, + patch[i].def); + break; + + default: + break; + } + + return 0; +} + static const struct snd_kcontrol_new wm5102_snd_controls[] = { SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL, ARIZONA_IN1_OSR_SHIFT, 1, 0), @@ -297,7 +847,7 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux = static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, - 0, NULL, 0), + 0, wm5102_sysclk_ev, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, diff 
--git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 5421fd9fbcb..4c0a8e49613 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c @@ -782,7 +782,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream, wm8978->mclk_idx = -1; f_sel = wm8978->f_mclk; } else { - if (!wm8978->f_pllout) { + if (!wm8978->f_opclk) { /* We only enter here, if OPCLK is not used */ int ret = wm8978_configure_pll(codec); if (ret < 0) diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index b9f16598324..2ba08148655 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -71,7 +71,6 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id) printk(KERN_WARNING "%s: got err interrupt 0x%lx\n", __func__, cause); writel(cause, priv->io + KIRKWOOD_ERR_CAUSE); - return IRQ_HANDLED; } /* we've enabled only bytes interrupts ... */ @@ -178,7 +177,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) } dram = mv_mbus_dram_info(); - addr = virt_to_phys(substream->dma_buffer.area); + addr = substream->dma_buffer.addr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { prdata->play_stream = substream; kirkwood_dma_conf_mbus_windows(priv->io, diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index 542538d10ab..1d5db484d2d 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c @@ -95,7 +95,7 @@ static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate) do { cpu_relax(); value = readl(io + KIRKWOOD_DCO_SPCR_STATUS); - value &= KIRKWOOD_DCO_SPCR_STATUS; + value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK; } while (value == 0); } @@ -180,67 +180,72 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai); - unsigned long value; - - /* - * specs says KIRKWOOD_PLAYCTL must be read 2 times before - * changing it. So read 1 time here and 1 later. - */ - value = readl(priv->io + KIRKWOOD_PLAYCTL); + uint32_t ctl, value; + + ctl = readl(priv->io + KIRKWOOD_PLAYCTL); + if (ctl & KIRKWOOD_PLAYCTL_PAUSE) { + unsigned timeout = 5000; + /* + * The Armada510 spec says that if we enter pause mode, the + * busy bit must be read back as clear _twice_. Make sure + * we respect that otherwise we get DMA underruns. 
+ */ + do { + value = ctl; + ctl = readl(priv->io + KIRKWOOD_PLAYCTL); + if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)) + break; + udelay(1); + } while (timeout--); + + if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY) + dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n", + ctl); + } switch (cmd) { case SNDRV_PCM_TRIGGER_START: - /* stop audio, enable interrupts */ - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value |= KIRKWOOD_PLAYCTL_PAUSE; - writel(value, priv->io + KIRKWOOD_PLAYCTL); - value = readl(priv->io + KIRKWOOD_INT_MASK); value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES; writel(value, priv->io + KIRKWOOD_INT_MASK); /* configure audio & enable i2s playback */ - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value &= ~KIRKWOOD_PLAYCTL_BURST_MASK; - value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE + ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK; + ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE | KIRKWOOD_PLAYCTL_SPDIF_EN); if (priv->burst == 32) - value |= KIRKWOOD_PLAYCTL_BURST_32; + ctl |= KIRKWOOD_PLAYCTL_BURST_32; else - value |= KIRKWOOD_PLAYCTL_BURST_128; - value |= KIRKWOOD_PLAYCTL_I2S_EN; - writel(value, priv->io + KIRKWOOD_PLAYCTL); + ctl |= KIRKWOOD_PLAYCTL_BURST_128; + ctl |= KIRKWOOD_PLAYCTL_I2S_EN; + writel(ctl, priv->io + KIRKWOOD_PLAYCTL); break; case SNDRV_PCM_TRIGGER_STOP: /* stop audio, disable interrupts */ - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; - writel(value, priv->io + KIRKWOOD_PLAYCTL); + ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; + writel(ctl, priv->io + KIRKWOOD_PLAYCTL); value = readl(priv->io + KIRKWOOD_INT_MASK); value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES; writel(value, priv->io + KIRKWOOD_INT_MASK); /* disable all playbacks */ - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN); - writel(value, priv->io + KIRKWOOD_PLAYCTL); + ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN); + writel(ctl, priv->io + KIRKWOOD_PLAYCTL); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; - writel(value, priv->io + KIRKWOOD_PLAYCTL); + ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE; + writel(ctl, priv->io + KIRKWOOD_PLAYCTL); break; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - value = readl(priv->io + KIRKWOOD_PLAYCTL); - value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE); - writel(value, priv->io + KIRKWOOD_PLAYCTL); + ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE); + writel(ctl, priv->io + KIRKWOOD_PLAYCTL); break; default: @@ -260,11 +265,6 @@ static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream, switch (cmd) { case SNDRV_PCM_TRIGGER_START: - /* stop audio, enable interrupts */ - value = readl(priv->io + KIRKWOOD_RECCTL); - value |= KIRKWOOD_RECCTL_PAUSE; - writel(value, priv->io + KIRKWOOD_RECCTL); - value = readl(priv->io + KIRKWOOD_INT_MASK); value |= KIRKWOOD_INT_CAUSE_REC_BYTES; writel(value, priv->io + KIRKWOOD_INT_MASK); diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index aa037b292f3..c294fbb523f 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -523,16 +523,24 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd, if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { /* - * write a data to saif data register 
to trigger - * the transfer + * write data to saif data register to trigger + * the transfer. + * For 24-bit format the 32-bit FIFO register stores + * only one channel, so we need to write twice. + * This is also safe for the other non 24-bit formats. */ __raw_writel(0, saif->base + SAIF_DATA); + __raw_writel(0, saif->base + SAIF_DATA); } else { /* - * read a data from saif data register to trigger - * the receive + * read data from saif data register to trigger + * the receive. + * For 24-bit format the 32-bit FIFO register stores + * only one channel, so we need to read twice. + * This is also safe for the other non 24-bit formats. */ __raw_readl(saif->base + SAIF_DATA); + __raw_readl(saif->base + SAIF_DATA); } master_saif->ongoing = 1; @@ -812,3 +820,4 @@ module_platform_driver(mxs_saif_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("MXS ASoC SAIF driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mxs-saif"); diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index e7b83179aca..3c7c3a59ed3 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig @@ -207,6 +207,8 @@ config SND_SOC_BELLS select SND_SOC_WM5102 select SND_SOC_WM5110 select SND_SOC_WM9081 + select SND_SOC_WM0010 + select SND_SOC_WM1250_EV1 config SND_SOC_LOWLAND tristate "Audio support for Wolfson Lowland" diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c index b0d46d63d55..a2ca1567b9e 100644 --- a/sound/soc/samsung/bells.c +++ b/sound/soc/samsung/bells.c @@ -212,7 +212,7 @@ static struct snd_soc_dai_link bells_dai_wm5102[] = { { .name = "Sub", .stream_name = "Sub", - .cpu_dai_name = "wm5110-aif3", + .cpu_dai_name = "wm5102-aif3", .codec_dai_name = "wm9081-hifi", .codec_name = "wm9081.1-006c", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d1198627fc4..10d21be383f 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2786,8 +2786,9 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, val = (ucontrol->value.integer.value[0] + min) & mask; val = val << shift; - if (snd_soc_update_bits_locked(codec, reg, val_mask, val)) - return err; + err = snd_soc_update_bits_locked(codec, reg, val_mask, val); + if (err < 0) + return err; if (snd_soc_volsw_is_stereo(mc)) { val_mask = mask << rshift; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index d0a4be38dc0..6e35bcae02d 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3745,7 +3745,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card) { struct snd_soc_codec *codec; - list_for_each_entry(codec, &card->codec_dev_list, list) { + list_for_each_entry(codec, &card->codec_dev_list, card_list) { soc_dapm_shutdown_codec(&codec->dapm); if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) snd_soc_dapm_set_bias_level(&codec->dapm, diff --git a/sound/usb/card.c b/sound/usb/card.c index 282f0fc9fed..dbf7999d18b 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -559,9 +559,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, return; card = chip->card; - mutex_lock(&register_mutex); down_write(&chip->shutdown_rwsem); chip->shutdown = 1; + up_write(&chip->shutdown_rwsem); + + mutex_lock(&register_mutex); chip->num_interfaces--; if (chip->num_interfaces <= 0) { snd_card_disconnect(card); @@ -582,11 +584,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; - up_write(&chip->shutdown_rwsem); mutex_unlock(&register_mutex); 
snd_card_free_when_closed(card); } else { - up_write(&chip->shutdown_rwsem); mutex_unlock(&register_mutex); } } diff --git a/sound/usb/midi.c b/sound/usb/midi.c index c83f6143c0e..eeefbce3873 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint { struct snd_usb_midi_out_endpoint* ep; struct snd_rawmidi_substream *substream; int active; + bool autopm_reference; uint8_t cable; /* cable number << 4 */ uint8_t state; #define STATE_UNKNOWN 0 @@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) return -ENXIO; } err = usb_autopm_get_interface(umidi->iface); - if (err < 0) + port->autopm_reference = err >= 0; + if (err < 0 && err != -EACCES) return -EIO; substream->runtime->private_data = port; port->state = STATE_UNKNOWN; @@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { struct snd_usb_midi* umidi = substream->rmidi->private_data; + struct usbmidi_out_port *port = substream->runtime->private_data; substream_open(substream, 0); - usb_autopm_put_interface(umidi->iface); + if (port->autopm_reference) + usb_autopm_put_interface(umidi->iface); return 0; } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 5c12a3fe8c3..ef6fa24fc47 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -459,7 +459,7 @@ static int configure_endpoint(struct snd_usb_substream *subs) return ret; if (subs->sync_endpoint) - ret = snd_usb_endpoint_set_params(subs->data_endpoint, + ret = snd_usb_endpoint_set_params(subs->sync_endpoint, subs->pcm_format, subs->channels, subs->period_bytes, diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore index 8a83dd2ffc1..d42073f1260 100644 --- a/tools/power/cpupower/.gitignore +++ b/tools/power/cpupower/.gitignore @@ -20,3 +20,10 @@ utils/cpufreq-set.o utils/cpufreq-aperf.o cpupower bench/cpufreq-bench +debug/kernel/Module.symvers +debug/i386/centrino-decode +debug/i386/dump_psb +debug/i386/intel_gsic +debug/i386/powernow-k8-decode +debug/x86_64/centrino-decode +debug/x86_64/powernow-k8-decode diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index cf397bd26d0..d875a74a3bd 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -253,7 +253,8 @@ clean: | xargs rm -f -rm -f $(OUTPUT)cpupower -rm -f $(OUTPUT)libcpupower.so* - -rm -rf $(OUTPUT)po/*.{gmo,pot} + -rm -rf $(OUTPUT)po/*.gmo + -rm -rf $(OUTPUT)po/*.pot $(MAKE) -C bench O=$(OUTPUT) clean diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile index 3ba158f0e28..c05cc0ac80c 100644 --- a/tools/power/cpupower/debug/i386/Makefile +++ b/tools/power/cpupower/debug/i386/Makefile @@ -26,7 +26,10 @@ $(OUTPUT)powernow-k8-decode: powernow-k8-decode.c all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode clean: - rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode} + rm -rf $(OUTPUT)centrino-decode + rm -rf $(OUTPUT)dump_psb + rm -rf $(OUTPUT)intel_gsic + rm -rf $(OUTPUT)powernow-k8-decode install: $(INSTALL) -d $(DESTDIR)${bindir} diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1 index 1141c207371..e01c35d13b6 100644 --- a/tools/power/cpupower/man/cpupower-monitor.1 +++ b/tools/power/cpupower/man/cpupower-monitor.1 @@ -7,11 +7,11 @@ cpupower\-monitor \- Report processor frequency and idle 
statistics .RB "\-l" .B cpupower monitor -.RB [ "\-m <mon1>," [ "<mon2>,..." ] ] +.RB [ -c ] [ "\-m <mon1>," [ "<mon2>,..." ] ] .RB [ "\-i seconds" ] .br .B cpupower monitor -.RB [ "\-m <mon1>," [ "<mon2>,..." ] ] +.RB [ -c ][ "\-m <mon1>," [ "<mon2>,..." ] ] .RB command .br .SH DESCRIPTION @@ -64,6 +64,17 @@ Only display specific monitors. Use the monitor string(s) provided by \-l option Measure intervall. .RE .PP +\-c +.RS 4 +Schedule the process on every core before starting and ending measuring. +This could be needed for the Idle_Stats monitor when no other MSR based +monitor (has to be run on the core that is measured) is run in parallel. +This is to wake up the processors from deeper sleep states and let the +kernel re +-account its cpuidle (C-state) information before reading the +cpuidle timings from sysfs. +.RE +.PP command .RS 4 Measure idle and frequency characteristics of an arbitrary command/workload. diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 906895d21cc..93b0aa74ca0 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c @@ -158,6 +158,8 @@ out: cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ + case 0x3A: /* IVB */ + case 0x3E: /* IVB Xeon */ cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; cpu_info->caps |= CPUPOWER_CAP_IS_SNB; break; diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 2eb584cf2f5..aa9e95486a2 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h @@ -92,6 +92,14 @@ extern int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info); extern struct cpupower_cpu_info cpupower_cpu_info; /* cpuid and cpuinfo helpers **************************/ +struct cpuid_core_info { + int pkg; + int core; + int cpu; + + /* flags */ + unsigned int is_online:1; +}; /* CPU topology/hierarchy parsing ******************/ struct cpupower_topology { @@ -101,18 +109,12 @@ struct cpupower_topology { unsigned int threads; /* per core */ /* Array gets mallocated with cores entries, holding per core info */ - struct { - int pkg; - int core; - int cpu; - - /* flags */ - unsigned int is_online:1; - } *core_info; + struct cpuid_core_info *core_info; }; extern int get_cpu_topology(struct cpupower_topology *cpu_top); extern void cpu_topology_release(struct cpupower_topology cpu_top); + /* CPU topology/hierarchy parsing ******************/ /* X86 ONLY ****************************************/ diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 96e28c124b5..38ab9162946 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c @@ -37,25 +37,6 @@ unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) return (unsigned int) numread; } -static unsigned int sysfs_write_file(const char *path, - const char *value, size_t len) -{ - int fd; - ssize_t numwrite; - - fd = open(path, O_WRONLY); - if (fd == -1) - return 0; - - numwrite = write(fd, value, len); - if (numwrite < 1) { - close(fd); - return 0; - } - close(fd); - return (unsigned int) numwrite; -} - /* * Detect whether a CPU is online * diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 4eae2c47ba4..c13120af519 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ 
b/tools/power/cpupower/utils/helpers/topology.c @@ -20,9 +20,8 @@ #include <helpers/sysfs.h> /* returns -1 on failure, 0 on success */ -int sysfs_topology_read_file(unsigned int cpu, const char *fname) +static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *result) { - unsigned long value; char linebuf[MAX_LINE_LEN]; char *endp; char path[SYSFS_PATH_MAX]; @@ -31,20 +30,12 @@ int sysfs_topology_read_file(unsigned int cpu, const char *fname) cpu, fname); if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) return -1; - value = strtoul(linebuf, &endp, 0); + *result = strtol(linebuf, &endp, 0); if (endp == linebuf || errno == ERANGE) return -1; - return value; + return 0; } -struct cpuid_core_info { - unsigned int pkg; - unsigned int thread; - unsigned int cpu; - /* flags */ - unsigned int is_online:1; -}; - static int __compare(const void *t1, const void *t2) { struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1; @@ -53,9 +44,9 @@ static int __compare(const void *t1, const void *t2) return -1; else if (top1->pkg > top2->pkg) return 1; - else if (top1->thread < top2->thread) + else if (top1->core < top2->core) return -1; - else if (top1->thread > top2->thread) + else if (top1->core > top2->core) return 1; else if (top1->cpu < top2->cpu) return -1; @@ -73,28 +64,42 @@ static int __compare(const void *t1, const void *t2) */ int get_cpu_topology(struct cpupower_topology *cpu_top) { - int cpu, cpus = sysconf(_SC_NPROCESSORS_CONF); + int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF); - cpu_top->core_info = malloc(sizeof(struct cpupower_topology) * cpus); + cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus); if (cpu_top->core_info == NULL) return -ENOMEM; cpu_top->pkgs = cpu_top->cores = 0; for (cpu = 0; cpu < cpus; cpu++) { cpu_top->core_info[cpu].cpu = cpu; cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); - cpu_top->core_info[cpu].pkg = - sysfs_topology_read_file(cpu, "physical_package_id"); - if ((int)cpu_top->core_info[cpu].pkg != -1 && - cpu_top->core_info[cpu].pkg > cpu_top->pkgs) - cpu_top->pkgs = cpu_top->core_info[cpu].pkg; - cpu_top->core_info[cpu].core = - sysfs_topology_read_file(cpu, "core_id"); + if(sysfs_topology_read_file( + cpu, + "physical_package_id", + &(cpu_top->core_info[cpu].pkg)) < 0) + return -1; + if(sysfs_topology_read_file( + cpu, + "core_id", + &(cpu_top->core_info[cpu].core)) < 0) + return -1; } - cpu_top->pkgs++; qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info), __compare); + /* Count the number of distinct pkgs values. This works + because the primary sort of the core_info struct was just + done by pkg value. */ + last_pkg = cpu_top->core_info[0].pkg; + for(cpu = 1; cpu < cpus; cpu++) { + if(cpu_top->core_info[cpu].pkg != last_pkg) { + last_pkg = cpu_top->core_info[cpu].pkg; + cpu_top->pkgs++; + } + } + cpu_top->pkgs++; + /* Intel's cores count is not consecutively numbered, there may * be a core_id of 3, but none of 2. 
Assume there always is 0 * Get amount of cores by counting duplicates in a package diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index 0d6571e418d..c4bae9203a6 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -39,6 +39,7 @@ static int mode; static int interval = 1; static char *show_monitors_param; static struct cpupower_topology cpu_top; +static unsigned int wake_cpus; /* ToDo: Document this in the manpage */ static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; @@ -84,7 +85,7 @@ int fill_string_with_spaces(char *s, int n) void print_header(int topology_depth) { int unsigned mon; - int state, need_len, pr_mon_len; + int state, need_len; cstate_t s; char buf[128] = ""; int percent_width = 4; @@ -93,7 +94,6 @@ void print_header(int topology_depth) printf("%s|", buf); for (mon = 0; mon < avail_monitors; mon++) { - pr_mon_len = 0; need_len = monitors[mon]->hw_states_num * (percent_width + 3) - 1; if (mon != 0) { @@ -315,16 +315,28 @@ int fork_it(char **argv) int do_interval_measure(int i) { unsigned int num; + int cpu; + + if (wake_cpus) + for (cpu = 0; cpu < cpu_count; cpu++) + bind_cpu(cpu); for (num = 0; num < avail_monitors; num++) { dprint("HW C-state residency monitor: %s - States: %d\n", monitors[num]->name, monitors[num]->hw_states_num); monitors[num]->start(); } + sleep(i); + + if (wake_cpus) + for (cpu = 0; cpu < cpu_count; cpu++) + bind_cpu(cpu); + for (num = 0; num < avail_monitors; num++) monitors[num]->stop(); + return 0; } @@ -333,7 +345,7 @@ static void cmdline(int argc, char *argv[]) int opt; progname = basename(argv[0]); - while ((opt = getopt(argc, argv, "+li:m:")) != -1) { + while ((opt = getopt(argc, argv, "+lci:m:")) != -1) { switch (opt) { case 'l': if (mode) @@ -352,6 +364,9 @@ static void cmdline(int argc, char *argv[]) mode = show; show_monitors_param = optarg; break; + case 'c': + wake_cpus = 1; + break; default: print_wrong_arg_exit(); } diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h index 9312ee1f2db..9e43f3371fb 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h @@ -65,4 +65,21 @@ extern long long timespec_diff_us(struct timespec start, struct timespec end); "could be inaccurate\n"), mes, ov); \ } + +/* Taken over from x86info project sources -> return 0 on success */ +#include <sched.h> +#include <sys/types.h> +#include <unistd.h> +static inline int bind_cpu(int cpu) +{ + cpu_set_t set; + + if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { + CPU_ZERO(&set); + CPU_SET(cpu, &set); + return sched_setaffinity(getpid(), sizeof(set), &set); + } + return 1; +} + #endif /* __CPUIDLE_INFO_HW__ */ diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c index a1bc07cd53e..a99b43b97d6 100644 --- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c +++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c @@ -150,9 +150,15 @@ static struct cpuidle_monitor *snb_register(void) || cpupower_cpu_info.family != 6) return NULL; - if (cpupower_cpu_info.model != 0x2A - && cpupower_cpu_info.model != 0x2D) + switch (cpupower_cpu_info.model) { + case 0x2A: /* SNB */ + case 0x2D: /* SNB Xeon */ + case 0x3A: /* IVB */ + case 0x3E: /* IVB Xeon */ + break; + default: 
return NULL; + } is_valid = calloc(cpu_count, sizeof(int)); for (num = 0; num < SNB_CSTATE_COUNT; num++) { diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 2655ae9a3ad..ea095abbe97 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -206,8 +206,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) retval = pread(fd, msr, sizeof *msr, offset); close(fd); - if (retval != sizeof *msr) + if (retval != sizeof *msr) { + fprintf(stderr, "%s offset 0x%zx read failed\n", pathname, offset); return -1; + } return 0; } @@ -1101,7 +1103,9 @@ void turbostat_loop() restart: retval = for_all_cpus(get_counters, EVEN_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { re_initialize(); goto restart; } @@ -1114,7 +1118,9 @@ restart: } sleep(interval_sec); retval = for_all_cpus(get_counters, ODD_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { re_initialize(); goto restart; } @@ -1126,7 +1132,9 @@ restart: flush_stdout(); sleep(interval_sec); retval = for_all_cpus(get_counters, EVEN_COUNTERS); - if (retval) { + if (retval < -1) { + exit(retval); + } else if (retval == -1) { re_initialize(); goto restart; } @@ -1545,8 +1553,11 @@ void turbostat_init() int fork_it(char **argv) { pid_t child_pid; + int status; - for_all_cpus(get_counters, EVEN_COUNTERS); + status = for_all_cpus(get_counters, EVEN_COUNTERS); + if (status) + exit(status); /* clear affinity side-effect of get_counters() */ sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); @@ -1556,7 +1567,6 @@ int fork_it(char **argv) /* child */ execvp(argv[0], argv); } else { - int status; /* parent */ if (child_pid == -1) { @@ -1568,7 +1578,7 @@ int fork_it(char **argv) signal(SIGQUIT, SIG_IGN); if (waitpid(child_pid, &status, 0) == -1) { perror("wait"); - exit(1); + exit(status); } } /* @@ -1585,7 +1595,7 @@ int fork_it(char **argv) fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0); - return 0; + return status; } void cmdline(int argc, char **argv) @@ -1594,7 +1604,7 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt(argc, argv, "+pPSvisc:sC:m:M:")) != -1) { + while ((opt = getopt(argc, argv, "+pPSvi:sc:sC:m:M:")) != -1) { switch (opt) { case 'p': show_core_only++; |