310 files changed, 4361 insertions, 2200 deletions
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644
index 00000000000..ae8af1694e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -0,0 +1,93 @@
+Pinctrl-based I2C Bus Mux
+
+This binding describes an I2C bus multiplexer that uses pin multiplexing to
+route the I2C signals, and represents the pin multiplexing configuration
+using the pinctrl device tree bindings.
+
+                                 +-----+  +-----+
+                                 | dev |  | dev |
+    +------------------------+   +-----+  +-----+
+    | SoC                    |      |        |
+    |                   /----|------+--------+
+    |   +---+   +------+     | child bus A, on first set of pins
+    |   |I2C|---|Pinmux|     |
+    |   +---+   +------+     | child bus B, on second set of pins
+    |                   \----|------+--------+--------+
+    |                        |      |        |        |
+    +------------------------+   +-----+  +-----+  +-----+
+                                 | dev |  | dev |  | dev |
+                                 +-----+  +-----+  +-----+
+
+Required properties:
+- compatible: i2c-mux-pinctrl
+- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
+  port is connected to.
+
+Also required are:
+
+* Standard pinctrl properties that specify the pin mux state for each child
+  bus. See ../pinctrl/pinctrl-bindings.txt.
+
+* Standard I2C mux properties. See mux.txt in this directory.
+
+* I2C child bus nodes. See mux.txt in this directory.
+
+For each named state defined in the pinctrl-names property, an I2C child bus
+will be created. I2C child bus numbers are assigned based on the index into
+the pinctrl-names property.
+
+The only exception is that no bus will be created for a state named "idle". If
+such a state is defined, it must be the last entry in pinctrl-names. For
+example:
+
+  pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
+  pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
+  pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)
+
+Whenever an access is made to a device on a child bus, the relevant pinctrl
+state will be programmed into hardware.
+
+If an idle state is defined, whenever an access is not being made to a device
+on a child bus, the idle pinctrl state will be programmed into hardware.
+
+If an idle state is not defined, the most recently used pinctrl state will be
+left programmed into hardware whenever no access is being made of a device on
+a child bus.
+
+Example:
+
+	i2cmux {
+		compatible = "i2c-mux-pinctrl";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c-parent = <&i2c1>;
+
+		pinctrl-names = "ddc", "pta", "idle";
+		pinctrl-0 = <&state_i2cmux_ddc>;
+		pinctrl-1 = <&state_i2cmux_pta>;
+		pinctrl-2 = <&state_i2cmux_idle>;
+
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			eeprom {
+				compatible = "eeprom";
+				reg = <0x50>;
+			};
+		};
+
+		i2c@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			eeprom {
+				compatible = "eeprom";
+				reg = <0x50>;
+			};
+		};
+	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c45513d806a..a92c5ebf373 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	sched_debug	[KNL] Enables verbose scheduler debug messages.
 
+	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
+			xtime_lock contention on larger systems, and/or RCU lock
+			contention on all systems with CONFIG_MAXSMP set.
+			Format: { "0" | "1" }
+			0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1")
+			1 -- enable.
+			Note: increases power consumption, thus should only be
+			enabled if running jitter sensitive (HPC/RT) workloads.
+
 	security=	[SECURITY] Choose a security module to enable at boot. If
 			this boot parameter is not specified, only the first
 			security module asking for security registration will be
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644
index 00000000000..37067cf455f
--- /dev/null
+++ b/Documentation/vm/frontswap.txt
@@ -0,0 +1,278 @@
+Frontswap provides a "transcendent memory" interface for swap pages.
+In some environments, dramatic performance savings may be obtained because
+swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
+
+(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
+and the only necessary changes to the core kernel for transcendent memory;
+all other supporting code -- the "backends" -- is implemented as drivers.
+See the LWN.net article "Transcendent memory in a nutshell" for a detailed
+overview of frontswap and related kernel parts:
+https://lwn.net/Articles/454795/ )
+
+Frontswap is so named because it can be thought of as the opposite of
+a "backing" store for a swap device. The storage is assumed to be
+a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
+to the requirements of transcendent memory (such as Xen's "tmem", or
+in-kernel compressed memory, aka "zcache", or future RAM-like devices);
+this pseudo-RAM device is not directly accessible or addressable by the
+kernel and is of unknown and possibly time-varying size. The driver
+links itself to frontswap by calling frontswap_register_ops to set the
+frontswap_ops functions appropriately, and the functions it provides must
+conform to certain policies as follows:
+
+An "init" prepares the device to receive frontswap pages associated
+with the specified swap device number (aka "type"). A "store" will
+copy the page to transcendent memory and associate it with the type and
+offset associated with the page. A "load" will copy the page, if found,
+from transcendent memory into kernel memory, but will NOT remove the page
+from transcendent memory. An "invalidate_page" will remove the page
+from transcendent memory and an "invalidate_area" will remove ALL pages
+associated with the swap type (e.g., like swapoff) and notify the "device"
+to refuse further stores with that swap type.
+
+Once a page is successfully stored, a matching load on the page will normally
+succeed. So when the kernel finds itself in a situation where it needs
+to swap out a page, it first attempts to use frontswap. If the store returns
+success, the data has been successfully saved to transcendent memory and
+a disk write and, if the data is later read back, a disk read are avoided.
+If a store returns failure, transcendent memory has rejected the data, and the
+page can be written to swap as usual.
+
+If a backend chooses, frontswap can be configured as a "writethrough
+cache" by calling frontswap_writethrough(). In this mode, the reduction
+in swap device writes is lost (and also a non-trivial performance advantage)
+in order to allow the backend to arbitrarily "reclaim" space used to
+store frontswap pages to more completely manage its memory usage.
+
+Note that if a page is stored and the page already exists in transcendent memory
+(a "duplicate" store), either the store succeeds and the data is overwritten,
+or the store fails AND the page is invalidated. This ensures stale data may
+never be obtained from frontswap.
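+
+As an illustration, a trivial backend would look something like the
+sketch below. (Illustrative only: the frontswap_ops callbacks are those
+described above, but the toy_* helper functions are hypothetical.)
+
+	#include <linux/module.h>
+	#include <linux/frontswap.h>
+
+	/* prepare to track pages for this swap device ("type") */
+	static void toy_init(unsigned type)
+	{
+	}
+
+	/* copy the page NOW, or reject it (nonzero) if there is no room;
+	 * a rejected page falls back to the real swap device */
+	static int toy_store(unsigned type, pgoff_t offset, struct page *page)
+	{
+		return toy_copy_in(type, offset, page);
+	}
+
+	/* fill the page from our copy; the copy is NOT removed */
+	static int toy_load(unsigned type, pgoff_t offset, struct page *page)
+	{
+		return toy_copy_out(type, offset, page);
+	}
+
+	static void toy_invalidate_page(unsigned type, pgoff_t offset)
+	{
+		toy_drop(type, offset);
+	}
+
+	/* e.g., at swapoff time: drop ALL pages for this swap type */
+	static void toy_invalidate_area(unsigned type)
+	{
+		toy_drop_all(type);
+	}
+
+	static struct frontswap_ops toy_ops = {
+		.init		 = toy_init,
+		.store		 = toy_store,
+		.load		 = toy_load,
+		.invalidate_page = toy_invalidate_page,
+		.invalidate_area = toy_invalidate_area,
+	};
+
+	static int __init toy_frontswap_init(void)
+	{
+		frontswap_register_ops(&toy_ops);
+		return 0;
+	}
+	module_init(toy_frontswap_init);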
+
+If properly configured, monitoring of frontswap is done via debugfs in
+the /sys/kernel/debug/frontswap directory. The effectiveness of
+frontswap can be measured (across all swap devices) with:
+
+failed_stores	- how many store attempts have failed
+loads		- how many loads were attempted (all should succeed)
+succ_stores	- how many store attempts have succeeded
+invalidates	- how many invalidates were attempted
+
+A backend implementation may provide additional metrics.
+
+FAQ
+
+1) Where's the value?
+
+When a workload starts swapping, performance falls through the floor.
+Frontswap significantly increases performance in many such workloads by
+providing a clean, dynamic interface to read and write swap pages to
+"transcendent memory" that is otherwise not directly addressable by the kernel.
+This interface is ideal when data is transformed to a different form
+and size (such as with compression) or secretly moved (as might be
+useful for write-balancing for some RAM-like devices). Swap pages (and
+evicted page-cache pages) are a great use for this kind of slower-than-RAM-
+but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
+cleancache) interface to transcendent memory provides a nice way to read
+and write -- and indirectly "name" -- the pages.
+
+Frontswap -- and cleancache -- with a fairly small impact on the kernel,
+provides a huge amount of flexibility for more dynamic, flexible RAM
+utilization in various system configurations:
+
+In the single kernel case, aka "zcache", pages are compressed and
+stored in local memory, thus increasing the total anonymous pages
+that can be safely kept in RAM. Zcache essentially trades off CPU
+cycles used in compression/decompression for better memory utilization.
+Benchmarks have shown little or no impact when memory pressure is
+low while providing a significant performance improvement (25%+)
+on some workloads under high memory pressure.
+
+"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
+support for clustered systems. Frontswap pages are locally compressed
+as in zcache, but then "remotified" to another system's RAM. This
+allows RAM to be dynamically load-balanced back-and-forth as needed,
+i.e. when system A is overcommitted, it can swap to system B, and
+vice versa. RAMster can also be configured as a memory server so
+many servers in a cluster can swap, dynamically as needed, to a single
+server configured with a large amount of RAM... without pre-configuring
+how much of the RAM is available for each of the clients!
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines. This is really hard to do with RAM and efforts to do
+it well with no kernel changes have essentially failed (except in some
+well-publicized special-case workloads).
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization. And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "selfballooning"), sudden unexpected
+memory pressure may result in swapping; frontswap allows those pages
+to be swapped to and from hypervisor RAM (if overall host system memory
+conditions allow), thus mitigating the potentially awful performance impact
+of unplanned swapping.
+
+A KVM implementation is underway and has been RFC'ed to lkml. And,
+using frontswap, investigation is also underway on the use of NVM as
+a memory extension technology.
+
+2) Sure there may be performance advantages in some situations, but
+   what's the space/time overhead of frontswap?
+
+If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
+nothingness and the only overhead is a few extra bytes per swapon'ed
+swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
+registers, there is one extra compare of a global variable against zero
+for every swap page read or written. If CONFIG_FRONTSWAP is enabled
+AND a frontswap backend registers AND the backend fails every "store"
+request (i.e. provides no memory despite claiming it might),
+CPU overhead is still negligible -- and since every frontswap fail
+precedes a swap page write-to-disk, the system is highly likely
+to be I/O bound and using a small fraction of a percent of a CPU
+will be irrelevant anyway.
+
+As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
+registers, one bit is allocated for every swap page for every swap
+device that is swapon'd. This is added to the EIGHT bits (which
+was sixteen until about 2.6.34) that the kernel already allocates
+for every swap page for every swap device that is swapon'd. (Hugh
+Dickins has observed that frontswap could probably steal one of
+the existing eight bits, but let's worry about that minor optimization
+later.) For very large swap disks (which are rare) on a standard
+4K pagesize, this is 1MB per 32GB swap (one bit per 4K page:
+32GB / 4K = 8M pages = 8M bits = 1MB).
+
+When swap pages are stored in transcendent memory instead of written
+out to disk, there is a side effect that this may create more memory
+pressure that can potentially outweigh the other advantages. A
+backend, such as zcache, must implement policies to carefully (but
+dynamically) manage memory limits to ensure this doesn't happen.
+
+3) OK, how about a quick overview of what this frontswap patch does
+   in terms that a kernel hacker can grok?
+
+Let's assume that a frontswap "backend" has registered during
+kernel initialization; this registration indicates that this
+frontswap backend has access to some "memory" that is not directly
+accessible by the kernel. Exactly how much memory it provides is
+entirely dynamic and random.
+
+Whenever a swap-device is swapon'd frontswap_init() is called,
+passing the swap device number (aka "type") as a parameter.
+This notifies frontswap to expect attempts to "store" swap pages
+associated with that number.
+
+Whenever the swap subsystem is readying a page to write to a swap
+device (cf. swap_writepage()), frontswap_store() is called. Frontswap
+consults with the frontswap backend and if the backend says it does NOT
+have room, frontswap_store() returns -1 and the kernel swaps the page
+to the swap device as normal. Note that the response from the frontswap
+backend is unpredictable to the kernel; it may choose to never accept a
+page, it could accept every ninth page, or it might accept every
+page. But if the backend does accept a page, the data from the page
+has already been copied and associated with the type and offset,
+and the backend guarantees the persistence of the data. In this case,
+frontswap sets a bit in the "frontswap_map" for the swap device
+corresponding to the page offset on the swap device to which it would
+otherwise have written the data.
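+
+In other words, the hook on the swap-out path reduces to roughly the
+following (a sketch of the behavior described above, not necessarily
+the patch's exact code):
+
+	/* in swap_writepage(), before submitting block I/O */
+	if (frontswap_store(page) == 0) {
+		/* the backend accepted and copied the page; the bit in
+		 * frontswap_map is now set, so no disk write is needed */
+		set_page_writeback(page);
+		unlock_page(page);
+		end_page_writeback(page);
+		return 0;
+	}
+	/* rejected: fall through and write to the real swap device */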
+
+When the swap subsystem needs to swap-in a page (swap_readpage()),
+it first calls frontswap_load() which checks the frontswap_map to
+see if the page was earlier accepted by the frontswap backend. If
+it was, the page of data is filled from the frontswap backend and
+the swap-in is complete. If not, the normal swap-in code is
+executed to obtain the page of data from the real swap device.
+
+So every time the frontswap backend accepts a page, a swap device write
+and (potentially) a swap device read are replaced by a "frontswap backend
+store" and (possibly) a "frontswap backend load", which are presumably much
+faster.
+
+4) Can't frontswap be configured as a "special" swap device that is
+   just higher priority than any real swap device (e.g. like zswap,
+   or maybe swap-over-nbd/NFS)?
+
+No. First, the existing swap subsystem doesn't allow for any kind of
+swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy,
+but this would require fairly drastic changes. Even if it were
+rewritten, the existing swap subsystem uses the block I/O layer which
+assumes a swap device is fixed size and any page in it is linearly
+addressable. Frontswap barely touches the existing swap subsystem,
+and works around the constraints of the block I/O subsystem to provide
+a great deal of flexibility and dynamicity.
+
+For example, the acceptance of any swap page by the frontswap backend is
+entirely unpredictable. This is critical to the definition of frontswap
+backends because it grants completely dynamic discretion to the
+backend. In zcache, one cannot know a priori how compressible a page is.
+"Poorly" compressible pages can be rejected, and "poorly" can itself be
+defined dynamically depending on current memory constraints.
+
+Further, frontswap is entirely synchronous whereas a real swap
+device is, by definition, asynchronous and uses block I/O. The
+block I/O layer is not only unnecessary, but may perform "optimizations"
+that are inappropriate for a RAM-oriented device including delaying
+the write of some pages for a significant amount of time. Synchrony is
+required to ensure the dynamicity of the backend and to avoid thorny race
+conditions that would unnecessarily and greatly complicate frontswap
+and/or the block I/O subsystem. That said, only the initial "store"
+and "load" operations need be synchronous. A separate asynchronous thread
+is free to manipulate the pages stored by frontswap. For example,
+the "remotification" thread in RAMster uses standard asynchronous
+kernel sockets to move compressed frontswap pages to a remote machine.
+Similarly, a KVM guest-side implementation could do in-guest compression
+and use "batched" hypercalls.
+
+In a virtualized environment, the dynamicity allows the hypervisor
+(or host OS) to do "intelligent overcommit". For example, it can
+choose to accept pages only until host-swapping might be imminent,
+then force guests to do their own swapping.
+
+There is a downside to the transcendent memory specifications for
+frontswap: Since any "store" might fail, there must always be a real
+slot on a real swap device to swap the page. Thus frontswap must be
+implemented as a "shadow" to every swapon'd device with the potential
+capability of holding every page that the swap device might have held
+and the possibility that it might hold no pages at all. This means
+that frontswap cannot contain more pages than the total of swapon'd
+swap devices. For example, if NO swap device is configured on some
+installation, frontswap is useless. Swapless portable devices
+can still use frontswap but a backend for such devices must configure
+some kind of "ghost" swap device and ensure that it is never used.
+
+5) Why this weird definition about "duplicate stores"? If a page
+   has been previously successfully stored, can't it always be
+   successfully overwritten?
+
+Nearly always it can, but no, sometimes it cannot. Consider an example
+where data is compressed and the original 4K page has been compressed
+to 1K. Now an attempt is made to overwrite the page with data that
+is non-compressible and so would take the entire 4K. But the backend
+has no more space. In this case, the store must be rejected. Whenever
+frontswap rejects a store that would overwrite, it also must invalidate
+the old data and ensure that it is no longer accessible. Since the
+swap subsystem then writes the new data to the real swap device,
+this is the correct course of action to ensure coherency.
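+
+Sketched as pseudocode (illustrative only -- the helper names below are
+hypothetical, not the patch's actual code), a backend's duplicate-store
+path looks like:
+
+	/* a page is already stored for (type, offset) */
+	if (backend_overwrite(type, offset, page) != 0) {
+		/*
+		 * No room for the new data. The old copy must also be
+		 * dropped, or a later load could return stale data.
+		 */
+		backend_invalidate_page(type, offset);
+		return -1;	/* kernel writes the page to real swap */
+	}
+	return 0;		/* overwrite succeeded */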
+
+6) What is frontswap_shrink for?
+
+When the (non-frontswap) swap subsystem swaps out a page to a real
+swap device, that page is only taking up low-value pre-allocated disk
+space. But if frontswap has placed a page in transcendent memory, that
+page may be taking up valuable real estate. The frontswap_shrink
+routine allows code outside of the swap subsystem to force pages out
+of the memory managed by frontswap and back into kernel-addressable memory.
+For example, in RAMster, a "suction driver" thread will attempt
+to "repatriate" pages sent to a remote machine back to the local machine;
+this is driven using the frontswap_shrink mechanism when memory pressure
+subsides.
+
+7) Why does the frontswap patch create the new include file swapfile.h?
+
+The frontswap code depends on some swap-subsystem-internal data
+structures that have, over the years, moved back and forth between
+static and global. This seemed a reasonable compromise: Define
+them as global but declare them in a new include file that isn't
+included by the large number of source files that include swap.h.
+
+Dan Magenheimer, last updated April 9, 2012
diff --git a/MAINTAINERS b/MAINTAINERS
index 3075a2a2951..ae5a12a718c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1077,7 +1077,7 @@ F:	drivers/media/video/s5p-fimc/
 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Kamil Debski <k.debski@samsung.com>
-M:	Jeongtae Park <jtp.park@samsung.com>
+M:	Jeongtae Park <jtp.park@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@ -1743,10 +1743,10 @@ F:	include/linux/can/platform/
 CAPABILITIES
 M:	Serge Hallyn <serge.hallyn@canonical.com>
 L:	linux-security-module@vger.kernel.org
-S:	Supported
+S:	Supported
 F:	include/linux/capability.h
 F:	security/capability.c
-F:	security/commoncap.c
+F:	security/commoncap.c
 F:	kernel/capability.c
 
 CELL BROADBAND ENGINE ARCHITECTURE
@@ -2149,11 +2149,11 @@ S:	Orphan
 F:	drivers/net/wan/pc300*
 
 CYTTSP TOUCHSCREEN DRIVER
-M:	Javier Martinez Canillas <javier@dowhile0.org>
-L:	linux-input@vger.kernel.org
-S:	Maintained
-F:	drivers/input/touchscreen/cyttsp*
-F:	include/linux/input/cyttsp.h
+M:	Javier Martinez Canillas <javier@dowhile0.org>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/input/touchscreen/cyttsp*
+F:	include/linux/input/cyttsp.h
 
 DAMA SLAVE for AX.25
 M:	Joerg Reuter <jreuter@yaina.de>
@@ -2273,7 +2273,7 @@ F:	include/linux/device-mapper.h
 F:	include/linux/dm-*.h
 
 DIOLAN U2C-12 I2C DRIVER
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-diolan-u2c.c
@@ -2933,6 +2933,13 @@ F:	Documentation/power/freezing-of-tasks.txt
 F:	include/linux/freezer.h
 F:	kernel/freezer.c
 
+FRONTSWAP API
+M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	mm/frontswap.c
+F:	include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com
@@ -3141,7 +3148,7 @@ F:	drivers/tty/hvc/
 
 HARDWARE MONITORING
 M:	Jean Delvare <khali@linux-fr.org>
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
@@ -4099,6 +4106,8 @@ F:	drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Bryan Wu <bryan.wu@canonical.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
+L:	linux-leds@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:	Maintained
 F:	drivers/leds/
 F:	include/linux/leds.h
@@ -4416,6 +4425,13 @@ S:	Orphan
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
 
+MAX16065 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/max16065
+F:	drivers/hwmon/max16065.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
Koch" <hjk@hansjkoch.de> L: lm-sensors@lm-sensors.org @@ -5154,7 +5170,7 @@ F: drivers/leds/leds-pca9532.c F: include/linux/leds-pca9532.h PCA9541 I2C BUS MASTER SELECTOR DRIVER -M: Guenter Roeck <guenter.roeck@ericsson.com> +M: Guenter Roeck <linux@roeck-us.net> L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/muxes/i2c-mux-pca9541.c @@ -5174,7 +5190,7 @@ S: Maintained F: drivers/firmware/pcdp.* PCI ERROR RECOVERY -M: Linas Vepstas <linasvepstas@gmail.com> +M: Linas Vepstas <linasvepstas@gmail.com> L: linux-pci@vger.kernel.org S: Supported F: Documentation/PCI/pci-error-recovery.txt @@ -5304,7 +5320,7 @@ F: drivers/video/fb-puv3.c F: drivers/rtc/rtc-puv3.c PMBUS HARDWARE MONITORING DRIVERS -M: Guenter Roeck <guenter.roeck@ericsson.com> +M: Guenter Roeck <linux@roeck-us.net> L: lm-sensors@lm-sensors.org W: http://www.lm-sensors.org/ W: http://www.roeck-us.net/linux/drivers/ @@ -7299,11 +7315,11 @@ F: Documentation/DocBook/uio-howto.tmpl F: drivers/uio/ F: include/linux/uio*.h -UTIL-LINUX-NG PACKAGE +UTIL-LINUX PACKAGE M: Karel Zak <kzak@redhat.com> -L: util-linux-ng@vger.kernel.org -W: http://kernel.org/~kzak/util-linux-ng/ -T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git +L: util-linux@vger.kernel.org +W: http://en.wikipedia.org/wiki/Util-linux +T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git S: Maintained UVESAFB DRIVER @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b649c5904a4..84449dd8f03 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -7,7 +7,6 @@ config ARM select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_ATTRS select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) - select CMA if (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 54d49ddb9b8..5fb47a14f4b 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name, goto err; } - r = omap_device_register(pdev); + r = platform_device_add(pdev); if (r) { - pr_err("Could not register omap_device for %s\n", pdev_name); + pr_err("Could not register platform_device for %s\n", pdev_name); goto err; } diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index f31383c32f9..df33909205e 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -186,6 +186,12 @@ config SH_TIMER_TMU help This enables build of the TMU timer driver. +config EM_TIMER_STI + bool "STI timer driver" + default y + help + This enables build of the STI timer driver. 
+ endmenu config SH_CLK_CPG diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ea6b4315409..106c4c0ebcc 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -268,10 +268,8 @@ static int __init consistent_init(void) unsigned long base = consistent_base; unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; -#ifndef CONFIG_ARM_DMA_USE_IOMMU - if (cpu_architecture() >= CPU_ARCH_ARMv6) + if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) return 0; -#endif consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); if (!consistent_pte) { @@ -342,7 +340,7 @@ static int __init coherent_init(void) struct page *page; void *ptr; - if (cpu_architecture() < CPU_ARCH_ARMv6) + if (!IS_ENABLED(CONFIG_CMA)) return 0; ptr = __alloc_from_contiguous(NULL, size, prot, &page); @@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (arch_is_coherent() || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); - else if (cpu_architecture() < CPU_ARCH_ARMv6) + else if (!IS_ENABLED(CONFIG_CMA)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); else if (gfp & GFP_ATOMIC) addr = __alloc_from_pool(dev, size, &page, caller); @@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); - } else if (cpu_architecture() < CPU_ARCH_ARMv6) { + } else if (!IS_ENABLED(CONFIG_CMA)) { __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index c140f9b41dc..d552a854dac 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c @@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) syscall = 1; - if (ti->flags & _TIF_SIGPENDING)) + if (ti->flags & _TIF_SIGPENDING) do_signal(regs, syscall); if (ti->flags & _TIF_NOTIFY_RESUME) { diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 2e3994b2016..62bcea7dcc6 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs) unsigned long newsp; #ifdef __ARCH_SYNC_CORE_DCACHE - if (current->rt.nr_cpus_allowed == num_possible_cpus()) + if (current->nr_cpus_allowed == num_possible_cpus()) set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); #endif diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index cac5b6be572..14712012826 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,6 +7,8 @@ config M68K select GENERIC_IRQ_SHOW select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES + select GENERIC_STRNCPY_FROM_USER if MMU + select GENERIC_STRNLEN_USER if MMU select FPU if MMU select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 1a922fad76f..eafa2539a8e 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -1,2 +1,4 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h + +generic-y += word-at-a-time.h diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h index d63b99ff7ff..497c31c803f 100644 --- a/arch/m68k/include/asm/m528xsim.h +++ b/arch/m68k/include/asm/m528xsim.h @@ -86,7 +86,7 @@ /* * QSPI module. 
*/ -#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) +#define MCFQSPI_BASE (MCF_IPSBAR + 0x340) #define MCFQSPI_SIZE 0x40 #define MCFQSPI_CS0 147 diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 9c80cd515b2..472c891a4ae 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) #define copy_from_user(to, from, n) __copy_from_user(to, from, n) #define copy_to_user(to, from, n) __copy_to_user(to, from, n) -long strncpy_from_user(char *dst, const char __user *src, long count); -long strnlen_user(const char __user *src, long n); +#define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + +extern long strncpy_from_user(char *dst, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + unsigned long __clear_user(void __user *to, unsigned long n); #define clear_user __clear_user -#define strlen_user(str) strnlen_user(str, 32767) - #endif /* _M68K_UACCESS_H */ diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 8b4a2222e65..1bc10e62b9a 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c @@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void) } } -#ifdef CONFIG_COLDFIRE +#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) asmlinkage int syscall_trace_enter(void) { int ret = 0; diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c index d7deb7fc7eb..707f0573ec6 100644 --- a/arch/m68k/kernel/time.c +++ b/arch/m68k/kernel/time.c @@ -85,7 +85,7 @@ void __init time_init(void) mach_sched_init(timer_interrupt); } -#ifdef CONFIG_M68KCLASSIC +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET u32 arch_gettimeoffset(void) { @@ -108,4 +108,4 @@ static int __init rtc_init(void) module_init(rtc_init); -#endif /* CONFIG_M68KCLASSIC */ +#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index 5664386338d..5e97f2ee7c1 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c @@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, EXPORT_SYMBOL(__generic_copy_to_user); /* - * Copy a null terminated string from userspace. 
- */ -long strncpy_from_user(char *dst, const char __user *src, long count) -{ - long res; - char c; - - if (count <= 0) - return count; - - asm volatile ("\n" - "1: "MOVES".b (%2)+,%4\n" - " move.b %4,(%1)+\n" - " jeq 2f\n" - " subq.l #1,%3\n" - " jne 1b\n" - "2: sub.l %3,%0\n" - "3:\n" - " .section .fixup,\"ax\"\n" - " .even\n" - "10: move.l %5,%0\n" - " jra 3b\n" - " .previous\n" - "\n" - " .section __ex_table,\"a\"\n" - " .align 4\n" - " .long 1b,10b\n" - " .previous" - : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c) - : "i" (-EFAULT), "0" (count)); - - return res; -} -EXPORT_SYMBOL(strncpy_from_user); - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -long strnlen_user(const char __user *src, long n) -{ - char c; - long res; - - asm volatile ("\n" - "1: subq.l #1,%1\n" - " jmi 3f\n" - "2: "MOVES".b (%0)+,%2\n" - " tst.b %2\n" - " jne 1b\n" - " jra 4f\n" - "\n" - "3: addq.l #1,%0\n" - "4: sub.l %4,%0\n" - "5:\n" - " .section .fixup,\"ax\"\n" - " .even\n" - "20: sub.l %0,%0\n" - " jra 5b\n" - " .previous\n" - "\n" - " .section __ex_table,\"a\"\n" - " .align 4\n" - " .long 2b,20b\n" - " .previous\n" - : "=&a" (res), "+d" (n), "=&d" (c) - : "0" (src), "r" (src)); - - return res; -} -EXPORT_SYMBOL(strnlen_user); - -/* * Zero Userspace */ diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c index c801c172b82..f4dc9b29560 100644 --- a/arch/m68k/platform/68328/timers.c +++ b/arch/m68k/platform/68328/timers.c @@ -53,6 +53,7 @@ #endif static u32 m68328_tick_cnt; +static irq_handler_t timer_interrupt; /***************************************************************************/ @@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) TSTAT &= 0; m68328_tick_cnt += TICKS_PER_JIFFY; - return arch_timer_interrupt(irq, dummy); + return timer_interrupt(irq, dummy); } /***************************************************************************/ @@ -99,7 +100,7 @@ static struct clocksource m68328_clk = { /***************************************************************************/ -void hw_timer_init(void) +void hw_timer_init(irq_handler_t handler) { /* disable timer 1 */ TCTL = 0; @@ -115,6 +116,7 @@ void hw_timer_init(void) /* Enable timer 1 */ TCTL |= TCTL_TEN; clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); + timer_interrupt = handler; } /***************************************************************************/ diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c index 255fc03913e..9877cefad1e 100644 --- a/arch/m68k/platform/68360/config.c +++ b/arch/m68k/platform/68360/config.c @@ -35,6 +35,7 @@ extern void m360_cpm_reset(void); #define OSCILLATOR (unsigned long int)33000000 #endif +static irq_handler_t timer_interrupt; unsigned long int system_clock; extern QUICC *pquicc; @@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy) pquicc->timer_ter1 = 0x0002; /* clear timer event */ - return arch_timer_interrupt(irq, dummy); + return timer_interrupt(irq, dummy); } static struct irqaction m68360_timer_irq = { @@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = { .handler = hw_tick, }; -void hw_timer_init(void) +void hw_timer_init(irq_handler_t handler) { unsigned char prescaler; unsigned short tgcr_save; @@ -94,6 +95,8 @@ void hw_timer_init(void) pquicc->timer_ter1 = 0x0003; /* clear timer events */ + timer_interrupt = handler; + /* enable timer 1 interrupt in CIMR */ setup_irq(CPMVEC_TIMER1, 
&m68360_timer_irq); diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index dbc3850b1d0..5707f1a6234 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 +LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) MACHINE := $(shell uname -m) ifeq ($(MACHINE),parisc*) @@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/ kernel-$(CONFIG_HPUX) += hpux/ core-y += $(addprefix arch/parisc/, $(kernel-y)) -libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` +libs-y += arch/parisc/lib/ $(LIBGCC) drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild index 19a434f5505..4383707d980 100644 --- a/arch/parisc/include/asm/Kbuild +++ b/arch/parisc/include/asm/Kbuild @@ -1,3 +1,4 @@ include include/asm-generic/Kbuild.asm header-y += pdc.h +generic-y += word-at-a-time.h diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index 72cfdb0cfdd..62a33338549 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h @@ -1,6 +1,8 @@ #ifndef _PARISC_BUG_H #define _PARISC_BUG_H +#include <linux/kernel.h> /* for BUGFLAG_TAINT */ + /* * Tell the user there is some problem. * The offending file and line are encoded in the __bug_table section. diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 0b6d79617d7..2e3200ca485 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) { - if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) - && entry->jump[1] == 0x396b0000 + (val & 0xffff)) + if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) + && entry->jump[1] == 0x398c0000 + (val & 0xffff)) return 1; return 0; } @@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location, entry++; } - /* Stolen from Paul Mackerras as well... */ - entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ - entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ - entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ + entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ + entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ + entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ entry->jump[3] = 0x4e800420; /* bctr */ DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 99a995c2a3f..be171ee73bf 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs) struct pt_regs *old_regs; u64 *next_tb = &__get_cpu_var(decrementers_next_tb); struct clock_event_device *evt = &__get_cpu_var(decrementers); + u64 now; /* Ensure a positive value is written to the decrementer, or else * some CPUs will continue to take decrementer exceptions. 
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs) irq_work_run(); } - *next_tb = ~(u64)0; - if (evt->event_handler) - evt->event_handler(evt); + now = get_tb_or_rtc(); + if (now >= *next_tb) { + *next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); + } else { + now = *next_tb - now; + if (now <= DECREMENTER_MAX) + set_dec((int)now); + } #ifdef CONFIG_PPC64 /* collect purr register values often, for accurate calculations */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 99bcd0ee838..31d9db7913e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -32,6 +32,8 @@ config SUPERH select GENERIC_SMP_IDLE_THREAD select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 46edf070da1..aed701c7b11 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -9,6 +9,12 @@ # License. See the file "COPYING" in the main directory of this archive # for more details. # +ifneq ($(SUBARCH),$(ARCH)) + ifeq ($(CROSS_COMPILE),) + CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) + endif +endif + isa-y := any isa-$(CONFIG_SH_DSP) := sh isa-$(CONFIG_CPU_SH2) := sh2 @@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \ KBUILD_DEFCONFIG := cayman_defconfig endif -ifneq ($(SUBARCH),$(ARCH)) - ifeq ($(CROSS_COMPILE),) - CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-) - endif -endif - ifdef CONFIG_CPU_LITTLE_ENDIAN ld-bfd := elf32-$(UTS_MACHINE)-linux -LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) +LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd) LDFLAGS += -EL else ld-bfd := elf32-$(UTS_MACHINE)big-linux -LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) +LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd) LDFLAGS += -EB endif diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 7beb42322f6..7b673ddcd55 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,5 +1,39 @@ include include/asm-generic/Kbuild.asm +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += current.h +generic-y += delay.h +generic-y += div64.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += fcntl.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kvm_para.h +generic-y += local.h +generic-y += local64.h +generic-y += param.h +generic-y += parport.h +generic-y += percpu.h +generic-y += poll.h +generic-y += mman.h +generic-y += msgbuf.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += shmbuf.h +generic-y += siginfo.h +generic-y += sizes.h +generic-y += socket.h +generic-y += statfs.h +generic-y += termbits.h +generic-y += termios.h +generic-y += ucontext.h +generic-y += xor.h + header-y += cachectl.h header-y += cpu-features.h header-y += hw_breakpoint.h diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/arch/sh/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff 
--git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h deleted file mode 100644 index 6ca395d1393..00000000000 --- a/arch/sh/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SH_CPUTIME_H -#define __SH_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __SH_CPUTIME_H */ diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h deleted file mode 100644 index 4c51401b553..00000000000 --- a/arch/sh/include/asm/current.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/current.h> diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h deleted file mode 100644 index 9670e127b7b..00000000000 --- a/arch/sh/include/asm/delay.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/delay.h> diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h deleted file mode 100644 index 6cd978cefb2..00000000000 --- a/arch/sh/include/asm/div64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/div64.h> diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/arch/sh/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h deleted file mode 100644 index 51cf6f9cebb..00000000000 --- a/arch/sh/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH_ERRNO_H -#define __ASM_SH_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* __ASM_SH_ERRNO_H */ diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h deleted file mode 100644 index 46ab12db573..00000000000 --- a/arch/sh/include/asm/fcntl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/fcntl.h> diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe..00000000000 --- a/arch/sh/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d..00000000000 --- a/arch/sh/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipcbuf.h> diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/arch/sh/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b95..00000000000 --- a/arch/sh/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h deleted file mode 100644 index 9ed9b9cb459..00000000000 --- a/arch/sh/include/asm/local.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SH_LOCAL_H -#define __ASM_SH_LOCAL_H - -#include <asm-generic/local.h> - -#endif /* __ASM_SH_LOCAL_H */ - diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h deleted file mode 100644 index 36c93b5cc23..00000000000 --- a/arch/sh/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab..00000000000 --- a/arch/sh/include/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include 
<asm-generic/mman.h> diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h deleted file mode 100644 index 809134c644a..00000000000 --- a/arch/sh/include/asm/msgbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/msgbuf.h> diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h deleted file mode 100644 index 965d4542797..00000000000 --- a/arch/sh/include/asm/param.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/param.h> diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h deleted file mode 100644 index cf252af6459..00000000000 --- a/arch/sh/include/asm/parport.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/parport.h> diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h deleted file mode 100644 index 4db4b39a439..00000000000 --- a/arch/sh/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARCH_SH_PERCPU -#define __ARCH_SH_PERCPU - -#include <asm-generic/percpu.h> - -#endif /* __ARCH_SH_PERCPU */ diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h deleted file mode 100644 index c98509d3149..00000000000 --- a/arch/sh/include/asm/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/poll.h> diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h deleted file mode 100644 index 9c2499a86ec..00000000000 --- a/arch/sh/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH_RESOURCE_H -#define __ASM_SH_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif /* __ASM_SH_RESOURCE_H */ diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h deleted file mode 100644 index 98dfc3510f1..00000000000 --- a/arch/sh/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH_SCATTERLIST_H -#define __ASM_SH_SCATTERLIST_H - -#include <asm-generic/scatterlist.h> - -#endif /* __ASM_SH_SCATTERLIST_H */ diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h deleted file mode 100644 index 7673b83cfef..00000000000 --- a/arch/sh/include/asm/sembuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/sembuf.h> diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h deleted file mode 100644 index a0cb0caff15..00000000000 --- a/arch/sh/include/asm/serial.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/serial.h> diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h deleted file mode 100644 index 83c05fc2de3..00000000000 --- a/arch/sh/include/asm/shmbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/shmbuf.h> diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h deleted file mode 100644 index 813040ed68a..00000000000 --- a/arch/sh/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SH_SIGINFO_H -#define __ASM_SH_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif /* __ASM_SH_SIGINFO_H */ diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h deleted file mode 100644 index dd248c2e108..00000000000 --- a/arch/sh/include/asm/sizes.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/sizes.h> diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h deleted file mode 100644 index 6b71384b9d8..00000000000 --- a/arch/sh/include/asm/socket.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/socket.h> diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h deleted file mode 100644 index 9202a023328..00000000000 --- a/arch/sh/include/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ 
-#ifndef __ASM_SH_STATFS_H -#define __ASM_SH_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* __ASM_SH_STATFS_H */ diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h deleted file mode 100644 index 3935b106de7..00000000000 --- a/arch/sh/include/asm/termbits.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/termbits.h> diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h deleted file mode 100644 index 280d78a9d96..00000000000 --- a/arch/sh/include/asm/termios.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/termios.h> diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 050f221fa89..8698a80ed00 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -25,6 +25,8 @@ (__chk_user_ptr(addr), \ __access_ok((unsigned long __force)(addr), (size))) +#define user_addr_max() (current_thread_info()->addr_limit.seg) + /* * Uh, these should become the main single-value transfer routines ... * They automatically use the right size if we just have the right @@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; }; # include "uaccess_64.h" #endif +extern long strncpy_from_user(char *dest, const char __user *src, long count); + +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + /* Generic arbitrary sized copy. */ /* Return the number of bytes NOT copied */ __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); @@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); __cl_size; \ }) -/** - * strncpy_from_user: - Copy a NUL terminated string from userspace. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. - * - * Copies a NUL-terminated string from userspace to kernel space. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -#define strncpy_from_user(dest,src,count) \ -({ \ - unsigned long __sfu_src = (unsigned long)(src); \ - int __sfu_count = (int)(count); \ - long __sfu_res = -EFAULT; \ - \ - if (__access_ok(__sfu_src, __sfu_count)) \ - __sfu_res = __strncpy_from_user((unsigned long)(dest), \ - __sfu_src, __sfu_count); \ - \ - __sfu_res; \ -}) - static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { @@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) return __copy_size; } -/** - * strnlen_user: - Get the size of a string in user space. - * @s: The string to measure. - * @n: The maximum valid length - * - * Context: User context only. This function may sleep. - * - * Get the size of a NUL-terminated string in user space. - * - * Returns the size of the string INCLUDING the terminating NUL. - * On exception, returns 0. - * If the string is too long, returns a value greater than @n. - */ -static inline long strnlen_user(const char __user *s, long n) -{ - if (!__addr_ok(s)) - return 0; - else - return __strnlen_user(s, n); -} - -/** - * strlen_user: - Get the size of a string in user space. - * @str: The string to measure. - * - * Context: User context only. 
This function may sleep. - * - * Get the size of a NUL-terminated string in user space. - * - * Returns the size of the string INCLUDING the terminating NUL. - * On exception, returns 0. - * - * If there is a limit on the length of a valid string, you may wish to - * consider using strnlen_user() instead. - */ -#define strlen_user(str) strnlen_user(str, ~0UL >> 1) - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h index ae0d24f6653..c0de7ee35ab 100644 --- a/arch/sh/include/asm/uaccess_32.h +++ b/arch/sh/include/asm/uaccess_32.h @@ -170,79 +170,4 @@ __asm__ __volatile__( \ extern void __put_user_unknown(void); -static inline int -__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count) -{ - __kernel_size_t res; - unsigned long __dummy, _d, _s, _c; - - __asm__ __volatile__( - "9:\n" - "mov.b @%2+, %1\n\t" - "cmp/eq #0, %1\n\t" - "bt/s 2f\n" - "1:\n" - "mov.b %1, @%3\n\t" - "dt %4\n\t" - "bf/s 9b\n\t" - " add #1, %3\n\t" - "2:\n\t" - "sub %4, %0\n" - "3:\n" - ".section .fixup,\"ax\"\n" - "4:\n\t" - "mov.l 5f, %1\n\t" - "jmp @%1\n\t" - " mov %9, %0\n\t" - ".balign 4\n" - "5: .long 3b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .balign 4\n" - " .long 9b,4b\n" - ".previous" - : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c) - : "0" (__count), "2" (__src), "3" (__dest), "4" (__count), - "i" (-EFAULT) - : "memory", "t"); - - return res; -} - -/* - * Return the size of a string (including the ending 0 even when we have - * exceeded the maximum string length). - */ -static inline long __strnlen_user(const char __user *__s, long __n) -{ - unsigned long res; - unsigned long __dummy; - - __asm__ __volatile__( - "1:\t" - "mov.b @(%0,%3), %1\n\t" - "cmp/eq %4, %0\n\t" - "bt/s 2f\n\t" - " add #1, %0\n\t" - "tst %1, %1\n\t" - "bf 1b\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3:\n\t" - "mov.l 4f, %1\n\t" - "jmp @%1\n\t" - " mov #0, %0\n" - ".balign 4\n" - "4: .long 2b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - " .balign 4\n" - " .long 1b,3b\n" - ".previous" - : "=z" (res), "=&r" (__dummy) - : "0" (0), "r" (__s), "r" (__n) - : "t"); - return res; -} - #endif /* __ASM_SH_UACCESS_32_H */ diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 56fd20b8cdc..2e07e0f40c6 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long); extern long __put_user_asm_q(void *, long); extern void __put_user_unknown(void); -extern long __strnlen_user(const char *__s, long __n); -extern int __strncpy_from_user(unsigned long __dest, - unsigned long __user __src, int __count); - #endif /* __ASM_SH_UACCESS_64_H */ diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h deleted file mode 100644 index 9bc07b9f30f..00000000000 --- a/arch/sh/include/asm/ucontext.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ucontext.h> diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h new file mode 100644 index 00000000000..6e38953ff7f --- /dev/null +++ b/arch/sh/include/asm/word-at-a-time.h @@ -0,0 +1,53 @@ +#ifndef __ASM_SH_WORD_AT_A_TIME_H +#define __ASM_SH_WORD_AT_A_TIME_H + +#ifdef CONFIG_CPU_BIG_ENDIAN +# include <asm-generic/word-at-a-time.h> +#else +/* + * Little-endian version cribbed from x86. 
+ */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} +#endif + +#endif diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h deleted file mode 100644 index c82eb12a5b1..00000000000 --- a/arch/sh/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/xor.h> diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h deleted file mode 100644 index 1192e1c761a..00000000000 --- a/arch/sh/include/cpu-sh2a/cpu/ubc.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * SH-2A UBC definitions - * - * Copyright (C) 2008 Kieran Bingham - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#ifndef __ASM_CPU_SH2A_UBC_H -#define __ASM_CPU_SH2A_UBC_H - -#define UBC_BARA 0xfffc0400 -#define UBC_BAMRA 0xfffc0404 -#define UBC_BBRA 0xfffc04a0 /* 16 bit access */ -#define UBC_BDRA 0xfffc0408 -#define UBC_BDMRA 0xfffc040c - -#define UBC_BARB 0xfffc0410 -#define UBC_BAMRB 0xfffc0414 -#define UBC_BBRB 0xfffc04b0 /* 16 bit access */ -#define UBC_BDRB 0xfffc0418 -#define UBC_BDMRB 0xfffc041c - -#define UBC_BRCR 0xfffc04c0 - -#endif /* __ASM_CPU_SH2A_UBC_H */ diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index ff1f0e6e9be..b7cf6a547f1 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S @@ -1569,86 +1569,6 @@ ___clear_user_exit: #endif /* CONFIG_MMU */ /* - * int __strncpy_from_user(unsigned long __dest, unsigned long __src, - * int __count) - * - * Inputs: - * (r2) target address - * (r3) source address - * (r4) maximum size in bytes - * - * Ouputs: - * (*r2) copied data - * (r2) -EFAULT (in case of faulting) - * copied data (otherwise) - */ - .global __strncpy_from_user -__strncpy_from_user: - pta ___strncpy_from_user1, tr0 - pta ___strncpy_from_user_done, tr1 - or r4, ZERO, r5 /* r5 = original count */ - beq/u r4, r63, tr1 /* early exit if r4==0 */ - movi -(EFAULT), r6 /* r6 = reply, no real fixup */ - or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ - -___strncpy_from_user1: - ld.b r3, 0, r7 /* Fault address: only in reading */ - st.b r2, 0, r7 - addi r2, 1, r2 - addi r3, 1, r3 - beq/u ZERO, r7, tr1 - addi r4, -1, r4 /* return real number of copied bytes */ - bne/l ZERO, r4, tr0 - -___strncpy_from_user_done: - sub r5, r4, r6 /* If done, return copied */ - -___strncpy_from_user_exit: - or r6, ZERO, r2 - ptabs LINK, tr0 - blink tr0, ZERO - -/* - * extern long __strnlen_user(const char *__s, long __n) - * - * Inputs: - * (r2) source address - * (r3) source size in bytes - * - * Ouputs: - * (r2) -EFAULT (in case of faulting) - * string length (otherwise) - */ - .global __strnlen_user -__strnlen_user: - pta ___strnlen_user_set_reply, tr0 - pta ___strnlen_user1, tr1 - or ZERO, ZERO, r5 /* r5 = counter */ - movi -(EFAULT), r6 /* r6 = reply, no real fixup */ - or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ - beq r3, ZERO, tr0 - -___strnlen_user1: - ldx.b r2, r5, r7 /* Fault address: only in reading */ - addi r3, -1, r3 /* No real fixup */ - addi r5, 1, r5 - beq r3, ZERO, tr0 - bne r7, ZERO, tr1 -! The line below used to be active. This meant led to a junk byte lying between each pair -! of entries in the argv & envp structures in memory. Whilst the program saw the right data -! via the argv and envp arguments to main, it meant the 'flat' representation visible through -! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. -! 
addi r5, 1, r5 /* Include '\0' */ - -___strnlen_user_set_reply: - or r5, ZERO, r6 /* If done, return counter */ - -___strnlen_user_exit: - or r6, ZERO, r2 - ptabs LINK, tr0 - blink tr0, ZERO - -/* * extern long __get_user_asm_?(void *val, long addr) * * Inputs: @@ -1982,8 +1902,6 @@ asm_uaccess_start: .long ___copy_user2, ___copy_user_exit .long ___clear_user1, ___clear_user_exit #endif - .long ___strncpy_from_user1, ___strncpy_from_user_exit - .long ___strnlen_user1, ___strnlen_user_exit .long ___get_user_asm_b1, ___get_user_asm_b_exit .long ___get_user_asm_w1, ___get_user_asm_w_exit .long ___get_user_asm_l1, ___get_user_asm_l_exit diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index 9b7a459a461..055d91b7030 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -4,6 +4,7 @@ #include <linux/sched.h> #include <linux/export.h> #include <linux/stackprotector.h> +#include <asm/fpu.h> struct kmem_cache *task_xstate_cachep = NULL; unsigned int xstate_size; diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 4264583eaba..602545b12a8 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -33,6 +33,7 @@ #include <asm/switch_to.h> struct task_struct *last_task_used_math = NULL; +struct pt_regs fake_swapper_regs = { 0, }; void show_regs(struct pt_regs *regs) { diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index 45afa5c51f6..26a0774f527 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c @@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b); EXPORT_SYMBOL(__get_user_asm_w); EXPORT_SYMBOL(__get_user_asm_l); EXPORT_SYMBOL(__get_user_asm_q); -EXPORT_SYMBOL(__strnlen_user); -EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 7e1fef36bde..e9c670d7a7f 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -91,11 +91,6 @@ extern void smp_nap(void); /* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ extern void _cpu_idle(void); -/* Switch boot idle thread to a freshly-allocated stack and free old stack. */ -extern void cpu_idle_on_new_stack(struct thread_info *old_ti, - unsigned long new_sp, - unsigned long new_ss10); - #else /* __ASSEMBLY__ */ /* diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S index 133c4b56a99..c31637baff2 100644 --- a/arch/tile/kernel/entry.S +++ b/arch/tile/kernel/entry.S @@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current) jrp lr /* keep backtracer happy */ STD_ENDPROC(KBacktraceIterator_init_current) -/* - * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then - * free the old stack (passed in r0) and re-invoke cpu_idle(). - * We update sp and ksp0 simultaneously to avoid backtracer warnings. - */ -STD_ENTRY(cpu_idle_on_new_stack) - { - move sp, r1 - mtspr SPR_SYSTEM_SAVE_K_0, r2 - } - jal free_thread_info - j cpu_idle - STD_ENDPROC(cpu_idle_on_new_stack) - /* Loop forever on a nap during SMP boot. 
*/ STD_ENTRY(smp_nap) nap diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6098ccc59be..dd87f342039 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -29,6 +29,7 @@ #include <linux/smp.h> #include <linux/timex.h> #include <linux/hugetlb.h> +#include <linux/start_kernel.h> #include <asm/setup.h> #include <asm/sections.h> #include <asm/cacheflush.h> diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 8bbea6aa40d..efe5acfc79c 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -94,10 +94,10 @@ bs_die: .section ".bsdata", "a" bugger_off_msg: - .ascii "Direct booting from floppy is no longer supported.\r\n" - .ascii "Please use a boot loader program instead.\r\n" + .ascii "Direct floppy boot is not supported. " + .ascii "Use a boot loader program instead.\r\n" .ascii "\n" - .ascii "Remove disk and press any key to reboot . . .\r\n" + .ascii "Remove disk and press any key to reboot ...\r\n" .byte 0 #ifdef CONFIG_EFI_STUB @@ -111,7 +111,7 @@ coff_header: #else .word 0x8664 # x86-64 #endif - .word 2 # nr_sections + .word 3 # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols @@ -158,8 +158,8 @@ extra_header_fields: #else .quad 0 # ImageBase #endif - .long 0x1000 # SectionAlignment - .long 0x200 # FileAlignment + .long 0x20 # SectionAlignment + .long 0x20 # FileAlignment .word 0 # MajorOperatingSystemVersion .word 0 # MinorOperatingSystemVersion .word 0 # MajorImageVersion @@ -200,8 +200,10 @@ extra_header_fields: # Section table section_table: - .ascii ".text" - .byte 0 + # + # The offset & size fields are filled in by build.c. + # + .ascii ".setup" .byte 0 .byte 0 .long 0 @@ -217,9 +219,8 @@ section_table: # # The EFI application loader requires a relocation section - # because EFI applications must be relocatable. But since - # we don't need the loader to fixup any relocs for us, we - # just create an empty (zero-length) .reloc section header. + # because EFI applications must be relocatable. The .reloc + # offset & size fields are filled in by build.c. # .ascii ".reloc" .byte 0 @@ -233,6 +234,25 @@ section_table: .word 0 # NumberOfRelocations .word 0 # NumberOfLineNumbers .long 0x42100040 # Characteristics (section flags) + + # + # The offset & size fields are filled in by build.c. + # + .ascii ".text" + .byte 0 + .byte 0 + .byte 0 + .long 0 + .long 0x0 # startup_{32,64} + .long 0 # Size of initialized data + # on disk + .long 0x0 # startup_{32,64} + .long 0 # PointerToRelocations + .long 0 # PointerToLineNumbers + .word 0 # NumberOfRelocations + .word 0 # NumberOfLineNumbers + .long 0x60500020 # Characteristics (section flags) + #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup. 
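Both the section table above and the build.c helpers that follow assume the standard 40-byte (0x28) COFF section header. As a reference for the raw offsets patched below, here is that layout as an illustrative C struct; field names follow the PE/COFF specification rather than any kernel header.

    #include <stdint.h>

    /* One 40-byte COFF section header. The offsets build.c patches
     * (0x08, 0x0c, 0x10, 0x14) land on the fields marked here. */
    struct coff_section {
        char     name[8];              /* 0x00: ".setup", ".reloc", ".text" */
        uint32_t virtual_size;         /* 0x08: patched with section size   */
        uint32_t virtual_address;      /* 0x0c: patched with section offset */
        uint32_t size_of_raw_data;     /* 0x10: patched with section size   */
        uint32_t pointer_to_raw_data;  /* 0x14: patched with section offset */
        uint32_t pointer_to_relocs;    /* 0x18 */
        uint32_t pointer_to_line_nos;  /* 0x1c */
        uint16_t num_relocs;           /* 0x20 */
        uint16_t num_line_nos;         /* 0x22 */
        uint32_t characteristics;      /* 0x24: section flags               */
    };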
This is part 1 of the diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 3f61f6e2b46..4b8e165ee57 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -50,6 +50,8 @@ typedef unsigned int u32; u8 buf[SETUP_SECT_MAX*512]; int is_big_kernel; +#define PECOFF_RELOC_RESERVE 0x20 + /*----------------------------------------------------------------------*/ static const u32 crctab32[] = { @@ -133,11 +135,103 @@ static void usage(void) die("Usage: build setup system [> image]"); } -int main(int argc, char ** argv) -{ #ifdef CONFIG_EFI_STUB - unsigned int file_sz, pe_header; + +static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) +{ + unsigned int pe_header; + unsigned short num_sections; + u8 *section; + + pe_header = get_unaligned_le32(&buf[0x3c]); + num_sections = get_unaligned_le16(&buf[pe_header + 6]); + +#ifdef CONFIG_X86_32 + section = &buf[pe_header + 0xa8]; +#else + section = &buf[pe_header + 0xb8]; #endif + + while (num_sections > 0) { + if (strncmp((char*)section, section_name, 8) == 0) { + /* section header size field */ + put_unaligned_le32(size, section + 0x8); + + /* section header vma field */ + put_unaligned_le32(offset, section + 0xc); + + /* section header 'size of initialised data' field */ + put_unaligned_le32(size, section + 0x10); + + /* section header 'file offset' field */ + put_unaligned_le32(offset, section + 0x14); + + break; + } + section += 0x28; + num_sections--; + } +} + +static void update_pecoff_setup_and_reloc(unsigned int size) +{ + u32 setup_offset = 0x200; + u32 reloc_offset = size - PECOFF_RELOC_RESERVE; + u32 setup_size = reloc_offset - setup_offset; + + update_pecoff_section_header(".setup", setup_offset, setup_size); + update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE); + + /* + * Modify .reloc section contents with a single entry. The + * relocation is applied to offset 10 of the relocation section. + */ + put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]); + put_unaligned_le32(10, &buf[reloc_offset + 4]); +} + +static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) +{ + unsigned int pe_header; + unsigned int text_sz = file_sz - text_start; + + pe_header = get_unaligned_le32(&buf[0x3c]); + + /* Size of image */ + put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); + + /* + * Size of code: Subtract the size of the first sector (512 bytes) + * which includes the header. + */ + put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); + +#ifdef CONFIG_X86_32 + /* + * Address of entry point. + * + * The EFI stub entry point is +16 bytes from the start of + * the .text section. + */ + put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); +#else + /* + * Address of entry point. startup_32 is at the beginning and + * the 64-bit entry point (startup_64) is always 512 bytes + * after. 
The EFI stub entry point is 16 bytes after that, as + * the first instruction allows legacy loaders to jump over + * the EFI stub initialisation + */ + put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); +#endif /* CONFIG_X86_32 */ + + update_pecoff_section_header(".text", text_start, text_sz); +} + +#endif /* CONFIG_EFI_STUB */ + +int main(int argc, char ** argv) +{ unsigned int i, sz, setup_sectors; int c; u32 sys_size; @@ -163,6 +257,12 @@ int main(int argc, char ** argv) die("Boot block hasn't got boot flag (0xAA55)"); fclose(file); +#ifdef CONFIG_EFI_STUB + /* Reserve 0x20 bytes for .reloc section */ + memset(buf+c, 0, PECOFF_RELOC_RESERVE); + c += PECOFF_RELOC_RESERVE; +#endif + /* Pad unused space with zeros */ setup_sectors = (c + 511) / 512; if (setup_sectors < SETUP_SECT_MIN) @@ -170,6 +270,10 @@ int main(int argc, char ** argv) i = setup_sectors*512; memset(buf+c, 0, i-c); +#ifdef CONFIG_EFI_STUB + update_pecoff_setup_and_reloc(i); +#endif + /* Set the default root device */ put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); @@ -194,66 +298,8 @@ int main(int argc, char ** argv) put_unaligned_le32(sys_size, &buf[0x1f4]); #ifdef CONFIG_EFI_STUB - file_sz = sz + i + ((sys_size * 16) - sz); - - pe_header = get_unaligned_le32(&buf[0x3c]); - - /* Size of image */ - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); - - /* - * Subtract the size of the first section (512 bytes) which - * includes the header and .reloc section. The remaining size - * is that of the .text section. - */ - file_sz -= 512; - - /* Size of code */ - put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]); - -#ifdef CONFIG_X86_32 - /* - * Address of entry point. - * - * The EFI stub entry point is +16 bytes from the start of - * the .text section. - */ - put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); - - /* .text size */ - put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); - - /* .text vma */ - put_unaligned_le32(0x200, &buf[pe_header + 0xb4]); - - /* .text size of initialised data */ - put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]); - - /* .text file offset */ - put_unaligned_le32(0x200, &buf[pe_header + 0xbc]); -#else - /* - * Address of entry point. startup_32 is at the beginning and - * the 64-bit entry point (startup_64) is always 512 bytes - * after. 
The EFI stub entry point is 16 bytes after that, as - * the first instruction allows legacy loaders to jump over - * the EFI stub initialisation - */ - put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); - - /* .text size */ - put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); - - /* .text vma */ - put_unaligned_le32(0x200, &buf[pe_header + 0xc4]); - - /* .text size of initialised data */ - put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]); - - /* .text file offset */ - put_unaligned_le32(0x200, &buf[pe_header + 0xcc]); -#endif /* CONFIG_X86_32 */ -#endif /* CONFIG_EFI_STUB */ + update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); +#endif crc = partial_crc32(buf, i, crc); if (fwrite(buf, 1, i, stdout) != i) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index be6d9e365a8..3470624d783 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec) pxor IN3, STATE4 movaps IN4, IV #else - pxor (INP), STATE2 - pxor 0x10(INP), STATE3 pxor IN1, STATE4 movaps IN2, IV + movups (INP), IN1 + pxor IN1, STATE2 + movups 0x10(INP), IN2 + pxor IN2, STATE3 #endif movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 0e3793b821e..dc580c42851 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -54,6 +54,20 @@ struct nmiaction { __register_nmi_handler((t), &fn##_na); \ }) +/* + * For special handlers that register/unregister in the + * init section only. This should be considered rare. + */ +#define register_nmi_handler_initonly(t, fn, fg, n) \ +({ \ + static struct nmiaction fn##_na __initdata = { \ + .handler = (fn), \ + .name = (n), \ + .flags = (fg), \ + }; \ + __register_nmi_handler((t), &fn##_na); \ +}) + int __register_nmi_handler(unsigned int, struct nmiaction *); void unregister_nmi_handler(unsigned int, const char *); diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 04cd6882308..e1f3a17034f 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -33,9 +33,8 @@ #define segment_eq(a, b) ((a).seg == (b).seg) #define user_addr_max() (current_thread_info()->addr_limit.seg) -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < \ - (current_thread_info()->addr_limit.seg)) +#define __addr_ok(addr) \ + ((unsigned long __force)(addr) < user_addr_max()) /* * Test whether a block of memory is a valid user space address. @@ -47,14 +46,14 @@ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... */ -#define __range_not_ok(addr, size) \ +#define __range_not_ok(addr, size, limit) \ ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ : "=&r" (flag), "=r" (roksum) \ : "1" (addr), "g" ((long)(size)), \ - "rm" (current_thread_info()->addr_limit.seg)); \ + "rm" (limit)); \ flag; \ }) @@ -77,7 +76,8 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
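As an aside, the asm in __range_not_ok() above folds the 33-bit (65-bit on x86_64) carry into the result via the sbb chain; a portable C rendition of the same predicate makes the overflow handling explicit. This is illustrative only, with hypothetical naming; the kernel keeps the asm for the flag tricks.

    #include <stdint.h>

    /* Nonzero if [addr, addr + size) is NOT entirely below 'limit'.
     * The explicit 'end < addr' test stands in for the carry that the
     * asm picks up with sbb. */
    static inline int range_not_ok(uintptr_t addr, uintptr_t size,
                                   uintptr_t limit)
    {
        uintptr_t end = addr + size;
        return end < addr || end > limit;
    }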
*/ -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define access_ok(type, addr, size) \ + (likely(__range_not_ok(addr, size, user_addr_max()) == 0)) /* * The exception table consists of pairs of addresses relative to the diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index becf47b8173..6149b476d9d 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -149,7 +149,6 @@ /* 4 bits of software ack period */ #define UV2_ACK_MASK 0x7UL #define UV2_ACK_UNITS_SHFT 3 -#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT /* diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 6e76c191a83..d5fd66f0d4c 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -20,7 +20,6 @@ #include <linux/bitops.h> #include <linux/ioport.h> #include <linux/suspend.h> -#include <linux/kmemleak.h> #include <asm/e820.h> #include <asm/io.h> #include <asm/iommu.h> @@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void) return 0; } memblock_reserve(addr, aper_size); - /* - * Kmemleak should not scan this block as it may not be mapped via the - * kernel direct mapping. - */ - kmemleak_ignore(phys_to_virt(addr)); printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", aper_size >> 10, addr); insert_aperture_resource((u32)addr, aper_size); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ac96561d1a9..5f0ff597437 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) BUG_ON(!cfg->vector); vector = cfg->vector; - for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) + for_each_cpu(cpu, cfg->domain) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; @@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; - for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { + for_each_cpu(cpu, cfg->old_domain) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 0a687fd185e..da27c5d2168 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data) */ iv = __this_cpu_read(mce_next_interval); if (mce_notify_irq()) - iv = max(iv, (unsigned long) HZ/100); + iv = max(iv / 2, (unsigned long) HZ/100); else iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); __this_cpu_write(mce_next_interval, iv); @@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) static void __mcheck_cpu_init_timer(void) { struct timer_list *t = &__get_cpu_var(mce_timer); - unsigned long iv = __this_cpu_read(mce_next_interval); + unsigned long iv = check_interval * HZ; setup_timer(t, mce_timer_fn, smp_processor_id()); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index e049d6da018..c4706cf9c01 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) if (!cpuc->shared_regs) goto error; } + cpuc->is_fake = 1; return cpuc; error: free_fake_cpuc(cpuc); @@ -1756,6 +1757,12 @@ 
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); } +static inline int +valid_user_frame(const void __user *fp, unsigned long size) +{ + return (__range_not_ok(fp, size, TASK_SIZE) == 0); +} + #ifdef CONFIG_COMPAT #include <asm/compat.h> @@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (bytes != sizeof(frame)) break; - if (fp < compat_ptr(regs->sp)) + if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); @@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) if (bytes != sizeof(frame)) break; - if ((unsigned long)fp < regs->sp) + if (!valid_user_frame(fp, sizeof(frame))) break; perf_callchain_store(entry, frame.return_address); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 6638aaf5449..7241e2fc3c1 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -117,6 +117,7 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ unsigned int group_flag; + int is_fake; /* * Intel DebugStore bits @@ -364,6 +365,7 @@ struct x86_pmu { int pebs_record_size; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; + void (*pebs_aliases)(struct perf_event *event); /* * Intel LBR diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 166546ec6ae..187c294bc65 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event) return NULL; } -static bool intel_try_alt_er(struct perf_event *event, int orig_idx) +static int intel_alt_er(int idx) { if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) - return false; + return idx; - if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { - event->hw.config &= ~INTEL_ARCH_EVENT_MASK; - event->hw.config |= 0x01bb; - event->hw.extra_reg.idx = EXTRA_REG_RSP_1; - event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; - } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { + if (idx == EXTRA_REG_RSP_0) + return EXTRA_REG_RSP_1; + + if (idx == EXTRA_REG_RSP_1) + return EXTRA_REG_RSP_0; + + return idx; +} + +static void intel_fixup_er(struct perf_event *event, int idx) +{ + event->hw.extra_reg.idx = idx; + + if (idx == EXTRA_REG_RSP_0) { event->hw.config &= ~INTEL_ARCH_EVENT_MASK; event->hw.config |= 0x01b7; - event->hw.extra_reg.idx = EXTRA_REG_RSP_0; event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; + } else if (idx == EXTRA_REG_RSP_1) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01bb; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; } - - if (event->hw.extra_reg.idx == orig_idx) - return false; - - return true; } /* @@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, struct event_constraint *c = &emptyconstraint; struct er_account *era; unsigned long flags; - int orig_idx = reg->idx; + int idx = reg->idx; - /* already allocated shared msr */ - if (reg->alloc) + /* + * reg->alloc can be set due to existing state, so for fake cpuc we + * need to ignore this, otherwise we might fail to allocate proper fake + * state for this extra reg constraint. Also see the comment below. 
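The is_fake flag introduced here follows a general dry-run pattern: validate_{group,event}() hand in a throw-away cpuc, so bookkeeping that must persist for real scheduling is skipped for it. A generic sketch of that pattern, with hypothetical names:

    /* Dry-run guard: a fake context may test for availability but must
     * never record an allocation, since it is thrown away afterwards. */
    struct ctx { int is_fake; int alloc; };

    static int try_get_resource(struct ctx *c, int available)
    {
        if (c->alloc && !c->is_fake)    /* a real ctx already holds it */
            return 0;
        if (!available)
            return -1;
        if (!c->is_fake)                /* never mutate state on a dry run */
            c->alloc = 1;
        return 0;
    }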
+ */ + if (reg->alloc && !cpuc->is_fake) return NULL; /* call x86_get_event_constraint() */ again: - era = &cpuc->shared_regs->regs[reg->idx]; + era = &cpuc->shared_regs->regs[idx]; /* * we use spin_lock_irqsave() to avoid lockdep issues when * passing a fake cpuc @@ -1173,6 +1183,29 @@ again: if (!atomic_read(&era->ref) || era->config == reg->config) { + /* + * If its a fake cpuc -- as per validate_{group,event}() we + * shouldn't touch event state and we can avoid doing so + * since both will only call get_event_constraints() once + * on each event, this avoids the need for reg->alloc. + * + * Not doing the ER fixup will only result in era->reg being + * wrong, but since we won't actually try and program hardware + * this isn't a problem either. + */ + if (!cpuc->is_fake) { + if (idx != reg->idx) + intel_fixup_er(event, idx); + + /* + * x86_schedule_events() can call get_event_constraints() + * multiple times on events in the case of incremental + * scheduling(). reg->alloc ensures we only do the ER + * allocation once. + */ + reg->alloc = 1; + } + /* lock in msr value */ era->config = reg->config; era->reg = reg->reg; @@ -1180,17 +1213,17 @@ again: /* one more user */ atomic_inc(&era->ref); - /* no need to reallocate during incremental event scheduling */ - reg->alloc = 1; - /* * need to call x86_get_event_constraint() * to check if associated event has constraints */ c = NULL; - } else if (intel_try_alt_er(event, orig_idx)) { - raw_spin_unlock_irqrestore(&era->lock, flags); - goto again; + } else { + idx = intel_alt_er(idx); + if (idx != reg->idx) { + raw_spin_unlock_irqrestore(&era->lock, flags); + goto again; + } } raw_spin_unlock_irqrestore(&era->lock, flags); @@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, struct er_account *era; /* - * only put constraint if extra reg was actually - * allocated. Also takes care of event which do - * not use an extra shared reg + * Only put constraint if extra reg was actually allocated. Also takes + * care of event which do not use an extra shared reg. + * + * Also, if this is a fake cpuc we shouldn't touch any event state + * (reg->alloc) and we don't care about leaving inconsistent cpuc state + * either since it'll be thrown out. */ - if (!reg->alloc) + if (!reg->alloc || cpuc->is_fake) return; era = &cpuc->shared_regs->regs[reg->idx]; @@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, intel_put_shared_regs_event_constraints(cpuc, event); } -static int intel_pmu_hw_config(struct perf_event *event) +static void intel_pebs_aliases_core2(struct perf_event *event) { - int ret = x86_pmu_hw_config(event); - - if (ret) - return ret; - - if (event->attr.precise_ip && - (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { + if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { /* * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P * (0x003c) so that we can use it with PEBS. @@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event) */ u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); + alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); + event->hw.config = alt_config; + } +} + +static void intel_pebs_aliases_snb(struct perf_event *event) +{ + if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) { + /* + * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P + * (0x003c) so that we can use it with PEBS. + * + * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't + * PEBS capable. 
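For reference while reading the PEBS aliases below: the X86_CONFIG() fields land in the architectural IA32_PERFEVTSELx layout, event select in bits 7:0, unit mask in bits 15:8, invert at bit 23, counter mask in bits 31:24. A hand-rolled sketch of the encoding (the helper name is hypothetical; the bit positions are architectural):

    #include <stdint.h>

    static uint64_t evtsel(uint8_t event, uint8_t umask, int inv, uint8_t cmask)
    {
        return (uint64_t)event | ((uint64_t)umask << 8) |
               ((uint64_t)!!inv << 23) | ((uint64_t)cmask << 24);
    }
    /* evtsel(0xc2, 0x01, 1, 16) == 0x108001c2: "cycles retiring 16 or
     * fewer uops", i.e. every cycle, but on a PEBS-capable event. */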
However we can use UOPS_RETIRED.ALL + * (0x01c2), which is a PEBS capable event, to get the same + * count. + * + * UOPS_RETIRED.ALL counts the number of cycles that retires + * CNTMASK micro-ops. By setting CNTMASK to a value (16) + * larger than the maximum number of micro-ops that can be + * retired per cycle (4) and then inverting the condition, we + * count all cycles that retire 16 or less micro-ops, which + * is every cycle. + * + * Thereby we gain a PEBS capable cycle counter. + */ + u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16); alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); event->hw.config = alt_config; } +} + +static int intel_pmu_hw_config(struct perf_event *event) +{ + int ret = x86_pmu_hw_config(event); + + if (ret) + return ret; + + if (event->attr.precise_ip && x86_pmu.pebs_aliases) + x86_pmu.pebs_aliases(event); if (intel_pmu_needs_lbr_smpl(event)) { ret = intel_pmu_setup_lbr_filter(event); @@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .get_event_constraints = intel_get_event_constraints, .put_event_constraints = intel_put_event_constraints, + .pebs_aliases = intel_pebs_aliases_core2, .format_attrs = intel_arch3_formats_attr, @@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ - x86_add_quirk(intel_sandybridge_quirk); case 45: /* SandyBridge, "Romely-EP" */ + x86_add_quirk(intel_sandybridge_quirk); + case 58: /* IvyBridge */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; x86_pmu.extra_regs = intel_snb_extra_regs; /* all extra regs are per-cpu when HT is on */ x86_pmu.er_flags |= ERF_HAS_RSP_1; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 5a3edc27f6e..35e2192df9f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ - INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */ - INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */ - INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */ - INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */ - INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */ + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 086eb58c6e8..f1b42b3a186 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void) bool ret = false; 
struct pvclock_vcpu_time_info *src; - /* - * per_cpu() is safe here because this function is only called from - * timer functions where preemption is already disabled. - */ - WARN_ON(!in_atomic()); src = &__get_cpu_var(hv_clock); if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c index e31bf8d5c4d..149b8d9c6ad 100644 --- a/arch/x86/kernel/nmi_selftest.c +++ b/arch/x86/kernel/nmi_selftest.c @@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs) static void __init init_nmi_testsuite(void) { /* trap all the unknown NMIs we may generate */ - register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); + register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); } static void __init cleanup_nmi_testsuite(void) @@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask) { unsigned long timeout; - if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, + if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback, NMI_FLAG_FIRST, "nmi_selftest")) { nmi_fail = FAILURE; return; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 79c45af8160..25b48edb847 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -639,9 +639,11 @@ void native_machine_shutdown(void) set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); /* - * O.K Now that I'm on the appropriate processor, - * stop all of the others. + * O.K Now that I'm on the appropriate processor, stop all of the + * others. Also disable the local irq to not receive the per-cpu + * timer interrupt which may trigger scheduler's load balance. */ + local_irq_disable(); stop_other_cpus(); #endif diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f56f96da77f..3fab55bea29 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -382,6 +382,15 @@ void __cpuinit set_cpu_sibling_map(int cpu) if ((i == cpu) || (has_mc && match_llc(c, o))) link_mask(llc_shared, cpu, i); + } + + /* + * This needs a separate iteration over the cpus because we rely on all + * cpu_sibling_mask links to be set-up. + */ + for_each_cpu(i, cpu_sibling_setup_mask) { + o = &cpu_data(i); + if ((i == cpu) || (has_mc && match_mc(c, o))) { link_mask(core, cpu, i); @@ -410,15 +419,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) /* maps the cpu to the sched domain representing multi-core */ const struct cpumask *cpu_coregroup_mask(int cpu) { - struct cpuinfo_x86 *c = &cpu_data(cpu); - /* - * For perf, we return last level cache shared map. 
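Returning to set_cpu_sibling_map() above: the split into two loops is a two-pass link-up, shown schematically below with stand-in topology predicates. A single combined loop could observe half-built sibling masks for CPUs not yet visited.

    #define NCPUS 4

    static unsigned int sibling_mask[NCPUS], core_mask[NCPUS];

    static int match_sib(int i, int j)  { return i / 2 == j / 2; }
    static int match_core(int i, int j) /* allowed to read sibling_mask[] */
    {
        return i == j || (sibling_mask[i] & sibling_mask[j]);
    }

    static void link_all(void)
    {
        for (int i = 0; i < NCPUS; i++)      /* pass 1: populate siblings */
            for (int j = 0; j < NCPUS; j++)
                if (match_sib(i, j))
                    sibling_mask[i] |= 1u << j;

        for (int i = 0; i < NCPUS; i++)      /* pass 2: depends on pass 1 */
            for (int j = 0; j < NCPUS; j++)
                if (match_core(i, j))
                    core_mask[i] |= 1u << j;
    }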
- * And for power savings, we return cpu_core_map - */ - if (!(cpu_has(c, X86_FEATURE_AMD_DCM))) - return cpu_core_mask(cpu); - else - return cpu_llc_shared_mask(cpu); + return cpu_llc_shared_mask(cpu); } static void impress_friends(void) diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index f61ee67ec00..677b1ed184c 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <asm/word-at-a-time.h> +#include <linux/sched.h> /* * best effort, GUP based copy_from_user() that is NMI-safe @@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) void *map; int ret; + if (__range_not_ok(from, n, TASK_SIZE) == 0) + return len; + do { ret = __get_user_pages_fast(addr, 1, 0, &page); if (!ret) diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 81913790442..5d7e51f3fd2 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -28,7 +28,7 @@ # - (66): the last prefix is 0x66 # - (F3): the last prefix is 0xF3 # - (F2): the last prefix is 0xF2 -# +# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) Table: one byte opcode Referrer: @@ -515,12 +515,12 @@ b4: LFS Gv,Mp b5: LGS Gv,Mp b6: MOVZX Gv,Eb b7: MOVZX Gv,Ew -b8: JMPE | POPCNT Gv,Ev (F3) +b8: JMPE (!F3) | POPCNT Gv,Ev (F3) b9: Grp10 (1A) ba: Grp8 Ev,Ib (1A) bb: BTC Ev,Gv -bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) -bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) +bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3) +bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3) be: MOVSX Gv,Eb bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 97141c26a13..bc4e9d84157 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en extra += PMD_SIZE; #endif /* The first 2/4M doesn't use large pages. 
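For the find_early_table_space() hunk this comment introduces below: only a range overlapping the first 2/4 MiB is mapped with page-granular PTEs, so charging mr->end - mr->start for every range over-reserved early page-table space. A toy version of the corrected estimate, with illustrative constants:

    #define PAGE_SHIFT_ 12                    /* 4 KiB pages (illustrative) */
    #define PAGE_SIZE_  (1UL << PAGE_SHIFT_)
    #define PMD_SIZE_   (1UL << 21)           /* 2 MiB large pages */

    /* PTE entries needed for [start, end): nonzero only when the range
     * dips into the low region that is never mapped with large pages. */
    static unsigned long ptes_needed(unsigned long start, unsigned long end)
    {
        unsigned long extra = 0;
        if (start < PMD_SIZE_)
            extra += end - start;
        return (extra + PAGE_SIZE_ - 1) >> PAGE_SHIFT_;
    }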
*/ - extra += mr->end - mr->start; + if (mr->start < PMD_SIZE) + extra += mr->end - mr->start; ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; } else diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 732af3a9618..4599c3e8bcb 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) return; } + node_set(node, numa_nodes_parsed); + printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1); diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e31bcd8f2ee..fd41a9262d6 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier); EXPORT_SYMBOL_GPL(intel_scu_notifier); /* Called by IPC driver */ -void intel_scu_devices_create(void) +void __devinit intel_scu_devices_create(void) { int i; diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 3ae0e61abd2..59880afa851 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void) */ mmr_image |= (1L << SOFTACK_MSHIFT); if (is_uv2_hub()) { - mmr_image &= ~(1L << UV2_LEG_SHFT); mmr_image |= (1L << UV2_EXT_SHFT); } write_mmr_misc_control(pnode, mmr_image); diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 5f6a5b6c3a1..ddcf39b1a18 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -66,9 +66,10 @@ BEGIN { rex_expr = "^REX(\\.[XRWB]+)*" fpu_expr = "^ESC" # TODO - lprefix1_expr = "\\(66\\)" + lprefix1_expr = "\\((66|!F3)\\)" lprefix2_expr = "\\(F3\\)" - lprefix3_expr = "\\(F2\\)" + lprefix3_expr = "\\((F2|!F3)\\)" + lprefix_expr = "\\((66|F2|F3)\\)" max_lprefix = 4 # All opcodes starting with lower-case 'v' or with (v1) superscript @@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod) if (match(ext, lprefix1_expr)) { lptable1[idx] = add_flags(lptable1[idx],flags) variant = "INAT_VARIANT" - } else if (match(ext, lprefix2_expr)) { + } + if (match(ext, lprefix2_expr)) { lptable2[idx] = add_flags(lptable2[idx],flags) variant = "INAT_VARIANT" - } else if (match(ext, lprefix3_expr)) { + } + if (match(ext, lprefix3_expr)) { lptable3[idx] = add_flags(lptable3[idx],flags) variant = "INAT_VARIANT" - } else { + } + if (!match(ext, lprefix_expr)){ table[idx] = add_flags(table[idx],flags) } } diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 0b9f2e13c78..c1dacca312f 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, struct timespec __user *tsp, const sigset_t __user *sigmask, size_t sigsetsize); - - +asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, + size_t sigsetsize); diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b9f8e5850d3..efe4e854b3c 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -493,7 +493,7 @@ static void do_signal(struct pt_regs *regs) if (ret) return; - signal_delivered(signr, info, ka, regs, 0); + signal_delivered(signr, &info, &ka, regs, 0); if (current->ptrace & PT_SINGLESTEP) task_pt_regs(current)->icountlevel = 1; diff --git a/drivers/acpi/Kconfig 
b/drivers/acpi/Kconfig index 47768ff8734..80998958cf4 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -208,7 +208,7 @@ config ACPI_IPMI config ACPI_HOTPLUG_CPU bool - depends on ACPI_PROCESSOR && HOTPLUG_CPU + depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU select ACPI_CONTAINER default y diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 86933ca8b47..7dd3f9fb9f3 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery) static void acpi_battery_refresh(struct acpi_battery *battery) { + int power_unit; + if (!battery->bat.dev) return; + power_unit = battery->power_unit; + acpi_battery_get_info(battery); - /* The battery may have changed its reporting units. */ + + if (power_unit == battery->power_unit) + return; + + /* The battery has changed its reporting units. */ sysfs_remove_battery(battery); sysfs_add_battery(battery); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 3188da3df8d..adceafda9c1 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data); Power Management -------------------------------------------------------------------------- */ +static const char *state_string(int state) +{ + switch (state) { + case ACPI_STATE_D0: + return "D0"; + case ACPI_STATE_D1: + return "D1"; + case ACPI_STATE_D2: + return "D2"; + case ACPI_STATE_D3_HOT: + return "D3hot"; + case ACPI_STATE_D3_COLD: + return "D3"; + default: + return "(unknown)"; + } +} + static int __acpi_bus_get_power(struct acpi_device *device, int *state) { - int result = 0; - acpi_status status = 0; - unsigned long long psc = 0; + int result = ACPI_STATE_UNKNOWN; if (!device || !state) return -EINVAL; - *state = ACPI_STATE_UNKNOWN; - - if (device->flags.power_manageable) { - /* - * Get the device's power state either directly (via _PSC) or - * indirectly (via power resources). - */ - if (device->power.flags.power_resources) { - result = acpi_power_get_inferred_state(device, state); - if (result) - return result; - } else if (device->power.flags.explicit_get) { - status = acpi_evaluate_integer(device->handle, "_PSC", - NULL, &psc); - if (ACPI_FAILURE(status)) - return -ENODEV; - *state = (int)psc; - } - } else { + if (!device->flags.power_manageable) { /* TBD: Non-recursive algorithm for walking up hierarchy. */ *state = device->parent ? device->parent->power.state : ACPI_STATE_D0; + goto out; + } + + /* + * Get the device's power state either directly (via _PSC) or + * indirectly (via power resources). + */ + if (device->power.flags.explicit_get) { + unsigned long long psc; + acpi_status status = acpi_evaluate_integer(device->handle, + "_PSC", NULL, &psc); + if (ACPI_FAILURE(status)) + return -ENODEV; + + result = psc; + } + /* The test below covers ACPI_STATE_UNKNOWN too. */ + if (result <= ACPI_STATE_D2) { + ; /* Do nothing. 
*/ + } else if (device->power.flags.power_resources) { + int error = acpi_power_get_inferred_state(device, &result); + if (error) + return error; + } else if (result == ACPI_STATE_D3_HOT) { + result = ACPI_STATE_D3; } + *state = result; - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", - device->pnp.bus_id, *state)); + out: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n", + device->pnp.bus_id, state_string(*state))); return 0; } @@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) /* Make sure this is a valid target state */ if (state == device->power.state) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", - state)); + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n", + state_string(state))); return 0; } if (!device->power.states[state].flags.valid) { - printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); + printk(KERN_WARNING PREFIX "Device does not support %s\n", + state_string(state)); return -ENODEV; } if (device->parent && (state < device->parent->power.state)) { @@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state) end: if (result) printk(KERN_WARNING PREFIX - "Device [%s] failed to transition to D%d\n", - device->pnp.bus_id, state); + "Device [%s] failed to transition to %s\n", + device->pnp.bus_id, state_string(state)); else { device->power.state = state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Device [%s] transitioned to D%d\n", - device->pnp.bus_id, state)); + "Device [%s] transitioned to %s\n", + device->pnp.bus_id, state_string(state))); } return result; diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 0500f719f63..dd6d6a3c678 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) * We know a device's inferred power state when all the resources * required for a given D-state are 'on'. 
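For the bound fix below: scanning with '<' stopped one state short of ACPI_STATE_D3_HOT, so its resource list was never consulted and D3hot could not be inferred. Schematically, with illustrative constants:

    enum { D0, D1, D2, D3_HOT, D3_COLD, NSTATES };

    /* Shallowest D-state whose required resources are all on; the loop
     * bound must be inclusive or D3_HOT is silently skipped. */
    static int inferred_state(const int all_resources_on[NSTATES])
    {
        for (int i = D0; i <= D3_HOT; i++)
            if (all_resources_on[i])
                return i;
        return D3_COLD;
    }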
*/ - for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { + for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { list = &device->power.states[i].resources; if (list->count < 1) continue; diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 0af48a8554c..a093dc163a4 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) struct acpi_buffer state = { 0, NULL }; union acpi_object *pss = NULL; int i; + int last_invalid = -1; status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); @@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr) ((u32)(px->core_frequency * 1000) != (px->core_frequency * 1000))) { printk(KERN_ERR FW_BUG PREFIX - "Invalid BIOS _PSS frequency: 0x%llx MHz\n", - px->core_frequency); - result = -EFAULT; - kfree(pr->performance->states); - goto end; + "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n", + pr->id, px->core_frequency); + if (last_invalid == -1) + last_invalid = i; + } else { + if (last_invalid != -1) { + /* + * Copy this valid entry over last_invalid entry + */ + memcpy(&(pr->performance->states[last_invalid]), + px, sizeof(struct acpi_processor_px)); + ++last_invalid; + } } } + if (last_invalid == 0) { + printk(KERN_ERR FW_BUG PREFIX + "No valid BIOS _PSS frequency found for processor %d\n", pr->id); + result = -EFAULT; + kfree(pr->performance->states); + pr->performance->states = NULL; + } + + if (last_invalid > 0) + pr->performance->state_count = last_invalid; + end: kfree(buffer.pointer); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 85cbfdccc97..c8a1f3b6811 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void) ACPI_BUS_TYPE_POWER_BUTTON, ACPI_STA_DEFAULT, &ops); + device_init_wakeup(&device->dev, true); } if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 74ee4ab577b..88561029cca 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); static u8 sleep_states[ACPI_S_STATE_COUNT]; +static bool pwr_btn_event_pending; static void acpi_sleep_tts_switch(u32 acpi_state) { @@ -184,6 +185,14 @@ static int acpi_pm_prepare(void) return error; } +static int find_powerf_dev(struct device *dev, void *data) +{ + struct acpi_device *device = to_acpi_device(dev); + const char *hid = acpi_device_hid(device); + + return !strcmp(hid, ACPI_BUTTON_HID_POWERF); +} + /** * acpi_pm_finish - Instruct the platform to leave a sleep state. * @@ -192,6 +201,7 @@ static int acpi_pm_prepare(void) */ static void acpi_pm_finish(void) { + struct device *pwr_btn_dev; u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); @@ -209,6 +219,23 @@ static void acpi_pm_finish(void) acpi_set_firmware_waking_vector((acpi_physical_address) 0); acpi_target_sleep_state = ACPI_STATE_S0; + + /* If we were woken with the fixed power button, provide a small + * hint to userspace in the form of a wakeup event on the fixed power + * button device (if it can be found). + * + * We delay the event generation til now, as the PM layer requires + * timekeeping to be running before we generate events. 
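Back to the _PSS handling in processor_perflib.c above: the last_invalid bookkeeping amounts to a stable in-place compaction, copying each valid entry down over the earliest invalid slot. The same idea in isolation, as a hedged sketch:

    /* Stable in-place compaction: keeps valid entries in order and
     * returns the new count, as the _PSS code does with state_count. */
    static int compact(unsigned int *v, int n, int (*valid)(unsigned int))
    {
        int w = 0;                      /* next slot to (re)fill */
        for (int r = 0; r < n; r++)
            if (valid(v[r]))
                v[w++] = v[r];
        return w;
    }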
*/ + if (!pwr_btn_event_pending) + return; + + pwr_btn_event_pending = false; + pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, + find_powerf_dev); + if (pwr_btn_dev) { + pm_wakeup_event(pwr_btn_dev, 0); + put_device(pwr_btn_dev); + } } /** @@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state) /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the * POWER_BUTTON event should not reach userspace ] + * + * However, we do generate a small hint for userspace in the form of + * a wakeup event. We flag this condition for now and generate the + * event later, as we're currently too early in resume to be able to + * generate wakeup events. */ - if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) - acpi_clear_event(ACPI_EVENT_POWER_BUTTON); + if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { + acpi_event_status pwr_btn_status; + + acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); + + if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { + acpi_clear_event(ACPI_EVENT_POWER_BUTTON); + /* Flag for later */ + pwr_btn_event_pending = true; + } + } /* * Disable and clear GPE status before interrupt is enabled. Some GPEs @@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p) * can wake the system. _S0W may be valid, too. */ if (acpi_target_sleep_state == ACPI_STATE_S0 || - (device_may_wakeup(dev) && - adev->wakeup.sleep_state <= acpi_target_sleep_state)) { + (device_may_wakeup(dev) && adev->wakeup.flags.valid && + adev->wakeup.sleep_state >= acpi_target_sleep_state)) { acpi_status status; acpi_method[3] = 'W'; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 9577b6fa265..a576575617d 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -1687,10 +1687,6 @@ static int acpi_video_bus_add(struct acpi_device *device) set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); set_bit(KEY_DISPLAY_OFF, input->keybit); - error = input_register_device(input); - if (error) - goto err_stop_video; - printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), video->flags.multihead ? 
"yes" : "no", @@ -1701,12 +1697,16 @@ static int acpi_video_bus_add(struct acpi_device *device) video->pm_nb.priority = 0; error = register_pm_notifier(&video->pm_nb); if (error) - goto err_unregister_input_dev; + goto err_stop_video; + + error = input_register_device(input); + if (error) + goto err_unregister_pm_notifier; return 0; - err_unregister_input_dev: - input_unregister_device(input); + err_unregister_pm_notifier: + unregister_pm_notifier(&video->pm_nb); err_stop_video: acpi_video_bus_stop_devices(video); err_free_input_dev: @@ -1743,9 +1743,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) return 0; } +static int __init is_i740(struct pci_dev *dev) +{ + if (dev->device == 0x00D1) + return 1; + if (dev->device == 0x7000) + return 1; + return 0; +} + static int __init intel_opregion_present(void) { -#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) + int opregion = 0; struct pci_dev *dev = NULL; u32 address; @@ -1754,13 +1763,15 @@ static int __init intel_opregion_present(void) continue; if (dev->vendor != PCI_VENDOR_ID_INTEL) continue; + /* We don't want to poke around undefined i740 registers */ + if (is_i740(dev)) + continue; pci_read_config_dword(dev, 0xfc, &address); if (!address) continue; - return 1; + opregion = 1; } -#endif - return 0; + return opregion; } int acpi_video_register(void) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0bcda488f11..c89aa01fb1d 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev, map->lock = regmap_lock_mutex; map->unlock = regmap_unlock_mutex; } - map->format.buf_size = (config->reg_bits + config->val_bits) / 8; map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map->format.pad_bytes = config->pad_bits / 8; map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); - map->format.buf_size += map->format.pad_bytes; + map->format.buf_size = DIV_ROUND_UP(config->reg_bits + + config->val_bits + config->pad_bits, 8); map->reg_shift = config->pad_bits % 8; if (config->reg_stride) map->reg_stride = config->reg_stride; @@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev, ret = regcache_init(map, config); if (ret < 0) - goto err_free_workbuf; + goto err_debugfs; /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); @@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev, err_cache: regcache_exit(map); -err_free_workbuf: +err_debugfs: + regmap_debugfs_exit(map); kfree(map->work_buf); err_map: kfree(map); @@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) return ret; } +EXPORT_SYMBOL_GPL(regmap_reinit_cache); /** * regmap_exit(): Free a previously allocated register map diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 764f70c5e69..0a418527941 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c0091753a0d..8e2d9140f30 100644 --- a/drivers/char/agp/intel-agp.h +++ 
b/drivers/char/agp/intel-agp.h @@ -212,6 +212,7 @@ #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index f518b99f53f..6289f0eee24 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -36,6 +36,13 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max, /* data ready? */ if (readl(trng->base + TRNG_ODATA) & 1) { *data = readl(trng->base + TRNG_ODATA); + /* + ensure data ready is only set again AFTER the next data + word is ready in case it got set between checking ISR + and reading ODATA, so we don't risk re-reading the + same word + */ + readl(trng->base + TRNG_ISR); return 4; } else return 0; diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d81a1d3265..dd3e661a124 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o +obj-$(CONFIG_EM_TIMER_STI) += em_sti.o obj-$(CONFIG_CLKBLD_I8253) += i8253.o obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c new file mode 100644 index 00000000000..372051d1bba --- /dev/null +++ b/drivers/clocksource/em_sti.c @@ -0,0 +1,406 @@ +/* + * Emma Mobile Timer Support - STI + * + * Copyright (C) 2012 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
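On the atmel-rng change above: reading a status register back after consuming the data word is a generic defence against a ready flag that was raised between the check and the data read. The shape of the pattern, with hypothetical register names (the driver itself reads TRNG_ISR for this purpose):

    #include <stdint.h>

    /* Returns the number of bytes produced, as the driver does. */
    static int read_rng_word(volatile uint32_t *status,
                             volatile uint32_t *odata, uint32_t *out)
    {
        if (!(*status & 1))       /* data-ready bit */
            return 0;
        *out = *odata;            /* consume the current word */
        (void)*status;            /* readback orders the next 'ready' */
        return 4;
    }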
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/err.h> +#include <linux/delay.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/slab.h> +#include <linux/module.h> + +enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR }; + +struct em_sti_priv { + void __iomem *base; + struct clk *clk; + struct platform_device *pdev; + unsigned int active[USER_NR]; + unsigned long rate; + raw_spinlock_t lock; + struct clock_event_device ced; + struct clocksource cs; +}; + +#define STI_CONTROL 0x00 +#define STI_COMPA_H 0x10 +#define STI_COMPA_L 0x14 +#define STI_COMPB_H 0x18 +#define STI_COMPB_L 0x1c +#define STI_COUNT_H 0x20 +#define STI_COUNT_L 0x24 +#define STI_COUNT_RAW_H 0x28 +#define STI_COUNT_RAW_L 0x2c +#define STI_SET_H 0x30 +#define STI_SET_L 0x34 +#define STI_INTSTATUS 0x40 +#define STI_INTRAWSTATUS 0x44 +#define STI_INTENSET 0x48 +#define STI_INTENCLR 0x4c +#define STI_INTFFCLR 0x50 + +static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) +{ + return ioread32(p->base + offs); +} + +static inline void em_sti_write(struct em_sti_priv *p, int offs, + unsigned long value) +{ + iowrite32(value, p->base + offs); +} + +static int em_sti_enable(struct em_sti_priv *p) +{ + int ret; + + /* enable clock */ + ret = clk_enable(p->clk); + if (ret) { + dev_err(&p->pdev->dev, "cannot enable clock\n"); + return ret; + } + + /* configure channel, periodic mode and maximum timeout */ + p->rate = clk_get_rate(p->clk); + + /* reset the counter */ + em_sti_write(p, STI_SET_H, 0x40000000); + em_sti_write(p, STI_SET_L, 0x00000000); + + /* mask and clear pending interrupts */ + em_sti_write(p, STI_INTENCLR, 3); + em_sti_write(p, STI_INTFFCLR, 3); + + /* enable updates of counter registers */ + em_sti_write(p, STI_CONTROL, 1); + + return 0; +} + +static void em_sti_disable(struct em_sti_priv *p) +{ + /* mask interrupts */ + em_sti_write(p, STI_INTENCLR, 3); + + /* stop clock */ + clk_disable(p->clk); +} + +static cycle_t em_sti_count(struct em_sti_priv *p) +{ + cycle_t ticks; + unsigned long flags; + + /* the STI hardware buffers the 48-bit count, but to + * break it out into two 32-bit access the registers + * must be accessed in a certain order. + * Always read STI_COUNT_H before STI_COUNT_L. 
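The access-order rule in this comment is worth spelling out: per the comment, the hardware buffers the 48-bit count when the high half is read, so COUNT_H must be read before COUNT_L, and the driver takes p->lock around the pair so two readers cannot interleave the sequence. A compilable sketch with a stubbed register file (read_reg() stands in for em_sti_read()):

    #include <stdint.h>
    #include <stdio.h>

    enum { STI_COUNT_H_ = 0x20, STI_COUNT_L_ = 0x24 };
    static uint32_t fake_regs[0x60 / 4];
    static uint32_t read_reg(int offs) { return fake_regs[offs / 4]; }

    /* Ordered 48-bit read: H first (latches the count), L second;
     * a real caller holds the lock around the pair. */
    static uint64_t read_48bit_count(void)
    {
        uint64_t hi = read_reg(STI_COUNT_H_) & 0xffff;  /* upper 16 bits */
        uint64_t lo = read_reg(STI_COUNT_L_);           /* lower 32 bits */
        return (hi << 32) | lo;
    }

    int main(void)
    {
        fake_regs[STI_COUNT_H_ / 4] = 0x0001;
        fake_regs[STI_COUNT_L_ / 4] = 0x89abcdefu;
        printf("count = %llx\n", (unsigned long long)read_48bit_count());
        return 0;
    }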
+ */ + raw_spin_lock_irqsave(&p->lock, flags); + ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; + ticks |= em_sti_read(p, STI_COUNT_L); + raw_spin_unlock_irqrestore(&p->lock, flags); + + return ticks; +} + +static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&p->lock, flags); + + /* mask compare A interrupt */ + em_sti_write(p, STI_INTENCLR, 1); + + /* update compare A value */ + em_sti_write(p, STI_COMPA_H, next >> 32); + em_sti_write(p, STI_COMPA_L, next & 0xffffffff); + + /* clear compare A interrupt source */ + em_sti_write(p, STI_INTFFCLR, 1); + + /* unmask compare A interrupt */ + em_sti_write(p, STI_INTENSET, 1); + + raw_spin_unlock_irqrestore(&p->lock, flags); + + return next; +} + +static irqreturn_t em_sti_interrupt(int irq, void *dev_id) +{ + struct em_sti_priv *p = dev_id; + + p->ced.event_handler(&p->ced); + return IRQ_HANDLED; +} + +static int em_sti_start(struct em_sti_priv *p, unsigned int user) +{ + unsigned long flags; + int used_before; + int ret = 0; + + raw_spin_lock_irqsave(&p->lock, flags); + used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + if (!used_before) + ret = em_sti_enable(p); + + if (!ret) + p->active[user] = 1; + raw_spin_unlock_irqrestore(&p->lock, flags); + + return ret; +} + +static void em_sti_stop(struct em_sti_priv *p, unsigned int user) +{ + unsigned long flags; + int used_before, used_after; + + raw_spin_lock_irqsave(&p->lock, flags); + used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + p->active[user] = 0; + used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT]; + + if (used_before && !used_after) + em_sti_disable(p); + raw_spin_unlock_irqrestore(&p->lock, flags); +} + +static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) +{ + return container_of(cs, struct em_sti_priv, cs); +} + +static cycle_t em_sti_clocksource_read(struct clocksource *cs) +{ + return em_sti_count(cs_to_em_sti(cs)); +} + +static int em_sti_clocksource_enable(struct clocksource *cs) +{ + int ret; + struct em_sti_priv *p = cs_to_em_sti(cs); + + ret = em_sti_start(p, USER_CLOCKSOURCE); + if (!ret) + __clocksource_updatefreq_hz(cs, p->rate); + return ret; +} + +static void em_sti_clocksource_disable(struct clocksource *cs) +{ + em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE); +} + +static void em_sti_clocksource_resume(struct clocksource *cs) +{ + em_sti_clocksource_enable(cs); +} + +static int em_sti_register_clocksource(struct em_sti_priv *p) +{ + struct clocksource *cs = &p->cs; + + memset(cs, 0, sizeof(*cs)); + cs->name = dev_name(&p->pdev->dev); + cs->rating = 200; + cs->read = em_sti_clocksource_read; + cs->enable = em_sti_clocksource_enable; + cs->disable = em_sti_clocksource_disable; + cs->suspend = em_sti_clocksource_disable; + cs->resume = em_sti_clocksource_resume; + cs->mask = CLOCKSOURCE_MASK(48); + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; + + dev_info(&p->pdev->dev, "used as clock source\n"); + + /* Register with dummy 1 Hz value, gets updated in ->enable() */ + clocksource_register_hz(cs, 1); + return 0; +} + +static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced) +{ + return container_of(ced, struct em_sti_priv, ced); +} + +static void em_sti_clock_event_mode(enum clock_event_mode mode, + struct clock_event_device *ced) +{ + struct em_sti_priv *p = ced_to_em_sti(ced); + + /* deal with old setting first */ + switch (ced->mode) { + case CLOCK_EVT_MODE_ONESHOT: + em_sti_stop(p, USER_CLOCKEVENT); + 
break; + default: + break; + } + + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + dev_info(&p->pdev->dev, "used for oneshot clock events\n"); + em_sti_start(p, USER_CLOCKEVENT); + clockevents_config(&p->ced, p->rate); + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + em_sti_stop(p, USER_CLOCKEVENT); + break; + default: + break; + } +} + +static int em_sti_clock_event_next(unsigned long delta, + struct clock_event_device *ced) +{ + struct em_sti_priv *p = ced_to_em_sti(ced); + cycle_t next; + int safe; + + next = em_sti_set_next(p, em_sti_count(p) + delta); + safe = em_sti_count(p) < (next - 1); + + return !safe; +} + +static void em_sti_register_clockevent(struct em_sti_priv *p) +{ + struct clock_event_device *ced = &p->ced; + + memset(ced, 0, sizeof(*ced)); + ced->name = dev_name(&p->pdev->dev); + ced->features = CLOCK_EVT_FEAT_ONESHOT; + ced->rating = 200; + ced->cpumask = cpumask_of(0); + ced->set_next_event = em_sti_clock_event_next; + ced->set_mode = em_sti_clock_event_mode; + + dev_info(&p->pdev->dev, "used for clock events\n"); + + /* Register with dummy 1 Hz value, gets updated in ->set_mode() */ + clockevents_config_and_register(ced, 1, 2, 0xffffffff); +} + +static int __devinit em_sti_probe(struct platform_device *pdev) +{ + struct em_sti_priv *p; + struct resource *res; + int irq, ret; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (p == NULL) { + dev_err(&pdev->dev, "failed to allocate driver data\n"); + ret = -ENOMEM; + goto err0; + } + + p->pdev = pdev; + platform_set_drvdata(pdev, p); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get I/O memory\n"); + ret = -EINVAL; + goto err0; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + ret = -EINVAL; + goto err0; + } + + /* map memory, let base point to the STI instance */ + p->base = ioremap_nocache(res->start, resource_size(res)); + if (p->base == NULL) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + ret = -ENXIO; + goto err0; + } + + /* get hold of clock */ + p->clk = clk_get(&pdev->dev, "sclk"); + if (IS_ERR(p->clk)) { + dev_err(&pdev->dev, "cannot get clock\n"); + ret = PTR_ERR(p->clk); + goto err1; + } + + if (request_irq(irq, em_sti_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&pdev->dev), p)) { + dev_err(&pdev->dev, "failed to request low IRQ\n"); + ret = -ENOENT; + goto err2; + } + + raw_spin_lock_init(&p->lock); + em_sti_register_clockevent(p); + em_sti_register_clocksource(p); + return 0; + +err2: + clk_put(p->clk); +err1: + iounmap(p->base); +err0: + kfree(p); + return ret; +} + +static int __devexit em_sti_remove(struct platform_device *pdev) +{ + return -EBUSY; /* cannot unregister clockevent and clocksource */ +} + +static const struct of_device_id em_sti_dt_ids[] __devinitconst = { + { .compatible = "renesas,em-sti", }, + {}, +}; +MODULE_DEVICE_TABLE(of, em_sti_dt_ids); + +static struct platform_driver em_sti_device_driver = { + .probe = em_sti_probe, + .remove = __devexit_p(em_sti_remove), + .driver = { + .name = "em_sti", + .of_match_table = em_sti_dt_ids, + } +}; + +module_platform_driver(em_sti_device_driver); + +MODULE_AUTHOR("Magnus Damm"); +MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 32fe9ef5cc5..98b06baafcc 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -48,13 +48,13 @@ struct 
sh_cmt_priv { unsigned long next_match_value; unsigned long max_match_value; unsigned long rate; - spinlock_t lock; + raw_spinlock_t lock; struct clock_event_device ced; struct clocksource cs; unsigned long total_cycles; }; -static DEFINE_SPINLOCK(sh_cmt_lock); +static DEFINE_RAW_SPINLOCK(sh_cmt_lock); #define CMSTR -1 /* shared register */ #define CMCSR 0 /* channel register */ @@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) unsigned long flags, value; /* start stop register shared by multiple timer channels */ - spin_lock_irqsave(&sh_cmt_lock, flags); + raw_spin_lock_irqsave(&sh_cmt_lock, flags); value = sh_cmt_read(p, CMSTR); if (start) @@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) value &= ~(1 << cfg->timer_bit); sh_cmt_write(p, CMSTR, value); - spin_unlock_irqrestore(&sh_cmt_lock, flags); + raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); } static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) @@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { unsigned long flags; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); __sh_cmt_set_next(p, delta); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) @@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) int ret = 0; unsigned long flags; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) ret = sh_cmt_enable(p, &p->rate); @@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) __sh_cmt_set_next(p, p->max_match_value); out: - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); return ret; } @@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) unsigned long flags; unsigned long f; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); p->flags &= ~flag; @@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) __sh_cmt_set_next(p, p->max_match_value); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) @@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) unsigned long value; int has_wrapped; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); value = p->total_cycles; raw = sh_cmt_get_counter(p, &has_wrapped); if (unlikely(has_wrapped)) raw += p->match_value + 1; - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); return value + raw; } @@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name, p->max_match_value = (1 << p->width) - 1; p->match_value = p->max_match_value; - spin_lock_init(&p->lock); + raw_spin_lock_init(&p->lock); if (clockevent_rating) sh_cmt_register_clockevent(p, name, clockevent_rating); diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index a2172f69041..d9b76ca64a6 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -43,7 +43,7 @@ struct 
sh_mtu2_priv { struct clock_event_device ced; }; -static DEFINE_SPINLOCK(sh_mtu2_lock); +static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); #define TSTR -1 /* shared register */ #define TCR 0 /* channel register */ @@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) unsigned long flags, value; /* start stop register shared by multiple timer channels */ - spin_lock_irqsave(&sh_mtu2_lock, flags); + raw_spin_lock_irqsave(&sh_mtu2_lock, flags); value = sh_mtu2_read(p, TSTR); if (start) @@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) value &= ~(1 << cfg->timer_bit); sh_mtu2_write(p, TSTR, value); - spin_unlock_irqrestore(&sh_mtu2_lock, flags); + raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); } static int sh_mtu2_enable(struct sh_mtu2_priv *p) diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 97f54b634be..c1b51d49d10 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -45,7 +45,7 @@ struct sh_tmu_priv { struct clocksource cs; }; -static DEFINE_SPINLOCK(sh_tmu_lock); +static DEFINE_RAW_SPINLOCK(sh_tmu_lock); #define TSTR -1 /* shared register */ #define TCOR 0 /* channel register */ @@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) unsigned long flags, value; /* start stop register shared by multiple timer channels */ - spin_lock_irqsave(&sh_tmu_lock, flags); + raw_spin_lock_irqsave(&sh_tmu_lock, flags); value = sh_tmu_read(p, TSTR); if (start) @@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) value &= ~(1 << cfg->timer_bit); sh_tmu_write(p, TSTR, value); - spin_unlock_irqrestore(&sh_tmu_lock, flags); + raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); } static int sh_tmu_enable(struct sh_tmu_priv *p) @@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) sh_tmu_enable(p); - /* TODO: calculate good shift from rate and counter bit width */ - - ced->shift = 32; - ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); - ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced); - ced->min_delta_ns = 5000; + clockevents_config(ced, p->rate); if (periodic) { p->periodic = (p->rate + HZ/2) / HZ; @@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, ced->set_mode = sh_tmu_clock_event_mode; dev_info(&p->pdev->dev, "used for clock events\n"); - clockevents_register_device(ced); + + clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); ret = setup_irq(p->irqaction.irq, &p->irqaction); if (ret) { diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 7bb00448e13..b6453d0e44a 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 420953197d0..d6de2e07fa0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = { }; static struct drm_driver exynos_drm_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | - DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, + .driver_features = DRIVER_HAVE_IRQ | 
DRIVER_MODESET | + DRIVER_GEM | DRIVER_PRIME, .load = exynos_drm_load, .unload = exynos_drm_unload, .open = exynos_drm_open, diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 6e9ac7bd1dc..23d5ad379f8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) manager_ops->commit(manager->dev); } -static struct drm_crtc * -exynos_drm_encoder_get_crtc(struct drm_encoder *encoder) -{ - return encoder->crtc; -} - static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { .dpms = exynos_drm_encoder_dpms, .mode_fixup = exynos_drm_encoder_mode_fixup, .mode_set = exynos_drm_encoder_mode_set, .prepare = exynos_drm_encoder_prepare, .commit = exynos_drm_encoder_commit, - .get_crtc = exynos_drm_encoder_get_crtc, }; static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index f82a299553f..4ccfe4328fa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -51,11 +51,22 @@ struct exynos_drm_fb { static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + unsigned int i; DRM_DEBUG_KMS("%s\n", __FILE__); drm_framebuffer_cleanup(fb); + for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { + struct drm_gem_object *obj; + + if (exynos_fb->exynos_gem_obj[i] == NULL) + continue; + + obj = &exynos_fb->exynos_gem_obj[i]->base; + drm_gem_object_unreference_unlocked(obj); + } + kfree(exynos_fb); exynos_fb = NULL; } @@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } - drm_gem_object_unreference_unlocked(obj); - fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); - if (IS_ERR(fb)) + if (IS_ERR(fb)) { + drm_gem_object_unreference_unlocked(obj); return fb; + } exynos_fb = to_exynos_fb(fb); nr = exynos_drm_format_num_buffers(fb->pixel_format); @@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-ENOENT); } - drm_gem_object_unreference_unlocked(obj); - exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 3ecb30d9355..50823756cde 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -31,10 +31,10 @@ static inline int exynos_drm_format_num_buffers(uint32_t format) { switch (format) { - case DRM_FORMAT_NV12M: + case DRM_FORMAT_NV12: case DRM_FORMAT_NV12MT: return 2; - case DRM_FORMAT_YUV420M: + case DRM_FORMAT_YUV420: return 3; default: return 1; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index fc91293c456..5c8b683029e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset) { - struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_gem_object *obj; int ret = 0; @@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - exynos_gem_obj = to_exynos_gem_obj(obj); - - if (!exynos_gem_obj->base.map_list.map) { - ret = 
drm_gem_create_mmap_offset(&exynos_gem_obj->base); + if (!obj->map_list.map) { + ret = drm_gem_create_mmap_offset(obj); if (ret) goto out; } - *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; + *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); out: diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 68ef0102837..e2147a2ddce 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) switch (win_data->pixel_format) { case DRM_FORMAT_NV12MT: tiled_mode = true; - case DRM_FORMAT_NV12M: + case DRM_FORMAT_NV12: crcb_mode = false; buf_num = 2; break; @@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx) mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); /* setting graphical layers */ - val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ val |= MXR_GRP_CFG_WIN_BLEND_EN; + val |= MXR_GRP_CFG_BLEND_PRE_MUL; + val |= MXR_GRP_CFG_PIXEL_BLEND_EN; val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ /* the same configuration for both layers */ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); - - val |= MXR_GRP_CFG_BLEND_PRE_MUL; - val |= MXR_GRP_CFG_PIXEL_BLEND_EN; mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); + /* setting video layers */ + val = MXR_GRP_CFG_ALPHA_VAL(0); + mixer_reg_write(res, MXR_VIDEO_CFG, val); + /* configuration of Video Processor Registers */ vp_win_reset(ctx); vp_default_filter(res); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 238a5216583..9fe9ebe52a7 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { @@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { @@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { @@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_valleyview_m_info = { @@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct intel_device_info intel_haswell_m_info = { @@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = { .has_blt_ring = 1, .has_llc = 1, .has_pch_split = 1, + .has_force_wake = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ @@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights"); /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(dev_priv, reg) \ - (((dev_priv)->info->gen >= 6) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) && \ - (!IS_VALLEYVIEW((dev_priv)->dev)) + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) #define 
__i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c9cfc67c2cf..b0b676abde0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -285,6 +285,7 @@ struct intel_device_info { u8 is_ivybridge:1; u8 is_valleyview:1; u8 has_pch_split:1; + u8 has_force_wake:1; u8 is_haswell:1; u8 has_fbc:1; u8 has_pipe_cxsr:1; @@ -1101,6 +1102,8 @@ struct drm_i915_file_private { #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) + #include "i915_trace.h" /** diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1417660a93e..b1fe0edda95 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -510,7 +510,7 @@ out: return ret; } -static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) +static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; @@ -550,6 +550,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); } +static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe; + + if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) + DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", + (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> + SDE_AUDIO_POWER_SHIFT_CPT); + + if (pch_iir & SDE_AUX_MASK_CPT) + DRM_DEBUG_DRIVER("AUX channel interrupt\n"); + + if (pch_iir & SDE_GMBUS_CPT) + DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_REQ_CPT) + DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); + + if (pch_iir & SDE_AUDIO_CP_CHG_CPT) + DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); + + if (pch_iir & SDE_FDI_MASK_CPT) + for_each_pipe(pipe) + DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); +} + static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -591,7 +620,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) if (pch_iir & SDE_HOTPLUG_MASK_CPT) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - pch_irq_handler(dev, pch_iir); + cpt_irq_handler(dev, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -684,7 +713,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - pch_irq_handler(dev, pch_iir); + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); } if (de_iir & DE_PCU_EVENT) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2d49b9507ed..48d5e8e051c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -210,6 +210,14 @@ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +/* IVB has funny definitions for which plane to flip. 
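+ * The plane/sprite select below sits in bits 21:19 and covers
+ * sprites as well, unlike the legacy MI_DISPLAY_FLIP_PLANE(n)
+ * field at bit 20.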
*/ +#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) + #define MI_SET_CONTEXT MI_INSTR(0x18, 0) #define MI_MM_SPACE_GTT (1<<8) #define MI_MM_SPACE_PHYSICAL (0<<8) @@ -3313,7 +3321,7 @@ /* PCH */ -/* south display engine interrupt */ +/* south display engine interrupt: IBX */ #define SDE_AUDIO_POWER_D (1 << 27) #define SDE_AUDIO_POWER_C (1 << 26) #define SDE_AUDIO_POWER_B (1 << 25) @@ -3349,15 +3357,44 @@ #define SDE_TRANSA_CRC_ERR (1 << 1) #define SDE_TRANSA_FIFO_UNDER (1 << 0) #define SDE_TRANS_MASK (0x3f) -/* CPT */ -#define SDE_CRT_HOTPLUG_CPT (1 << 19) + +/* south display engine interrupt: CPT/PPT */ +#define SDE_AUDIO_POWER_D_CPT (1 << 31) +#define SDE_AUDIO_POWER_C_CPT (1 << 30) +#define SDE_AUDIO_POWER_B_CPT (1 << 29) +#define SDE_AUDIO_POWER_SHIFT_CPT 29 +#define SDE_AUDIO_POWER_MASK_CPT (7 << 29) +#define SDE_AUXD_CPT (1 << 27) +#define SDE_AUXC_CPT (1 << 26) +#define SDE_AUXB_CPT (1 << 25) +#define SDE_AUX_MASK_CPT (7 << 25) #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) +#define SDE_CRT_HOTPLUG_CPT (1 << 19) #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ SDE_PORTD_HOTPLUG_CPT | \ SDE_PORTC_HOTPLUG_CPT | \ SDE_PORTB_HOTPLUG_CPT) +#define SDE_GMBUS_CPT (1 << 17) +#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10) +#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9) +#define SDE_FDI_RXC_CPT (1 << 8) +#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6) +#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5) +#define SDE_FDI_RXB_CPT (1 << 4) +#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2) +#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1) +#define SDE_FDI_RXA_CPT (1 << 0) +#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \ + SDE_AUDIO_CP_REQ_B_CPT | \ + SDE_AUDIO_CP_REQ_A_CPT) +#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \ + SDE_AUDIO_CP_CHG_B_CPT | \ + SDE_AUDIO_CP_CHG_A_CPT) +#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \ + SDE_FDI_RXB_CPT | \ + SDE_FDI_RXA_CPT) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 91478942090..e0aa064def3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6158,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; + uint32_t plane_bit = 0; int ret; ret = intel_pin_and_fence_fb_obj(dev, obj, ring); if (ret) goto err; + switch(intel_crtc->plane) { + case PLANE_A: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; + break; + case PLANE_B: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; + break; + case PLANE_C: + plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; + break; + default: + WARN_ONCE(1, "unknown plane in flip command\n"); + ret = -ENODEV; + goto err; + } + ret = intel_ring_begin(ring, 4); if (ret) goto err_unpin; - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, (obj->gtt_offset)); intel_ring_emit(ring, (MI_NOOP)); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 
b59b6d5b758..e5b84ff89ca 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_device *dev = ring->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = ring->obj; + int ret = 0; u32 head; + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_get(dev_priv); + /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); @@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); - return -EIO; + ret = -EIO; + goto out; } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) @@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->head = I915_READ_HEAD(ring); ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring_space(ring); + ring->last_retired_head = -1; } - return 0; +out: + if (HAS_FORCE_WAKE(dev)) + gen6_gt_force_wake_put(dev_priv); + + return ret; } static int @@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto err_unref; + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto err_unpin; + ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset, ring->size); if (ring->virtual_start == NULL) { diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 4e7dd2b4843..c16554122cc 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -52,6 +52,7 @@ struct evergreen_cs_track { u32 cb_color_view[12]; u32 cb_color_pitch[12]; u32 cb_color_slice[12]; + u32 cb_color_slice_idx[12]; u32 cb_color_attrib[12]; u32 cb_color_cmask_slice[8];/* unused */ u32 cb_color_fmask_slice[8];/* unused */ @@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track) track->cb_color_info[i] = 0; track->cb_color_view[i] = 0xFFFFFFFF; track->cb_color_pitch[i] = 0; - track->cb_color_slice[i] = 0; + track->cb_color_slice[i] = 0xfffffff; + track->cb_color_slice_idx[i] = 0; } track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; + track->db_depth_slice = 0xffffffff; track->db_depth_view = 0xFFFFC000; track->db_depth_size = 0xFFFFFFFF; track->db_depth_control = 0xFFFFFFFF; @@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, { struct evergreen_cs_track *track = p->track; unsigned palign, halign, tileb, slice_pt; + unsigned mtile_pr, mtile_ps, mtileb; tileb = 64 * surf->bpe * surf->nsamples; - palign = track->group_size / (8 * surf->bpe * surf->nsamples); - palign = MAX(8, palign); slice_pt = 1; if (tileb > surf->tsplit) { slice_pt = tileb / surf->tsplit; @@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p, /* macro tile width & height */ palign = (8 * surf->bankw * track->npipes) * surf->mtilea; halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; - surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; + mtileb = (palign / 8) * (halign / 8) * tileb;; + mtile_pr = surf->nbx / palign; + mtile_ps = (mtile_pr * surf->nby) / halign; + surf->layer_size = mtile_ps * mtileb * slice_pt; surf->base_align = (palign / 8) * (halign / 8) * tileb; surf->palign = palign; surf->halign = halign; @@ 
-434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i offset += surf.layer_size * mslice; if (offset > radeon_bo_size(track->cb_color_bo[id])) { + /* old ddx are broken they allocate bo with w*h*bpp but + * program slice with ALIGN(h, 8), catch this and patch + * command stream. + */ + if (!surf.mode) { + volatile u32 *ib = p->ib.ptr; + unsigned long tmp, nby, bsize, size, min = 0; + + /* find the height the ddx wants */ + if (surf.nby > 8) { + min = surf.nby - 8; + } + bsize = radeon_bo_size(track->cb_color_bo[id]); + tmp = track->cb_color_bo_offset[id] << 8; + for (nby = surf.nby; nby > min; nby--) { + size = nby * surf.nbx * surf.bpe * surf.nsamples; + if ((tmp + size * mslice) <= bsize) { + break; + } + } + if (nby > min) { + surf.nby = nby; + slice = ((nby * surf.nbx) / 64) - 1; + if (!evergreen_surface_check(p, &surf, "cb")) { + /* check if this one works */ + tmp += surf.layer_size * mslice; + if (tmp <= bsize) { + ib[track->cb_color_slice_idx[id]] = slice; + goto old_ddx_ok; + } + } + } + } dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " "offset %d, max layer %d, bo size %ld, slice %d)\n", __func__, __LINE__, id, surf.layer_size, @@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i surf.tsplit, surf.mtilea); return -EINVAL; } +old_ddx_ok: return 0; } @@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_SLICE: tmp = (reg - CB_COLOR0_SLICE) / 0x3c; track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; track->cb_dirty = true; break; case CB_COLOR8_SLICE: @@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR11_SLICE: tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_slice_idx[tmp] = idx; track->cb_dirty = true; break; case CB_COLOR0_ATTRIB: diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 3df4efa1194..3186522a445 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev) rdev->config.cayman.max_pipes_per_simd = 4; rdev->config.cayman.max_tile_pipes = 2; if ((rdev->pdev->device == 0x9900) || - (rdev->pdev->device == 0x9901)) { + (rdev->pdev->device == 0x9901) || + (rdev->pdev->device == 0x9905) || + (rdev->pdev->device == 0x9906) || + (rdev->pdev->device == 0x9907) || + (rdev->pdev->device == 0x9908) || + (rdev->pdev->device == 0x9909) || + (rdev->pdev->device == 0x9910) || + (rdev->pdev->device == 0x9917)) { rdev->config.cayman.max_simds_per_se = 6; rdev->config.cayman.max_backends_per_se = 2; } else if ((rdev->pdev->device == 0x9903) || - (rdev->pdev->device == 0x9904)) { + (rdev->pdev->device == 0x9904) || + (rdev->pdev->device == 0x990A) || + (rdev->pdev->device == 0x9913) || + (rdev->pdev->device == 0x9918)) { rdev->config.cayman.max_simds_per_se = 4; rdev->config.cayman.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x9990) || - (rdev->pdev->device == 0x9991)) { + } else if ((rdev->pdev->device == 0x9919) || + (rdev->pdev->device == 0x9990) || + (rdev->pdev->device == 0x9991) || + (rdev->pdev->device == 0x9994) || + (rdev->pdev->device == 0x99A0)) { rdev->config.cayman.max_simds_per_se = 3; rdev->config.cayman.max_backends_per_se = 1; } else { diff --git a/drivers/gpu/drm/radeon/r600.c 
b/drivers/gpu/drm/radeon/r600.c index 45cfcea6350..f30dc95f83b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2426,6 +2426,12 @@ int r600_startup(struct radeon_device *rdev) if (r) return r; + r = r600_audio_init(rdev); + if (r) { + DRM_ERROR("radeon: audio init failed\n"); + return r; + } + return 0; } @@ -2462,12 +2468,6 @@ int r600_resume(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) { - DRM_ERROR("radeon: audio resume failed\n"); - return r; - } - return r; } @@ -2577,9 +2577,6 @@ int r600_init(struct radeon_device *rdev) rdev->accel_working = false; } - r = r600_audio_init(rdev); - if (r) - return r; /* TODO error handling */ return 0; } diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 7c4fa77f018..7479a5c503e 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); int base_rate = 48000; switch (radeon_encoder->encoder_id) { @@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); - /* Some magic trigger or src sel? */ - WREG32_P(0x5ac, 0x01, ~0x77); + /* Select DTO source */ + WREG32(0x5ac, radeon_crtc->crtc_id); } else { switch (dig->dig_encoder) { case 0: diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 226379e00ac..969c27529df 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -348,7 +348,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ - HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 85dac33e3cc..fefcca55c1e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1374,9 +1374,9 @@ struct cayman_asic { struct si_asic { unsigned max_shader_engines; - unsigned max_pipes_per_simd; unsigned max_tile_pipes; - unsigned max_simds_per_se; + unsigned max_cu_per_sh; + unsigned max_sh_per_se; unsigned max_backends_per_se; unsigned max_texture_channel_caches; unsigned max_gprs; @@ -1387,7 +1387,6 @@ struct si_asic { unsigned sc_hiz_tile_fifo_size; unsigned sc_earlyz_tile_fifo_size; - unsigned num_shader_engines; unsigned num_tile_pipes; unsigned num_backends_per_se; unsigned backend_disable_mask_per_asic; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index f0bb2b543b1..03e5f5df40f 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -57,9 +57,10 @@ * 2.13.0 - virtual memory support, streamout * 2.14.0 - add evergreen tiling informations * 2.15.0 - add max_pipes query + * 2.16.0 - fix evergreen 2D tiled surface calculation */ 
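Userspace keys off this minor bump to tell whether the evergreen fix is present. A minimal libdrm check might look like the following sketch (not part of the patch; fd is assumed to be an open radeon DRM node):

    #include <stdbool.h>
    #include <xf86drm.h>    /* libdrm */

    static bool radeon_has_evergreen_2d_fix(int fd)
    {
            drmVersionPtr v = drmGetVersion(fd);
            /* KMS driver 2.16.0 or later carries the fix */
            bool ok = v && v->version_major == 2 && v->version_minor >= 16;

            if (v)
                    drmFreeVersion(v);
            return ok;
    }
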
#define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 15 +#define KMS_DRIVER_MINOR 16 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 79db56e6c2a..59d44937dd9 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev, mutex_lock(&vm->mutex); if (last_pfn > vm->last_pfn) { - /* grow va space 32M by 32M */ - unsigned align = ((32 << 20) >> 12) - 1; + /* release mutex and lock in right order */ + mutex_unlock(&vm->mutex); radeon_mutex_lock(&rdev->cs_mutex); - radeon_vm_unbind_locked(rdev, vm); + mutex_lock(&vm->mutex); + /* and check again */ + if (last_pfn > vm->last_pfn) { + /* grow va space 32M by 32M */ + unsigned align = ((32 << 20) >> 12) - 1; + radeon_vm_unbind_locked(rdev, vm); + vm->last_pfn = (last_pfn + align) & ~align; + } radeon_mutex_unlock(&rdev->cs_mutex); - vm->last_pfn = (last_pfn + align) & ~align; } head = &vm->va; last_offset = 0; @@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, if (bo_va == NULL) return 0; - mutex_lock(&vm->mutex); radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); radeon_mutex_unlock(&rdev->cs_mutex); list_del(&bo_va->vm_list); @@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) struct radeon_bo_va *bo_va, *tmp; int r; - mutex_lock(&vm->mutex); - radeon_mutex_lock(&rdev->cs_mutex); + mutex_lock(&vm->mutex); radeon_vm_unbind_locked(rdev, vm); radeon_mutex_unlock(&rdev->cs_mutex); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index f1016a5820d..5c58d7d90cb 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) break; case RADEON_INFO_MAX_PIPES: if (rdev->family >= CHIP_TAHITI) - value = rdev->config.si.max_pipes_per_simd; + value = rdev->config.si.max_cu_per_sh; else if (rdev->family >= CHIP_CAYMAN) value = rdev->config.cayman.max_pipes_per_simd; else if (rdev->family >= CHIP_CEDAR) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 25f9eef12c4..e95c5e61d4e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "failed initializing audio\n"); - return r; - } - r = radeon_ib_pool_start(rdev); if (r) return r; @@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev) if (r) return r; + r = r600_audio_init(rdev); + if (r) { + dev_err(rdev->dev, "failed initializing audio\n"); + return r; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3277ddecfe9..159b6a43fda 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "failed initializing audio\n"); - return r; - } - r = radeon_ib_pool_start(rdev); if (r) return r; @@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev) if (r) return r; + r = r600_audio_init(rdev); + 
if (r) { + dev_err(rdev->dev, "failed initializing audio\n"); + return r; + } + return 0; } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 04ddc365a90..4ad0281fdc3 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -956,6 +956,12 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; + r = r600_audio_init(rdev); + if (r) { + DRM_ERROR("radeon: audio init failed\n"); + return r; + } + return 0; } @@ -978,12 +984,6 @@ int rv770_resume(struct radeon_device *rdev) return r; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "radeon: audio init failed\n"); - return r; - } - return r; } @@ -1092,12 +1092,6 @@ int rv770_init(struct radeon_device *rdev) rdev->accel_working = false; } - r = r600_audio_init(rdev); - if (r) { - dev_err(rdev->dev, "radeon: audio init failed\n"); - return r; - } - return 0; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 549732e56ca..c7b61f16ecf 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev) /* * Core functions */ -static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev, - u32 num_tile_pipes, - u32 num_backends_per_asic, - u32 *backend_disable_mask_per_asic, - u32 num_shader_engines) -{ - u32 backend_map = 0; - u32 enabled_backends_mask = 0; - u32 enabled_backends_count = 0; - u32 num_backends_per_se; - u32 cur_pipe; - u32 swizzle_pipe[SI_MAX_PIPES]; - u32 cur_backend = 0; - u32 i; - bool force_no_swizzle; - - /* force legal values */ - if (num_tile_pipes < 1) - num_tile_pipes = 1; - if (num_tile_pipes > rdev->config.si.max_tile_pipes) - num_tile_pipes = rdev->config.si.max_tile_pipes; - if (num_shader_engines < 1) - num_shader_engines = 1; - if (num_shader_engines > rdev->config.si.max_shader_engines) - num_shader_engines = rdev->config.si.max_shader_engines; - if (num_backends_per_asic < num_shader_engines) - num_backends_per_asic = num_shader_engines; - if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines)) - num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines; - - /* make sure we have the same number of backends per se */ - num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); - /* set up the number of backends per se */ - num_backends_per_se = num_backends_per_asic / num_shader_engines; - if (num_backends_per_se > rdev->config.si.max_backends_per_se) { - num_backends_per_se = rdev->config.si.max_backends_per_se; - num_backends_per_asic = num_backends_per_se * num_shader_engines; - } - - /* create enable mask and count for enabled backends */ - for (i = 0; i < SI_MAX_BACKENDS; ++i) { - if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { - enabled_backends_mask |= (1 << i); - ++enabled_backends_count; - } - if (enabled_backends_count == num_backends_per_asic) - break; - } - - /* force the backends mask to match the current number of backends */ - if (enabled_backends_count != num_backends_per_asic) { - u32 this_backend_enabled; - u32 shader_engine; - u32 backend_per_se; - - enabled_backends_mask = 0; - enabled_backends_count = 0; - *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK; - for (i = 0; i < SI_MAX_BACKENDS; ++i) { - /* calc the current se */ - shader_engine = i / rdev->config.si.max_backends_per_se; - /* calc the backend per se */ - backend_per_se = i % rdev->config.si.max_backends_per_se; - /* default to not enabled */ - 
this_backend_enabled = 0; - if ((shader_engine < num_shader_engines) && - (backend_per_se < num_backends_per_se)) - this_backend_enabled = 1; - if (this_backend_enabled) { - enabled_backends_mask |= (1 << i); - *backend_disable_mask_per_asic &= ~(1 << i); - ++enabled_backends_count; - } - } - } - - - memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES); - switch (rdev->family) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - force_no_swizzle = true; - break; - default: - force_no_swizzle = false; - break; - } - if (force_no_swizzle) { - bool last_backend_enabled = false; - - force_no_swizzle = false; - for (i = 0; i < SI_MAX_BACKENDS; ++i) { - if (((enabled_backends_mask >> i) & 1) == 1) { - if (last_backend_enabled) - force_no_swizzle = true; - last_backend_enabled = true; - } else - last_backend_enabled = false; - } - } - - switch (num_tile_pipes) { - case 1: - case 3: - case 5: - case 7: - DRM_ERROR("odd number of pipes!\n"); - break; - case 2: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - break; - case 4: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; - swizzle_pipe[3] = 3; - } - break; - case 6: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 5; - } - break; - case 8: - if (force_no_swizzle) { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 1; - swizzle_pipe[2] = 2; - swizzle_pipe[3] = 3; - swizzle_pipe[4] = 4; - swizzle_pipe[5] = 5; - swizzle_pipe[6] = 6; - swizzle_pipe[7] = 7; - } else { - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 1; - swizzle_pipe[5] = 3; - swizzle_pipe[6] = 5; - swizzle_pipe[7] = 7; - } - break; - } - - for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { - while (((1 << cur_backend) & enabled_backends_mask) == 0) - cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; - - backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); - - cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS; - } - - return backend_map; -} - -static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev, - u32 disable_mask_per_se, - u32 max_disable_mask_per_se, - u32 num_shader_engines) -{ - u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); - u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; - - if (num_shader_engines == 1) - return disable_mask_per_asic; - else if (num_shader_engines == 2) - return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); - else - return 0xffffffff; -} - static void si_tiling_mode_table_init(struct radeon_device *rdev) { const u32 num_tile_mode_states = 32; @@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) DRM_ERROR("unknown asic: 0x%x\n", rdev->family); } +static void si_select_se_sh(struct radeon_device *rdev, + u32 se_num, u32 sh_num) +{ + u32 data = INSTANCE_BROADCAST_WRITES; + + if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) + data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; + else if (se_num == 0xffffffff) + data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); + else if (sh_num == 0xffffffff) + data |= 
SH_BROADCAST_WRITES | SE_INDEX(se_num); + else + data |= SH_INDEX(sh_num) | SE_INDEX(se_num); + WREG32(GRBM_GFX_INDEX, data); +} + +static u32 si_create_bitmask(u32 bit_width) +{ + u32 i, mask = 0; + + for (i = 0; i < bit_width; i++) { + mask <<= 1; + mask |= 1; + } + return mask; +} + +static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh) +{ + u32 data, mask; + + data = RREG32(CC_GC_SHADER_ARRAY_CONFIG); + if (data & 1) + data &= INACTIVE_CUS_MASK; + else + data = 0; + data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG); + + data >>= INACTIVE_CUS_SHIFT; + + mask = si_create_bitmask(cu_per_sh); + + return ~data & mask; +} + +static void si_setup_spi(struct radeon_device *rdev, + u32 se_num, u32 sh_per_se, + u32 cu_per_sh) +{ + int i, j, k; + u32 data, mask, active_cu; + + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + si_select_se_sh(rdev, i, j); + data = RREG32(SPI_STATIC_THREAD_MGMT_3); + active_cu = si_get_cu_enabled(rdev, cu_per_sh); + + mask = 1; + for (k = 0; k < 16; k++) { + mask <<= k; + if (active_cu & mask) { + data &= ~mask; + WREG32(SPI_STATIC_THREAD_MGMT_3, data); + break; + } + } + } + } + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); +} + +static u32 si_get_rb_disabled(struct radeon_device *rdev, + u32 max_rb_num, u32 se_num, + u32 sh_per_se) +{ + u32 data, mask; + + data = RREG32(CC_RB_BACKEND_DISABLE); + if (data & 1) + data &= BACKEND_DISABLE_MASK; + else + data = 0; + data |= RREG32(GC_USER_RB_BACKEND_DISABLE); + + data >>= BACKEND_DISABLE_SHIFT; + + mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); + + return data & mask; +} + +static void si_setup_rb(struct radeon_device *rdev, + u32 se_num, u32 sh_per_se, + u32 max_rb_num) +{ + int i, j; + u32 data, mask; + u32 disabled_rbs = 0; + u32 enabled_rbs = 0; + + for (i = 0; i < se_num; i++) { + for (j = 0; j < sh_per_se; j++) { + si_select_se_sh(rdev, i, j); + data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); + disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); + } + } + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); + + mask = 1; + for (i = 0; i < max_rb_num; i++) { + if (!(disabled_rbs & mask)) + enabled_rbs |= mask; + mask <<= 1; + } + + for (i = 0; i < se_num; i++) { + si_select_se_sh(rdev, i, 0xffffffff); + data = 0; + for (j = 0; j < sh_per_se; j++) { + switch (enabled_rbs & 3) { + case 1: + data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); + break; + case 2: + data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); + break; + case 3: + default: + data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); + break; + } + enabled_rbs >>= 2; + } + WREG32(PA_SC_RASTER_CONFIG, data); + } + si_select_se_sh(rdev, 0xffffffff, 0xffffffff); +} + static void si_gpu_init(struct radeon_device *rdev) { - u32 cc_rb_backend_disable = 0; - u32 cc_gc_shader_array_config; u32 gb_addr_config = 0; u32 mc_shared_chmap, mc_arb_ramcfg; - u32 gb_backend_map; - u32 cgts_tcc_disable; u32 sx_debug_1; - u32 gc_user_shader_array_config; - u32 gc_user_rb_backend_disable; - u32 cgts_user_tcc_disable; u32 hdp_host_path_cntl; u32 tmp; int i, j; @@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev) switch (rdev->family) { case CHIP_TAHITI: rdev->config.si.max_shader_engines = 2; - rdev->config.si.max_pipes_per_simd = 4; rdev->config.si.max_tile_pipes = 12; - rdev->config.si.max_simds_per_se = 8; + rdev->config.si.max_cu_per_sh = 8; + rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; 
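/* i.e. 2 SEs * 2 SHs/SE * 8 CUs/SH = 32 compute units on a fully enabled Tahiti */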
rdev->config.si.max_texture_channel_caches = 12; rdev->config.si.max_gprs = 256; @@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_prim_fifo_size_backend = 0x100; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_PITCAIRN: rdev->config.si.max_shader_engines = 2; - rdev->config.si.max_pipes_per_simd = 4; rdev->config.si.max_tile_pipes = 8; - rdev->config.si.max_simds_per_se = 5; + rdev->config.si.max_cu_per_sh = 5; + rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 8; rdev->config.si.max_gprs = 256; @@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_prim_fifo_size_backend = 0x100; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_VERDE: default: rdev->config.si.max_shader_engines = 1; - rdev->config.si.max_pipes_per_simd = 4; rdev->config.si.max_tile_pipes = 4; - rdev->config.si.max_simds_per_se = 2; + rdev->config.si.max_cu_per_sh = 2; + rdev->config.si.max_sh_per_se = 2; rdev->config.si.max_backends_per_se = 4; rdev->config.si.max_texture_channel_caches = 4; rdev->config.si.max_gprs = 256; @@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_prim_fifo_size_backend = 0x40; rdev->config.si.sc_hiz_tile_fifo_size = 0x30; rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; } @@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); - cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); - cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG); - cgts_tcc_disable = 0xffff0000; - for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++) - cgts_tcc_disable &= ~(1 << (16 + i)); - gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); - gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG); - cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); - - rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines; rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; - tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; - rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp); - tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; - rdev->config.si.backend_disable_mask_per_asic = - si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK, - rdev->config.si.num_shader_engines); - rdev->config.si.backend_map = - si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, - rdev->config.si.num_backends_per_se * - rdev->config.si.num_shader_engines, - &rdev->config.si.backend_disable_mask_per_asic, - rdev->config.si.num_shader_engines); - tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; - rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp); rdev->config.si.mem_max_burst_length_bytes = 256; tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; @@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.num_gpus = 1; 
rdev->config.si.multi_gpu_tile_size = 64; - gb_addr_config = 0; - switch (rdev->config.si.num_tile_pipes) { - case 1: - gb_addr_config |= NUM_PIPES(0); - break; - case 2: - gb_addr_config |= NUM_PIPES(1); - break; - case 4: - gb_addr_config |= NUM_PIPES(2); - break; - case 8: - default: - gb_addr_config |= NUM_PIPES(3); - break; - } - - tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1; - gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); - gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1); - tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1; - gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); - switch (rdev->config.si.num_gpus) { - case 1: - default: - gb_addr_config |= NUM_GPUS(0); - break; - case 2: - gb_addr_config |= NUM_GPUS(1); - break; - case 4: - gb_addr_config |= NUM_GPUS(2); - break; - } - switch (rdev->config.si.multi_gpu_tile_size) { - case 16: - gb_addr_config |= MULTI_GPU_TILE_SIZE(0); - break; - case 32: - default: - gb_addr_config |= MULTI_GPU_TILE_SIZE(1); - break; - case 64: - gb_addr_config |= MULTI_GPU_TILE_SIZE(2); - break; - case 128: - gb_addr_config |= MULTI_GPU_TILE_SIZE(3); - break; - } + /* fix up row size */ + gb_addr_config &= ~ROW_SIZE_MASK; switch (rdev->config.si.mem_row_size_in_kb) { case 1: default: @@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev) break; } - tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; - rdev->config.si.num_tile_pipes = (1 << tmp); - tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; - rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256; - tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT; - rdev->config.si.num_shader_engines = tmp + 1; - tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT; - rdev->config.si.num_gpus = tmp + 1; - tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT; - rdev->config.si.multi_gpu_tile_size = 1 << tmp; - tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; - rdev->config.si.mem_row_size_in_kb = 1 << tmp; - - gb_backend_map = - si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes, - rdev->config.si.num_backends_per_se * - rdev->config.si.num_shader_engines, - &rdev->config.si.backend_disable_mask_per_asic, - rdev->config.si.num_shader_engines); - /* setup tiling info dword. gb_addr_config is not adequate since it does * not have bank info, so create a custom tiling dword. 
* bits 3:0 num_pipes @@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.tile_config |= (3 << 0); break; } - rdev->config.si.tile_config |= - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + rdev->config.si.tile_config |= 1 << 4; + else + rdev->config.si.tile_config |= 0 << 4; rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; - rdev->config.si.backend_map = gb_backend_map; WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); - /* primary versions */ - WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); - - WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); + si_tiling_mode_table_init(rdev); - /* user versions */ - WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config); + si_setup_rb(rdev, rdev->config.si.max_shader_engines, + rdev->config.si.max_sh_per_se, + rdev->config.si.max_backends_per_se); - WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); + si_setup_spi(rdev, rdev->config.si.max_shader_engines, + rdev->config.si.max_sh_per_se, + rdev->config.si.max_cu_per_sh); - si_tiling_mode_table_init(rdev); /* set HW defaults for 3D engine */ WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 53ea2c42dbd..db406796286 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -24,6 +24,11 @@ #ifndef SI_H #define SI_H +#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2 + +#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 +#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 + #define CG_MULT_THERMAL_STATUS 0x714 #define ASIC_MAX_TEMP(x) ((x) << 0) #define ASIC_MAX_TEMP_MASK 0x000001ff @@ -408,6 +413,12 @@ #define SOFT_RESET_IA (1 << 15) #define GRBM_GFX_INDEX 0x802C +#define INSTANCE_INDEX(x) ((x) << 0) +#define SH_INDEX(x) ((x) << 8) +#define SE_INDEX(x) ((x) << 16) +#define SH_BROADCAST_WRITES (1 << 29) +#define INSTANCE_BROADCAST_WRITES (1 << 30) +#define SE_BROADCAST_WRITES (1 << 31) #define GRBM_INT_CNTL 0x8060 # define RDERR_INT_ENABLE (1 << 0) @@ -480,6 +491,8 @@ #define VGT_TF_MEMORY_BASE 0x89B8 #define CC_GC_SHADER_ARRAY_CONFIG 0x89bc +#define INACTIVE_CUS_MASK 0xFFFF0000 +#define INACTIVE_CUS_SHIFT 16 #define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 #define PA_CL_ENHANCE 0x8A14 @@ -688,6 +701,12 @@ #define RLC_MC_CNTL 0xC344 #define RLC_UCODE_CNTL 0xC348 +#define PA_SC_RASTER_CONFIG 0x28350 +# define RASTER_CONFIG_RB_MAP_0 0 +# define RASTER_CONFIG_RB_MAP_1 1 +# define RASTER_CONFIG_RB_MAP_2 2 +# define RASTER_CONFIG_RB_MAP_3 3 + #define VGT_EVENT_INITIATOR 0x28a90 # define SAMPLE_STREAMOUTSTATS1 (1 << 0) # define SAMPLE_STREAMOUTSTATS2 (2 << 0) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index b67cfcaa661..36f4b28c1b9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, (*destroy)(bo); else kfree(bo); + ttm_mem_global_free(mem_glob, acc_size); return -EINVAL; } bo->destroy = destroy; @@ -1307,22 +1308,14 @@ int 
ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo) { struct ttm_buffer_object *bo; - struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; size_t acc_size; int ret; - acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); - ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); - if (unlikely(ret != 0)) - return ret; - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - - if (unlikely(bo == NULL)) { - ttm_mem_global_free(mem_glob, acc_size); + if (unlikely(bo == NULL)) return -ENOMEM; - } + acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, buffer_start, interruptible, persistent_swap_storage, acc_size, NULL, NULL); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 38f9534ac51..5b3c7d135dc 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -190,6 +190,19 @@ find_active_client(struct list_head *head) return NULL; } +int vga_switcheroo_get_client_state(struct pci_dev *pdev) +{ + struct vga_switcheroo_client *client; + + client = find_client_from_pci(&vgasr_priv.clients, pdev); + if (!client) + return VGA_SWITCHEROO_NOT_FOUND; + if (!vgasr_priv.active) + return VGA_SWITCHEROO_INIT; + return client->pwr_state; +} +EXPORT_SYMBOL(vga_switcheroo_get_client_state); + void vga_switcheroo_unregister_client(struct pci_dev *pdev) { struct vga_switcheroo_client *client; @@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) vga_switchon(new_client); vga_set_default_device(new_client->pdev); - set_audio_state(new_client->id, VGA_SWITCHEROO_ON); - return 0; } @@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) active->active = false; + set_audio_state(active->id, VGA_SWITCHEROO_OFF); + if (new_client->fb_info) { struct fb_event event; event.info = new_client->fb_info; @@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) if (new_client->ops->reprobe) new_client->ops->reprobe(new_client->pdev); - set_audio_state(active->id, VGA_SWITCHEROO_OFF); - if (active->pwr_state == VGA_SWITCHEROO_ON) vga_switchoff(active); + set_audio_state(new_client->id, VGA_SWITCHEROO_ON); + new_client->active = true; return 0; } @@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, /* pwr off the device not in use */ if (strncmp(usercmd, "OFF", 3) == 0) { list_for_each_entry(client, &vgasr_priv.clients, list) { - if (client->active) + if (client->active || client_is_audio(client)) continue; + set_audio_state(client->id, VGA_SWITCHEROO_OFF); if (client->pwr_state == VGA_SWITCHEROO_ON) vga_switchoff(client); } @@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, /* pwr on the device not in use */ if (strncmp(usercmd, "ON", 2) == 0) { list_for_each_entry(client, &vgasr_priv.clients, list) { - if (client->active) + if (client->active || client_is_audio(client)) continue; if (client->pwr_state == VGA_SWITCHEROO_OFF) vga_switchon(client); + set_audio_state(client->id, VGA_SWITCHEROO_ON); } goto out; } diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index beb2491db27..a0edd985421 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -37,4 +37,16 @@ config I2C_MUX_PCA954x This driver can also be built as a module. If so, the module will be called i2c-mux-pca954x.
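The vga_switcheroo_get_client_state() helper exported in the hunk above lets a GPU driver query its own power state before committing to a full hardware init. A minimal sketch of a caller, using only the constants visible in that hunk; the probe function and its policy are illustrative assumptions, not code from this series:

	#include <linux/vga_switcheroo.h>

	static int example_gpu_probe(struct pci_dev *pdev)
	{
		int state = vga_switcheroo_get_client_state(pdev);

		if (state == VGA_SWITCHEROO_NOT_FOUND)
			return -ENODEV;		/* not registered as a switcheroo client */
		if (state == VGA_SWITCHEROO_OFF)
			return 0;		/* powered down; defer costly init */
		/* VGA_SWITCHEROO_INIT or VGA_SWITCHEROO_ON: bring the device up */
		return 0;
	}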
+config I2C_MUX_PINCTRL + tristate "pinctrl-based I2C multiplexer" + depends on PINCTRL + help + If you say yes to this option, support will be included for an I2C + multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing. + This is useful for SoCs whose I2C module's signals can be routed to + different sets of pins at run-time. + + This driver can also be built as a module. If so, the module will be + called i2c-mux-pinctrl. + endmenu diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile index 5826249b29c..76da8692aff 100644 --- a/drivers/i2c/muxes/Makefile +++ b/drivers/i2c/muxes/Makefile @@ -4,5 +4,6 @@ obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o +obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c new file mode 100644 index 00000000000..46a66976347 --- /dev/null +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -0,0 +1,279 @@ +/* + * I2C multiplexer using pinctrl API + * + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_i2c.h> +#include <linux/pinctrl/consumer.h> +#include <linux/i2c-mux-pinctrl.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +struct i2c_mux_pinctrl { + struct device *dev; + struct i2c_mux_pinctrl_platform_data *pdata; + struct pinctrl *pinctrl; + struct pinctrl_state **states; + struct pinctrl_state *state_idle; + struct i2c_adapter *parent; + struct i2c_adapter **busses; +}; + +static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data, + u32 chan) +{ + struct i2c_mux_pinctrl *mux = data; + + return pinctrl_select_state(mux->pinctrl, mux->states[chan]); +} + +static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data, + u32 chan) +{ + struct i2c_mux_pinctrl *mux = data; + + return pinctrl_select_state(mux->pinctrl, mux->state_idle); +} + +#ifdef CONFIG_OF +static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, + struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int num_names, i, ret; + struct device_node *adapter_np; + struct i2c_adapter *adapter; + + if (!np) + return 0; + + mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL); + if (!mux->pdata) { + dev_err(mux->dev, + "Cannot allocate i2c_mux_pinctrl_platform_data\n"); + return -ENOMEM; + } + + num_names = of_property_count_strings(np, "pinctrl-names"); + if (num_names < 0) { + dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", + num_names); + return num_names; + } + + mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev, + sizeof(*mux->pdata->pinctrl_states) * num_names, + GFP_KERNEL); + if (!mux->pdata->pinctrl_states) { + dev_err(mux->dev, "Cannot allocate pinctrl_states\n"); + return -ENOMEM; + } + + for (i = 0; i < num_names; i++) { + ret = of_property_read_string_index(np, "pinctrl-names", i, + &mux->pdata->pinctrl_states[mux->pdata->bus_count]); + if (ret < 0) { + dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n", + ret); + return ret; + } + if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count], + "idle")) { + if (i != num_names - 1) { + dev_err(mux->dev, "idle state must be last\n"); + return -EINVAL; + } + mux->pdata->pinctrl_state_idle = "idle"; + } else { + mux->pdata->bus_count++; + } + } + + adapter_np = of_parse_phandle(np, "i2c-parent", 0); + if (!adapter_np) { + dev_err(mux->dev, "Cannot parse i2c-parent\n"); + return -ENODEV; + } + adapter = of_find_i2c_adapter_by_node(adapter_np); + if (!adapter) { + dev_err(mux->dev, "Cannot find parent bus\n"); + return -ENODEV; + } + mux->pdata->parent_bus_num = i2c_adapter_id(adapter); + put_device(&adapter->dev); + + return 0; +} +#else +static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux, + struct platform_device *pdev) +{ + return 0; +} +#endif + +static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) +{ + struct i2c_mux_pinctrl *mux; + int (*deselect)(struct i2c_adapter *, void *, u32); + int i, ret; + + mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); + if (!mux) { + dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n"); + ret = -ENOMEM; + goto err; + } + platform_set_drvdata(pdev, mux); + + mux->dev = &pdev->dev; + + mux->pdata = pdev->dev.platform_data; + if (!mux->pdata) { + ret = i2c_mux_pinctrl_parse_dt(mux, pdev); + if (ret < 0) + goto err; + } + if (!mux->pdata) { + dev_err(&pdev->dev, "Missing platform data\n"); + ret = -ENODEV; + goto err; + } + + mux->states = 
devm_kzalloc(&pdev->dev, + sizeof(*mux->states) * mux->pdata->bus_count, + GFP_KERNEL); + if (!mux->states) { + dev_err(&pdev->dev, "Cannot allocate states\n"); + ret = -ENOMEM; + goto err; + } + + mux->busses = devm_kzalloc(&pdev->dev, + sizeof(*mux->busses) * mux->pdata->bus_count, + GFP_KERNEL); + if (!mux->busses) { + dev_err(&pdev->dev, "Cannot allocate busses\n"); + ret = -ENOMEM; + goto err; + } + + mux->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(mux->pinctrl)) { + ret = PTR_ERR(mux->pinctrl); + dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret); + goto err; + } + for (i = 0; i < mux->pdata->bus_count; i++) { + mux->states[i] = pinctrl_lookup_state(mux->pinctrl, + mux->pdata->pinctrl_states[i]); + if (IS_ERR(mux->states[i])) { + ret = PTR_ERR(mux->states[i]); + dev_err(&pdev->dev, + "Cannot look up pinctrl state %s: %d\n", + mux->pdata->pinctrl_states[i], ret); + goto err; + } + } + if (mux->pdata->pinctrl_state_idle) { + mux->state_idle = pinctrl_lookup_state(mux->pinctrl, + mux->pdata->pinctrl_state_idle); + if (IS_ERR(mux->state_idle)) { + ret = PTR_ERR(mux->state_idle); + dev_err(&pdev->dev, + "Cannot look up pinctrl state %s: %d\n", + mux->pdata->pinctrl_state_idle, ret); + goto err; + } + + deselect = i2c_mux_pinctrl_deselect; + } else { + deselect = NULL; + } + + mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num); + if (!mux->parent) { + dev_err(&pdev->dev, "Parent adapter (%d) not found\n", + mux->pdata->parent_bus_num); + ret = -ENODEV; + goto err; + } + + for (i = 0; i < mux->pdata->bus_count; i++) { + u32 bus = mux->pdata->base_bus_num ? + (mux->pdata->base_bus_num + i) : 0; + + mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, + mux, bus, i, + i2c_mux_pinctrl_select, + deselect); + if (!mux->busses[i]) { + ret = -ENODEV; + dev_err(&pdev->dev, "Failed to add adapter %d\n", i); + goto err_del_adapter; + } + } + + return 0; + +err_del_adapter: + for (; i > 0; i--) + i2c_del_mux_adapter(mux->busses[i - 1]); + i2c_put_adapter(mux->parent); +err: + return ret; +} + +static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev) { + struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < mux->pdata->bus_count; i++) + i2c_del_mux_adapter(mux->busses[i]); + + i2c_put_adapter(mux->parent); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = { + { .compatible = "i2c-mux-pinctrl", }, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match); +#endif + +static struct platform_driver i2c_mux_pinctrl_driver = { + .driver = { + .name = "i2c-mux-pinctrl", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match), + }, + .probe = i2c_mux_pinctrl_probe, + .remove = __devexit_p(i2c_mux_pinctrl_remove), +}; +module_platform_driver(i2c_mux_pinctrl_driver); + +MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver"); +MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:i2c-mux-pinctrl"); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 55ab284e22f..b18870c455a 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, struct net_device *pdev; pdev = ip_dev_find(&init_net, peer_ip); + if (!pdev) { + err = -ENODEV; + goto out; + } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, 0); if (!ep->l2t) diff --git
a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ee1c577238f..3530c41fcd1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_mr_size = ~0ull; props->page_size_cap = dev->dev->caps.page_size_cap; props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; - props->max_qp_wr = dev->dev->caps.max_wqes; + props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; props->max_sge = min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg); props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; @@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) int total_eqs = 0; int i, j, eq; - /* Init eq table */ - ibdev->eq_table = NULL; - ibdev->eq_added = 0; - - /* Legacy mode? */ - if (dev->caps.comp_pool == 0) + /* Legacy mode or comp_pool is not large enough */ + if (dev->caps.comp_pool == 0 || + dev->caps.num_ports > dev->caps.comp_pool) return; eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ @@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; - int total_eqs; + + /* no additional eqs were added */ + if (!ibdev->eq_table) + return; /* Reset the advertised EQ number */ ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; @@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) mlx4_release_eq(dev, ibdev->eq_table[i]); } - total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added; - memset(ibdev->eq_table, 0, total_eqs * sizeof(int)); kfree(ibdev->eq_table); - - ibdev->eq_table = NULL; - ibdev->eq_added = 0; } static void *mlx4_ib_add(struct mlx4_dev *dev) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index e62297cc77c..ff36655d23d 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -44,6 +44,14 @@ #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> +enum { + MLX4_IB_SQ_MIN_WQE_SHIFT = 6, + MLX4_IB_MAX_HEADROOM = 2048 +}; + +#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1) +#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT)) + struct mlx4_ib_ucontext { struct ib_ucontext ibucontext; struct mlx4_uar uar; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ceb33327091..8d4ed24aef9 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) { /* Sanity check RQ size before proceeding */ - if (cap->max_recv_wr > dev->dev->caps.max_wqes || - cap->max_recv_sge > dev->dev->caps.max_rq_sg) + if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || + cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) return -EINVAL; if (!has_rq) { @@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); } - cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; - cap->max_recv_sge = qp->rq.max_gs; + /* leave userspace return values as they were, so as not to break ABI */ + if (is_user) { + cap->max_recv_wr = 
qp->rq.max_post = qp->rq.wqe_cnt; + cap->max_recv_sge = qp->rq.max_gs; + } else { + cap->max_recv_wr = qp->rq.max_post = + min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); + cap->max_recv_sge = min(qp->rq.max_gs, + min(dev->dev->caps.max_sq_sg, + dev->dev->caps.max_rq_sg)); + } return 0; } @@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int s; /* Sanity check SQ size before proceeding */ - if (cap->max_send_wr > dev->dev->caps.max_wqes || - cap->max_send_sge > dev->dev->caps.max_sq_sg || + if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || + cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || cap->max_inline_data + send_wqe_overhead(type, qp->flags) + sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) return -EINVAL; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 85a69c95855..037f5cea85b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -231,7 +231,6 @@ struct ocrdma_qp_hwq_info { u32 entry_size; u32 max_cnt; u32 max_wqe_idx; - u32 free_delta; u16 dbid; /* qid, where to ring the doorbell. */ u32 len; dma_addr_t pa; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index a411a4e3193..517ab20b727 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h @@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp { u32 rsvd1; u32 num_wqe_allocated; u32 num_rqe_allocated; - u32 free_wqe_delta; - u32 free_rqe_delta; u32 db_sq_offset; u32 db_rq_offset; u32 db_shift; @@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp { u32 db_rq_offset; u32 db_shift; - u32 free_rqe_delta; - u32 rsvd2; + u64 rsvd2; u64 rsvd3; } __packed; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 9b204b1ba33..9343a152297 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, break; case OCRDMA_SRQ_LIMIT_EVENT: ib_evt.element.srq = &qp->srq->ibsrq; - ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; + ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED; srq_event = 1; qp_event = 0; break; @@ -1990,19 +1990,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, max_wqe_allocated = 1 << max_wqe_allocated; max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); - if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { - qp->sq.free_delta = 0; - qp->rq.free_delta = 1; - } else - qp->sq.free_delta = 1; - qp->sq.max_cnt = max_wqe_allocated; qp->sq.max_wqe_idx = max_wqe_allocated - 1; if (!attrs->srq) { qp->rq.max_cnt = max_rqe_allocated; qp->rq.max_wqe_idx = max_rqe_allocated - 1; - qp->rq.free_delta = 1; } } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index a20d16eaae7..04fef3de6d7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -26,7 +26,6 @@ *******************************************************************/ #include <linux/module.h> -#include <linux/version.h> #include <linux/idr.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index e9f74d1b48f..d16d172b6b6 100644 --- 
a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; uresp.db_shift = 16; } - uresp.free_wqe_delta = qp->sq.free_delta; - uresp.free_rqe_delta = qp->rq.free_delta; if (qp->dpp_enabled) { uresp.dpp_credit = dpp_credit_lmt; @@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) free_cnt = (q->max_cnt - q->head) + q->tail; else free_cnt = q->tail - q->head; - if (q->free_delta) - free_cnt -= q->free_delta; return free_cnt; } @@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata) (srq->pd->id * srq->dev->nic_info.db_page_size); uresp.db_page_size = srq->dev->nic_info.db_page_size; uresp.num_rqe_allocated = srq->rq.max_cnt; - uresp.free_rqe_delta = 1; if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; uresp.db_shift = 24; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index e6483439f25..633f03d8027 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -28,7 +28,6 @@ #ifndef __OCRDMA_VERBS_H__ #define __OCRDMA_VERBS_H__ -#include <linux/version.h> int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, struct ib_send_wr **bad_wr); int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d90a421e9ca..a2e418cba0f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } -static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) +static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) { struct amd_iommu_fault fault; - volatile u64 *raw; - int i; INC_STATS_COUNTER(pri_requests); - raw = (u64 *)(iommu->ppr_log + head); - - /* - * Hardware bug: Interrupt may arrive before the entry is written to - * memory. If this happens we need to wait for the entry to arrive. - */ - for (i = 0; i < LOOP_TIMEOUT; ++i) { - if (PPR_REQ_TYPE(raw[0]) != 0) - break; - udelay(1); - } - if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); return; @@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) fault.tag = PPR_TAG(raw[0]); fault.flags = PPR_FLAGS(raw[0]); - /* - * To detect the hardware bug we need to clear the entry - * to back to zero. - */ - raw[0] = raw[1] = 0; - atomic_notifier_call_chain(&ppr_notifier, 0, &fault); } @@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) if (iommu->ppr_log == NULL) return; + /* enable ppr interrupts again */ + writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); + spin_lock_irqsave(&iommu->lock, flags); head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); while (head != tail) { + volatile u64 *raw; + u64 entry[2]; + int i; - /* Handle PPR entry */ - iommu_handle_ppr_entry(iommu, head); + raw = (u64 *)(iommu->ppr_log + head); + + /* + * Hardware bug: Interrupt may arrive before the entry is + * written to memory. If this happens we need to wait for the + * entry to arrive. 
+ */ + for (i = 0; i < LOOP_TIMEOUT; ++i) { + if (PPR_REQ_TYPE(raw[0]) != 0) + break; + udelay(1); + } + + /* Avoid memcpy function-call overhead */ + entry[0] = raw[0]; + entry[1] = raw[1]; - /* Update and refresh ring-buffer state*/ + /* + * To detect the hardware bug we need to clear the entry + * back to zero. + */ + raw[0] = raw[1] = 0UL; + + /* Update head pointer of hardware ring-buffer */ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + + /* + * Release iommu->lock because ppr-handling might need to + * re-acquire it + */ + spin_unlock_irqrestore(&iommu->lock, flags); + + /* Handle PPR entry */ + iommu_handle_ppr_entry(iommu, entry); + + spin_lock_irqsave(&iommu->lock, flags); + + /* Refresh ring-buffer information */ + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); } - /* enable ppr interrupts again */ - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); - spin_unlock_irqrestore(&iommu->lock, flags); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index c56790375e0..542024ba6db 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (!iommu->dev) return 1; + iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, + PCI_DEVFN(0, 0)); + iommu->cap_ptr = h->cap_ptr; iommu->pci_seg = h->pci_seg; iommu->mmio_phys = h->mmio_phys; @@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { int i, j; u32 ioc_feature_control; - struct pci_dev *pdev = NULL; + struct pci_dev *pdev = iommu->root_pdev; /* RD890 BIOSes may not have completely reconfigured the iommu */ - if (!is_rd890_iommu(iommu->dev)) + if (!is_rd890_iommu(iommu->dev) || !pdev) return; /* * First, we need to ensure that the iommu is enabled.
This is * controlled by a register in the northbridge */ - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); - - if (!pdev) - return; /* Select Northbridge indirect register 0x75 and enable writing */ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); @@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) if (!(ioc_feature_control & 0x1)) pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); - pci_dev_put(pdev); - /* Restore the iommu BAR */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 2452f3b7173..24355559a2a 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -481,6 +481,9 @@ struct amd_iommu { /* Pointer to PCI device of this IOMMU */ struct pci_dev *dev; + /* Cache pdev to root device for resume quirks */ + struct pci_dev *root_pdev; + /* physical address of MMIO space */ u64 mmio_phys; /* virtual address of MMIO space */ diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 04cb8c88d74..12b2b55c519 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -379,7 +379,7 @@ config LEDS_NETXBIG config LEDS_ASIC3 bool "LED support for the HTC ASIC3" - depends on LEDS_CLASS + depends on LEDS_CLASS=y depends on MFD_ASIC3 default y help @@ -390,7 +390,7 @@ config LEDS_ASIC3 config LEDS_RENESAS_TPU bool "LED support for Renesas TPU" - depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO + depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO help This option enables build of the LED TPU platform driver, suitable to drive any TPU channel on newer Renesas SoCs. diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 8ee92c81aec..e663e6f413e 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev) led_cdev->brightness = led_cdev->brightness_get(led_cdev); } -static ssize_t led_brightness_show(struct device *dev, +static ssize_t led_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index d6860043f6f..d65353d8d3f 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev, if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; - if (led_get_trigger_data(led_cdev) && - delay_on == led_cdev->blink_delay_on && - delay_off == led_cdev->blink_delay_off) - return; - - led_stop_software_blink(led_cdev); - led_cdev->blink_delay_on = delay_on; led_cdev->blink_delay_off = delay_off; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 835de7168cd..a9c7981ddd2 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2550,6 +2550,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) err = -EINVAL; spin_lock_init(&conf->device_lock); rdev_for_each(rdev, mddev) { + struct request_queue *q; int disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) @@ -2562,6 +2563,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev) goto abort; disk->rdev = rdev; + q = bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) + mddev->merge_check_needed = 1; disk->head_position = 0; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 987db37cb87..99ae6068e45 100644 --- 
a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3475,6 +3475,7 @@ static int run(struct mddev *mddev) rdev_for_each(rdev, mddev) { long long diff; + struct request_queue *q; disk_idx = rdev->raid_disk; if (disk_idx < 0) @@ -3493,6 +3494,9 @@ static int run(struct mddev *mddev) goto out_free_conf; disk->rdev = rdev; } + q = bdev_get_queue(rdev->bdev); + if (q->merge_bvec_fn) + mddev->merge_check_needed = 1; diff = (rdev->new_data_offset - rdev->data_offset); if (!mddev->reshape_backwards) diff = -diff; diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 9f957c2d48e..09d4f8d9d59 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir; */ int ubi_debugfs_init(void) { + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + dfs_rootdir = debugfs_create_dir("ubi", NULL); if (IS_ERR_OR_NULL(dfs_rootdir)) { int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); @@ -281,7 +284,8 @@ int ubi_debugfs_init(void) */ void ubi_debugfs_exit(void) { - debugfs_remove(dfs_rootdir); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove(dfs_rootdir); } /* Read an UBI debugfs file */ @@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) struct dentry *dent; struct ubi_debug_info *d = ubi->dbg; + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, ubi->ubi_num); if (n == UBI_DFS_DIR_LEN) { @@ -470,5 +477,6 @@ out: */ void ubi_debugfs_exit_dev(struct ubi_device *ubi) { - debugfs_remove_recursive(ubi->dbg->dfs_dir); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_remove_recursive(ubi->dbg->dfs_dir); } diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 9df100a4ec3..b6be644e7b8 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) dbg_wl("flush pending work for LEB %d:%d (%d pending works)", vol_id, lnum, ubi->works_count); - down_write(&ubi->work_sem); while (found) { struct ubi_work *wrk; found = 0; + down_read(&ubi->work_sem); spin_lock(&ubi->wl_lock); list_for_each_entry(wrk, &ubi->works, list) { if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && @@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) spin_unlock(&ubi->wl_lock); err = wrk->func(ubi, wrk, 0); - if (err) - goto out; + if (err) { + up_read(&ubi->work_sem); + return err; + } + spin_lock(&ubi->wl_lock); found = 1; break; } } spin_unlock(&ubi->wl_lock); + up_read(&ubi->work_sem); } -out: + /* + * Make sure all the works which have been done in parallel are + * finished.
+ */ + down_write(&ubi->work_sem); up_write(&ubi->work_sem); + return err; } diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index e2ce508c275..eea660800a0 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -594,8 +594,8 @@ static void c_can_chip_config(struct net_device *dev) priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); - if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & - CAN_CTRLMODE_LOOPBACK)) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && + (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { /* loopback + silent mode : useful for hot self-test */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | CONTROL_SIE | CONTROL_IE | CONTROL_TEST); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index e30e2a2f354..7de82418497 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -747,21 +747,6 @@ struct bnx2x_fastpath { #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG -#define BNX2X_IP_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) - -#define BNX2X_L4_CSUM_ERR(cqe) \ - (!((cqe)->fast_path_cqe.status_flags & \ - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ - ((cqe)->fast_path_cqe.type_error_flags & \ - ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - -#define BNX2X_RX_CSUM_OK(cqe) \ - (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) - #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ (((le16_to_cpu(flags) & \ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ad0743bf4bd..8098eea9704 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) if ((netif_tx_queue_stopped(txq)) && (bp->state == BNX2X_STATE_OPEN) && - (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) + (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); @@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, return 0; } +static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, + struct bnx2x_fastpath *fp) +{ + /* Do nothing if no IP/L4 csum validation was done */ + + if (cqe->fast_path_cqe.status_flags & + (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) + return; + + /* If both IP/L4 validation were done, check if an error was found. 
*/ + + if (cqe->fast_path_cqe.type_error_flags & + (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + fp->eth_q_stats.hw_csum_err++; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; +} int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) { @@ -806,13 +825,9 @@ reuse_rx: skb_checksum_none_assert(skb); - if (bp->dev->features & NETIF_F_RXCSUM) { + if (bp->dev->features & NETIF_F_RXCSUM) + bnx2x_csum_validate(skb, cqe, fp); - if (likely(BNX2X_RX_CSUM_OK(cqe))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - fp->eth_q_stats.hw_csum_err++; - } skb_record_rx_queue(skb, fp->rx_queue); @@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget) /* we split the first BD into headers and data BDs * to ease the pain of our fellow microcode engineers * we use one mapping for both BDs - * So far this has only been observed to happen - * in Other Operating Systems(TM) */ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, @@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) txdata->tx_bd_prod += nbd; - if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) { netif_tx_stop_queue(txq); /* paired memory barrier is in bnx2x_tx_int(), we have to keep @@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) smp_mb(); fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) + if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4) netif_tx_wake_queue(txq); } txdata->tx_pkt++; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 1fe2c7a8b40..a8fb52992c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, if (slave != dev->caps.function) memset(inbox->buf, 0, 256); if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { - *(u8 *) inbox->buf = !!reset_qkey_viols << 6; + *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; ((__be32 *) inbox->buf)[2] = agg_cap_mask; } else { - ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; + ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; ((__be32 *) inbox->buf)[1] = agg_cap_mask; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 33c3e46e59c..212c1219327 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter) for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { pfn = pci_info[i].id; - if (pfn > QLCNIC_MAX_PCI_FUNC) { + if (pfn >= QLCNIC_MAX_PCI_FUNC) { ret = QL_STATUS_INVALID_PARAM; goto err_eswitch; } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index c3b331b74fa..0cc053af70b 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps); list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ _i_ < _maps_node_->num_maps; \ - i++, _map_ = &_maps_node_->maps[_i_]) + _i_++, _map_ = &_maps_node_->maps[_i_]) /** * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c index f6e7c670906..dd6d93aa533 100644 --- a/drivers/pinctrl/pinctrl-imx.c +++ 
b/drivers/pinctrl/pinctrl-imx.c @@ -27,16 +27,16 @@ #include "core.h" #include "pinctrl-imx.h" -#define IMX_PMX_DUMP(info, p, m, c, n) \ -{ \ - int i, j; \ - printk("Format: Pin Mux Config\n"); \ - for (i = 0; i < n; i++) { \ - j = p[i]; \ - printk("%s %d 0x%lx\n", \ - info->pins[j].name, \ - m[i], c[i]); \ - } \ +#define IMX_PMX_DUMP(info, p, m, c, n) \ +{ \ + int i, j; \ + printk(KERN_DEBUG "Format: Pin Mux Config\n"); \ + for (i = 0; i < n; i++) { \ + j = p[i]; \ + printk(KERN_DEBUG "%s %d 0x%lx\n", \ + info->pins[j].name, \ + m[i], c[i]); \ + } \ } /* The bits in CONFIG cell defined in binding doc*/ @@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, /* create mux map */ parent = of_get_parent(np); - if (!parent) + if (!parent) { + kfree(new_map); return -EINVAL; + } new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; new_map[0].data.mux.function = parent->name; new_map[0].data.mux.group = np->name; @@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, } dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", - new_map->data.mux.function, new_map->data.mux.group, map_num); + (*map)->data.mux.function, (*map)->data.mux.group, map_num); return 0; } @@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev, static void imx_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned num_maps) { - int i; - - for (i = 0; i < num_maps; i++) - kfree(map); + kfree(map); } static struct pinctrl_ops imx_pctrl_ops = { @@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np, grp->configs[j] = config & ~IMX_PAD_SION; } -#ifdef DEBUG IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); -#endif + return 0; } diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c index 556e45a213e..afb50ee6459 100644 --- a/drivers/pinctrl/pinctrl-mxs.c +++ b/drivers/pinctrl/pinctrl-mxs.c @@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, /* Compose group name */ group = kzalloc(length, GFP_KERNEL); - if (!group) - return -ENOMEM; + if (!group) { + ret = -ENOMEM; + goto free; + } snprintf(group, length, "%s.%d", np->name, reg); new_map[i].data.mux.group = group; i++; @@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); if (!pconfig) { ret = -ENOMEM; - goto free; + goto free_group; } new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; @@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev, return 0; +free_group: + if (!purecfg) + kfree(group); free: kfree(new_map); return ret; @@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev, return 0; err: + platform_set_drvdata(pdev, NULL); iounmap(d->base); return ret; } @@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev) { struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); pinctrl_unregister(d->pctl); iounmap(d->base); diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index b26395d1634..e8937e7e499 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
*/ if (nmk_chip->sleepmode && on) { - __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, + __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, NMK_GPIO_SLPM_WAKEUP_ENABLE); } @@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) ret = PTR_ERR(clk); goto out_unmap; } + clk_prepare(clk); nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); if (!nmk_chip) { diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index ba15b1a29e5..e9f8e7d1100 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1184,7 +1184,7 @@ out_no_gpio_remap: return ret; } -static const struct of_device_id pinmux_ids[] = { +static const struct of_device_id pinmux_ids[] __devinitconst = { { .compatible = "sirf,prima2-gpio-pinmux" }, {} }; diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index 639db4d0aa7..2fd9d36acd1 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -5,7 +5,7 @@ * * (C) 2009 - Peter Feuerer peter (a) piie.net * http://piie.net - * 2009 Borislav Petkov <petkovbb@gmail.com> + * 2009 Borislav Petkov bp (a) alien8.de * * Inspired by and many thanks to: * o acerfand - Rachel Greenham diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3660bace123..e82e7eaac0f 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = { .of_match_table = of_anatop_regulator_match_tbl, }, .probe = anatop_regulator_probe, - .remove = anatop_regulator_remove, + .remove = __devexit_p(anatop_regulator_remove), }; static int __init anatop_regulator_init(void) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7584a74eec8..09a737c868b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, return -EINVAL; } + if (min_uV < rdev->desc->min_uV) + min_uV = rdev->desc->min_uV; + ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); if (ret < 0) return ret; diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 9997d7aaca8..242851a4c1a 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev) } static int gpio_regulator_set_value(struct regulator_dev *dev, - int min, int max) + int min, int max, unsigned *selector) { struct gpio_regulator_data *data = rdev_get_drvdata(dev); - int ptr, target, state, best_val = INT_MAX; + int ptr, target = 0, state, best_val = INT_MAX; for (ptr = 0; ptr < data->nr_states; ptr++) if (data->states[ptr].value < best_val && data->states[ptr].value >= min && - data->states[ptr].value <= max) + data->states[ptr].value <= max) { target = data->states[ptr].gpios; + best_val = data->states[ptr].value; + if (selector) + *selector = ptr; + } if (best_val == INT_MAX) return -EINVAL; @@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV, unsigned *selector) { - return gpio_regulator_set_value(dev, min_uV, max_uV); + return gpio_regulator_set_value(dev, min_uV, max_uV, selector); } static int gpio_regulator_list_voltage(struct regulator_dev *dev, @@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, static int gpio_regulator_set_current_limit(struct 
regulator_dev *dev, int min_uA, int max_uA) { - return gpio_regulator_set_value(dev, min_uA, max_uA); + return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); } static struct regulator_ops gpio_regulator_voltage_ops = { @@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev) cfg.dev = &pdev->dev; cfg.init_data = config->init_data; - cfg.driver_data = &drvdata; + cfg.driver_data = drvdata; drvdata->dev = regulator_register(&drvdata->desc, &cfg); if (IS_ERR(drvdata->dev)) { diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 1f4bb80457b..9d540cd02da 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c @@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, config.dev = &client->dev; config.init_data = pdata->regulator; config.driver_data = info; + config.regmap = info->regmap; info->regulator = regulator_register(&dcdc_desc, &config); if (IS_ERR(info->regulator)) { diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c4435f608df..9b7ca90057d 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev) err_unregister_regulator: while (--id >= 0) regulator_unregister(pmic->rdev[id]); - kfree(pmic->rdev); - kfree(pmic->desc); - kfree(pmic); return ret; } @@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev) for (id = 0; id < PALMAS_NUM_REGS; id++) regulator_unregister(pmic->rdev[id]); - - kfree(pmic->rdev); - kfree(pmic->desc); - kfree(pmic); return 0; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 7d5f56edb8e..4267789ca99 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev) static u32 rtc_handler(void *context) { + struct device *dev = context; + + pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); acpi_disable_event(ACPI_EVENT_RTC, 0); return ACPI_INTERRUPT_HANDLED; } -static inline void rtc_wake_setup(void) +static inline void rtc_wake_setup(struct device *dev) { - acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); /* * After the RTC handler is installed, the Fixed_RTC event should * be disabled. Only when the RTC alarm is set will it be enabled. @@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev) if (acpi_disabled) return; - rtc_wake_setup(); + rtc_wake_setup(dev); acpi_rtc_info.wake_on = rtc_wake_on; acpi_rtc_info.wake_off = rtc_wake_off; diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c index 4e7ef0e6b79..d46764b5aab 100644 --- a/drivers/staging/ramster/zcache-main.c +++ b/drivers/staging/ramster/zcache-main.c @@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) return oid; } -static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, /* returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) 
*/ -static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored) } static struct frontswap_ops zcache_frontswap_ops = { - .put_page = zcache_frontswap_put_page, - .get_page = zcache_frontswap_get_page, + .store = zcache_frontswap_store, + .load = zcache_frontswap_load, .invalidate_page = zcache_frontswap_flush_page, .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2734dacacba..784c796b984 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1; * Swizzling increases objects per swaptype, increasing tmem concurrency * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from - * frontswap_get_page(), but has side-effects. Hence using 8. + * frontswap_load(), but has side-effects. Hence using 8. */ #define SWIZ_BITS 8 #define SWIZ_MASK ((1 << SWIZ_BITS) - 1) @@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) return oid; } -static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, /* returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) */ -static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, +static int zcache_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored) } static struct frontswap_ops zcache_frontswap_ops = { - .put_page = zcache_frontswap_put_page, - .get_page = zcache_frontswap_get_page, + .store = zcache_frontswap_store, + .load = zcache_frontswap_load, .invalidate_page = zcache_frontswap_flush_page, .invalidate_area = zcache_frontswap_flush_area, .init = zcache_frontswap_init diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 37c609898f8..7e6136e2ce8 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -587,14 +587,14 @@ static void sbp_management_request_logout( { struct sbp_tport *tport = agent->tport; struct sbp_tpg *tpg = tport->tpg; - int login_id; + int id; struct sbp_login_descriptor *login; - login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); + id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); - login = sbp_login_find_by_id(tpg, login_id); + login = sbp_login_find_by_id(tpg, id); if (!login) { - pr_warn("cannot find login: %d\n", login_id); + pr_warn("cannot find login: %d\n", id); req->status.status = cpu_to_be32( STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 686dba189f8..9f99d040490 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice( ret = PTR_ERR(dev_p); goto fail; } - - /* O_DIRECT too? 
*/ - flags = O_RDWR | O_CREAT | O_LARGEFILE; - /* - * If fd_buffered_io=1 has not been set explicitly (the default), - * use O_SYNC to force FILEIO writes to disk. + * Use O_DSYNC by default instead of O_SYNC to forgo syncing + * of pure timestamp updates. */ - if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) - flags |= O_SYNC; + flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; file = filp_open(dev_p, flags, 0600); if (IS_ERR(file)) { @@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd) } } -static void fd_emulate_write_fua(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - struct fd_dev *fd_dev = dev->dev_ptr; - loff_t start = cmd->t_task_lba * - dev->se_sub_dev->se_dev_attrib.block_size; - loff_t end = start + cmd->data_length; - int ret; - - pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", - cmd->t_task_lba, cmd->data_length); - - ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); - if (ret != 0) - pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); -} - static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { @@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, ret = fd_do_readv(cmd, sgl, sgl_nents); } else { ret = fd_do_writev(cmd, sgl, sgl_nents); - + /* + * Perform implicit vfs_fsync_range() for fd_do_writev() ops + * for SCSI WRITEs with Forced Unit Access (FUA) set. + * Allow this to happen independent of WCE=0 setting. + */ if (ret > 0 && - dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && (cmd->se_cmd_flags & SCF_FUA)) { - /* - * We might need to be a bit smarter here - * and return some sense data to let the initiator - * know the FUA WRITE cache sync failed..? - */ - fd_emulate_write_fua(cmd); - } + struct fd_dev *fd_dev = dev->dev_ptr; + loff_t start = cmd->t_task_lba * + dev->se_sub_dev->se_dev_attrib.block_size; + loff_t end = start + cmd->data_length; + vfs_fsync_range(fd_dev->fd_file, start, end, 1); + } } if (ret < 0) { @@ -442,7 +422,6 @@ enum { static match_table_t tokens = { {Opt_fd_dev_name, "fd_dev_name=%s"}, {Opt_fd_dev_size, "fd_dev_size=%s"}, - {Opt_fd_buffered_io, "fd_buffered_io=%d"}, {Opt_err, NULL} }; @@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params( struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; char *orig, *ptr, *arg_p, *opts; substring_t args[MAX_OPT_ARGS]; - int ret = 0, arg, token; + int ret = 0, token; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params( " bytes\n", fd_dev->fd_dev_size); fd_dev->fbd_flags |= FBDF_HAS_SIZE; break; - case Opt_fd_buffered_io: - match_int(args, &arg); - if (arg != 1) { - pr_err("bogus fd_buffered_io=%d value\n", arg); - ret = -EINVAL; - goto out; - } - - pr_debug("FILEIO: Using buffered I/O" - " operations for struct fd_dev\n"); - - fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; - break; default: break; } @@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params( ssize_t bl = 0; bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); - bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", - fd_dev->fd_dev_name, fd_dev->fd_dev_size, - (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
- "Buffered" : "Synchronous"); + bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n", + fd_dev->fd_dev_name, fd_dev->fd_dev_size); return bl; } diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index fbd59ef7d8b..70ce7fd7111 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -14,7 +14,6 @@ #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 -#define FDBD_USE_BUFFERED_IO 0x04 struct fd_dev { u32 fbd_flags; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 4604153b795..1bd9163bc11 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev, return 0; } +static void sci_cleanup_single(struct sci_port *port) +{ + sci_free_gpios(port); + + clk_put(port->iclk); + clk_put(port->fclk); + + pm_runtime_disable(port->port.dev); +} + #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE static void serial_console_putchar(struct uart_port *port, int ch) { @@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev) cpufreq_unregister_notifier(&port->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); - sci_free_gpios(port); - uart_remove_one_port(&sci_uart_driver, &port->port); - clk_put(port->iclk); - clk_put(port->fclk); + sci_cleanup_single(port); - pm_runtime_disable(&dev->dev); return 0; } @@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev, index+1, SCI_NPORTS); dev_notice(&dev->dev, "Consider bumping " "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); - return 0; + return -EINVAL; } ret = sci_init_single(dev, sciport, index, p); if (ret) return ret; - return uart_add_one_port(&sci_uart_driver, &sciport->port); + ret = uart_add_one_port(&sci_uart_driver, &sciport->port); + if (ret) { + sci_cleanup_single(sciport); + return ret; + } + + return 0; } static int __devinit sci_probe(struct platform_device *dev) @@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev) ret = sci_probe_single(dev, dev->id, p, sp); if (ret) - goto err_unreg; + return ret; sp->freq_transition.notifier_call = sci_notifier; ret = cpufreq_register_notifier(&sp->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); - if (unlikely(ret < 0)) - goto err_unreg; + if (unlikely(ret < 0)) { + sci_cleanup_single(sp); + return ret; + } #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_gdb_detach(); #endif return 0; - -err_unreg: - sci_remove(dev); - return ret; } static int sci_suspend(struct device *dev) diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 2ce9992f403..901576eb5a8 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev, { struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); - u8 errors; + u8 errors = 0; int r; mutex_lock(&td->lock); diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 72ded9cd2cb..5066eee10cc 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -194,8 +194,7 @@ static inline int dss_initialize_debugfs(void) static inline void dss_uninitialize_debugfs(void) { } -static inline int dss_debugfs_create_file(const char *name, - void (*write)(struct seq_file *)) +int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) { return 0; } diff --git 
a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index ec363d8390e..ca8382d346e 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs, /* CLKIN4DDR = 16 * TXBYTECLKHS */ tlp_avail = thsbyte_clk * (blank - trans_lp); - ttxclkesc = tdsi_fclk / lp_clk_div; + ttxclkesc = tdsi_fclk * lp_clk_div; lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc - 26) / 16; diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 6ea1ff149f6..770632359a1 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -731,7 +731,7 @@ static void dss_runtime_put(void) DSSDBG("dss_runtime_put\n"); r = pm_runtime_put_sync(&dss.pdev->dev); - WARN_ON(r < 0); + WARN_ON(r < 0 && r != -EBUSY); } /* DEBUGFS */ diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index dcb79521e6c..89f264c6742 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c @@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind) } /* returns 0 if the page was successfully put into frontswap, -1 if not */ -static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, +static int tmem_frontswap_store(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, * returns 0 if the page was successfully gotten from frontswap, -1 if * was not present (should never happen!) */ -static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, +static int tmem_frontswap_load(unsigned type, pgoff_t offset, struct page *page) { u64 ind64 = (u64)offset; @@ -362,8 +362,8 @@ static int __init no_frontswap(char *s) __setup("nofrontswap", no_frontswap); static struct frontswap_ops __initdata tmem_frontswap_ops = { - .put_page = tmem_frontswap_put_page, - .get_page = tmem_frontswap_get_page, + .store = tmem_frontswap_store, + .load = tmem_frontswap_load, .invalidate_page = tmem_frontswap_flush_page, .invalidate_area = tmem_frontswap_flush_area, .init = tmem_frontswap_init diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 20350a93ed9..6df0cbe1cbc 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -174,6 +174,7 @@ struct smb_version_operations { void (*add_credits)(struct TCP_Server_Info *, const unsigned int); void (*set_credits)(struct TCP_Server_Info *, const int); int * (*get_credits_field)(struct TCP_Server_Info *); + __u64 (*get_next_mid)(struct TCP_Server_Info *); /* data offset from read response message */ unsigned int (*read_data_offset)(char *); /* data length from read response message */ @@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val) server->ops->set_credits(server, val); } +static inline __u64 +get_next_mid(struct TCP_Server_Info *server) +{ + return server->ops->get_next_mid(server); +} + /* * Macros to allow the TCP_Server_Info->net field and related code to drop out * when CONFIG_NET_NS isn't set. 
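
An aside on the get_next_mid() hunk just above: routing the call through smb_version_operations is the classic C ops-table (vtable) dispatch. A minimal, self-contained userspace sketch of the pattern, with every type and function name invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct server;                          /* forward declaration for the op type */

    struct version_ops {
            uint64_t (*get_next_mid)(struct server *);
    };

    struct server {
            const struct version_ops *ops;  /* protocol-specific behaviour */
            uint64_t current_mid;
    };

    /* One protocol's implementation, hidden behind the op. */
    static uint64_t smb1_get_next_mid(struct server *s)
    {
            return ++s->current_mid;
    }

    static const struct version_ops smb1_ops = {
            .get_next_mid = smb1_get_next_mid,
    };

    /* Callers use the wrapper, never a protocol function directly. */
    static inline uint64_t get_next_mid(struct server *s)
    {
            return s->ops->get_next_mid(s);
    }

    int main(void)
    {
            struct server srv = { .ops = &smb1_ops, .current_mid = 0 };
            printf("mid=%llu\n", (unsigned long long)get_next_mid(&srv));
            return 0;
    }

This is exactly what lets the later hunks delete GetNextMid() from misc.c and re-add it as a static cifs_get_next_mid() in smb1ops.c: only the ops table needs to know where it lives.
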
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5ec21ecf798..0a6cbfe2761 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct, void **request_buf); extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, const struct nls_table *nls_cp); -extern __u64 GetNextMid(struct TCP_Server_Info *server); extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); extern u64 cifs_UnixTimeToNT(struct timespec); extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index b5ad716b264..5b400730c21 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct, return rc; buffer = (struct smb_hdr *)*request_buf; - buffer->Mid = GetNextMid(ses->server); + buffer->Mid = get_next_mid(ses->server); if (ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (ses->capabilities & CAP_STATUS32) @@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) cFYI(1, "secFlags 0x%x", secFlags); - pSMB->hdr.Mid = GetNextMid(server); + pSMB->hdr.Mid = get_next_mid(server); pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) @@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses) return rc; } - pSMB->hdr.Mid = GetNextMid(ses->server); + pSMB->hdr.Mid = get_next_mid(ses->server); if (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) @@ -4762,7 +4762,7 @@ getDFSRetry: /* server pointer checked in called function, but should never be null here anyway */ - pSMB->hdr.Mid = GetNextMid(ses->server); + pSMB->hdr.Mid = get_next_mid(ses->server); pSMB->hdr.Tid = ses->ipc_tid; pSMB->hdr.Uid = ses->Suid; if (ses->capabilities & CAP_STATUS32) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccafdedd0db..78db68a5cf4 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p) if (mid_entry != NULL) { if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); - } else if (!server->ops->is_oplock_break(buf, server)) { + } else if (!server->ops->is_oplock_break || + !server->ops->is_oplock_break(buf, server)) { cERROR(1, "No task to wake, unknown frame received! 
" "NumMids %d", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", buf, HEADER_SIZE(server)); #ifdef CONFIG_CIFS_DEBUG2 - server->ops->dump_detail(buf); + if (server->ops->dump_detail) + server->ops->dump_detail(buf); cifs_dump_mids(server); #endif /* CIFS_DEBUG2 */ @@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); - smb_buffer->Mid = GetNextMid(ses->server); + smb_buffer->Mid = get_next_mid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 253170dfa71..513adbc211d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) struct cifsLockInfo *li, *tmp; struct cifs_tcon *tcon; struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); - unsigned int num, max_num; + unsigned int num, max_num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; int types[] = {LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; @@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) return rc; } - max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / - sizeof(LOCKING_ANDX_RANGE); + /* + * Accessing maxBuf is racy with cifs_reconnect - need to store value + * and check it for zero before using. + */ + max_buf = tcon->ses->server->maxBuf; + if (!max_buf) { + mutex_unlock(&cinode->lock_mutex); + FreeXid(xid); + return -EINVAL; + } + + max_num = (max_buf - sizeof(struct smb_hdr)) / + sizeof(LOCKING_ANDX_RANGE); buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) { mutex_unlock(&cinode->lock_mutex); @@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) int types[] = {LOCKING_ANDX_LARGE_FILES, LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; unsigned int i; - unsigned int max_num, num; + unsigned int max_num, num, max_buf; LOCKING_ANDX_RANGE *buf, *cur; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); @@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) INIT_LIST_HEAD(&tmp_llist); - max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / - sizeof(LOCKING_ANDX_RANGE); + /* + * Accessing maxBuf is racy with cifs_reconnect - need to store value + * and check it for zero before using. + */ + max_buf = tcon->ses->server->maxBuf; + if (!max_buf) + return -EINVAL; + + max_num = (max_buf - sizeof(struct smb_hdr)) / + sizeof(LOCKING_ANDX_RANGE); buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); if (!buf) return -ENOMEM; @@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) continue; if (types[i] != li->type) continue; - if (!cinode->can_cache_brlcks) { - cur->Pid = cpu_to_le16(li->pid); - cur->LengthLow = cpu_to_le32((u32)li->length); - cur->LengthHigh = - cpu_to_le32((u32)(li->length>>32)); - cur->OffsetLow = cpu_to_le32((u32)li->offset); - cur->OffsetHigh = - cpu_to_le32((u32)(li->offset>>32)); - /* - * We need to save a lock here to let us add - * it again to the file's list if the unlock - * range request fails on the server. 
- */ - list_move(&li->llist, &tmp_llist); - if (++num == max_num) { - stored_rc = cifs_lockv(xid, tcon, - cfile->netfid, - li->type, num, - 0, buf); - if (stored_rc) { - /* - * We failed on the unlock range - * request - add all locks from - * the tmp list to the head of - * the file's list. - */ - cifs_move_llist(&tmp_llist, - &cfile->llist); - rc = stored_rc; - } else - /* - * The unlock range request - * succeed - free the tmp list. - */ - cifs_free_llist(&tmp_llist); - cur = buf; - num = 0; - } else - cur++; - } else { + if (cinode->can_cache_brlcks) { /* * We can cache brlock requests - simply remove * a lock from the file's list. @@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid) list_del(&li->llist); cifs_del_lock_waiters(li); kfree(li); + continue; } + cur->Pid = cpu_to_le16(li->pid); + cur->LengthLow = cpu_to_le32((u32)li->length); + cur->LengthHigh = cpu_to_le32((u32)(li->length>>32)); + cur->OffsetLow = cpu_to_le32((u32)li->offset); + cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); + /* + * We need to save a lock here to let us add it again to + * the file's list if the unlock range request fails on + * the server. + */ + list_move(&li->llist, &tmp_llist); + if (++num == max_num) { + stored_rc = cifs_lockv(xid, tcon, cfile->netfid, + li->type, num, 0, buf); + if (stored_rc) { + /* + * We failed on the unlock range + * request - add all locks from the tmp + * list to the head of the file's list. + */ + cifs_move_llist(&tmp_llist, + &cfile->llist); + rc = stored_rc; + } else + /* + * The unlock range request succeed - + * free the tmp list. + */ + cifs_free_llist(&tmp_llist); + cur = buf; + num = 0; + } else + cur++; } if (num) { stored_rc = cifs_lockv(xid, tcon, cfile->netfid, diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e2552d2b2e4..557506ae1e2 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free) return; } -/* - * Find a free multiplex id (SMB mid). Otherwise there could be - * mid collisions which might cause problems, demultiplexing the - * wrong response to this request. Multiplex ids could collide if - * one of a series requests takes much longer than the others, or - * if a very large number of long lived requests (byte range - * locks or FindNotify requests) are pending. No more than - * 64K-1 requests can be outstanding at one time. If no - * mids are available, return zero. A future optimization - * could make the combination of mids and uid the key we use - * to demultiplex on (rather than mid alone). - * In addition to the above check, the cifs demultiplex - * code already used the command code as a secondary - * check of the frame and if signing is negotiated the - * response would be discarded if the mid were the same - * but the signature was wrong. Since the mid is not put in the - * pending queue until later (when it is about to be dispatched) - * we do have to limit the number of outstanding requests - * to somewhat less than 64K-1 although it is hard to imagine - * so many threads being in the vfs at one time. - */ -__u64 GetNextMid(struct TCP_Server_Info *server) -{ - __u64 mid = 0; - __u16 last_mid, cur_mid; - bool collision; - - spin_lock(&GlobalMid_Lock); - - /* mid is 16 bit only for CIFS/SMB */ - cur_mid = (__u16)((server->CurrentMid) & 0xffff); - /* we do not want to loop forever */ - last_mid = cur_mid; - cur_mid++; - - /* - * This nested loop looks more expensive than it is. 
- * In practice the list of pending requests is short, - * fewer than 50, and the mids are likely to be unique - * on the first pass through the loop unless some request - * takes longer than the 64 thousand requests before it - * (and it would also have to have been a request that - * did not time out). - */ - while (cur_mid != last_mid) { - struct mid_q_entry *mid_entry; - unsigned int num_mids; - - collision = false; - if (cur_mid == 0) - cur_mid++; - - num_mids = 0; - list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { - ++num_mids; - if (mid_entry->mid == cur_mid && - mid_entry->mid_state == MID_REQUEST_SUBMITTED) { - /* This mid is in use, try a different one */ - collision = true; - break; - } - } - - /* - * if we have more than 32k mids in the list, then something - * is very wrong. Possibly a local user is trying to DoS the - * box by issuing long-running calls and SIGKILL'ing them. If - * we get to 2^16 mids then we're in big trouble as this - * function could loop forever. - * - * Go ahead and assign out the mid in this situation, but force - * an eventual reconnect to clean out the pending_mid_q. - */ - if (num_mids > 32768) - server->tcpStatus = CifsNeedReconnect; - - if (!collision) { - mid = (__u64)cur_mid; - server->CurrentMid = mid; - break; - } - cur_mid++; - } - spin_unlock(&GlobalMid_Lock); - return mid; -} - /* NB: MID can not be set if treeCon not passed in, in that case it is responsibility of caller to set the mid */ void @@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , /* Uid is not converted */ buffer->Uid = treeCon->ses->Suid; - buffer->Mid = GetNextMid(treeCon->ses->server); + buffer->Mid = get_next_mid(treeCon->ses->server); } if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) buffer->Flags2 |= SMBFLG2_DFS; diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index d9d615fbed3..6dec38f5522 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server) return &server->credits; } +/* + * Find a free multiplex id (SMB mid). Otherwise there could be + * mid collisions which might cause problems, demultiplexing the + * wrong response to this request. Multiplex ids could collide if + * one of a series of requests takes much longer than the others, or + * if a very large number of long lived requests (byte range + * locks or FindNotify requests) are pending. No more than + * 64K-1 requests can be outstanding at one time. If no + * mids are available, return zero. A future optimization + * could make the combination of mids and uid the key we use + * to demultiplex on (rather than mid alone). + * In addition to the above check, the cifs demultiplex + * code already used the command code as a secondary + * check of the frame and if signing is negotiated the + * response would be discarded if the mid were the same + * but the signature was wrong. Since the mid is not put in the + * pending queue until later (when it is about to be dispatched) + * we do have to limit the number of outstanding requests + * to somewhat less than 64K-1 although it is hard to imagine + * so many threads being in the vfs at one time.
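
The comment above (carried verbatim as it moves from misc.c into smb1ops.c) describes a bounded linear probe over a 16-bit id space. Boiled down to a standalone model, with a bitmap standing in for the pending_mid_q walk and no locking:

    #include <stdbool.h>
    #include <stdint.h>

    static bool pending[65536];     /* toy stand-in for the pending request list */

    static uint64_t next_mid(uint16_t *current_mid)
    {
            uint16_t last = *current_mid;
            uint16_t cur = last + 1;        /* wraps naturally at 16 bits */

            while (cur != last) {           /* at most one full wrap, never forever */
                    if (cur == 0)
                            cur++;          /* mid 0 stays reserved */
                    if (!pending[cur]) {    /* no collision: claim it */
                            *current_mid = cur;
                            return cur;
                    }
                    cur++;
            }
            return 0;                       /* all usable mids in flight */
    }

The real function, which follows, additionally counts the pending entries so it can force a reconnect once the queue has grown absurdly large.
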
+ */ +static __u64 +cifs_get_next_mid(struct TCP_Server_Info *server) +{ + __u64 mid = 0; + __u16 last_mid, cur_mid; + bool collision; + + spin_lock(&GlobalMid_Lock); + + /* mid is 16 bit only for CIFS/SMB */ + cur_mid = (__u16)((server->CurrentMid) & 0xffff); + /* we do not want to loop forever */ + last_mid = cur_mid; + cur_mid++; + + /* + * This nested loop looks more expensive than it is. + * In practice the list of pending requests is short, + * fewer than 50, and the mids are likely to be unique + * on the first pass through the loop unless some request + * takes longer than the 64 thousand requests before it + * (and it would also have to have been a request that + * did not time out). + */ + while (cur_mid != last_mid) { + struct mid_q_entry *mid_entry; + unsigned int num_mids; + + collision = false; + if (cur_mid == 0) + cur_mid++; + + num_mids = 0; + list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { + ++num_mids; + if (mid_entry->mid == cur_mid && + mid_entry->mid_state == MID_REQUEST_SUBMITTED) { + /* This mid is in use, try a different one */ + collision = true; + break; + } + } + + /* + * if we have more than 32k mids in the list, then something + * is very wrong. Possibly a local user is trying to DoS the + * box by issuing long-running calls and SIGKILL'ing them. If + * we get to 2^16 mids then we're in big trouble as this + * function could loop forever. + * + * Go ahead and assign out the mid in this situation, but force + * an eventual reconnect to clean out the pending_mid_q. + */ + if (num_mids > 32768) + server->tcpStatus = CifsNeedReconnect; + + if (!collision) { + mid = (__u64)cur_mid; + server->CurrentMid = mid; + break; + } + cur_mid++; + } + spin_unlock(&GlobalMid_Lock); + return mid; +} + struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, @@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = { .add_credits = cifs_add_credits, .set_credits = cifs_set_credits, .get_credits_field = cifs_get_credits_field, + .get_next_mid = cifs_get_next_mid, .read_data_offset = cifs_read_data_offset, .read_data_length = cifs_read_data_length, .map_error = map_smb_to_linux_error, diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 1b36ffe6a47..3097ee58fd7 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; pSMB->Timeout = 0; - pSMB->hdr.Mid = GetNextMid(ses->server); + pSMB->hdr.Mid = get_next_mid(ses->server); return SendReceive(xid, ses, in_buf, out_buf, &bytes_returned, 0); diff --git a/fs/dcache.c b/fs/dcache.c index 85c9e2bff8e..40469044088 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -683,6 +683,8 @@ EXPORT_SYMBOL(dget_parent); /** * d_find_alias - grab a hashed alias of inode * @inode: inode in question + * @want_discon: flag, used by d_splice_alias, to request + * that only a DISCONNECTED alias be returned. * * If inode has a hashed alias, or is a directory and has any alias, * acquire the reference to alias and return it. Otherwise return NULL. @@ -691,9 +693,10 @@ EXPORT_SYMBOL(dget_parent); * of a filesystem. * * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer - * any other hashed alias over that. + * any other hashed alias over that one unless @want_discon is set, + * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 
*/ -static struct dentry *__d_find_alias(struct inode *inode) +static struct dentry *__d_find_alias(struct inode *inode, int want_discon) { struct dentry *alias, *discon_alias; @@ -705,7 +708,7 @@ again: if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; - } else { + } else if (!want_discon) { __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; @@ -736,7 +739,7 @@ struct dentry *d_find_alias(struct inode *inode) if (!list_empty(&inode->i_dentry)) { spin_lock(&inode->i_lock); - de = __d_find_alias(inode); + de = __d_find_alias(inode, 0); spin_unlock(&inode->i_lock); } return de; @@ -1647,8 +1650,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) if (inode && S_ISDIR(inode->i_mode)) { spin_lock(&inode->i_lock); - new = __d_find_any_alias(inode); + new = __d_find_alias(inode, 1); if (new) { + BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&inode->i_lock); security_d_instantiate(new, inode); d_move(new, dentry); @@ -2478,7 +2482,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) struct dentry *alias; /* Does an aliased dentry already exist? */ - alias = __d_find_alias(inode); + alias = __d_find_alias(inode, 0); if (alias) { actual = alias; write_seqlock(&rename_lock); diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c index e32bc919e4e..5a7b691e748 100644 --- a/fs/exofs/sys.c +++ b/fs/exofs/sys.c @@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = { static struct kobj_type uuid_ktype = { }; -void exofs_sysfs_dbg_print() +void exofs_sysfs_dbg_print(void) { #ifdef CONFIG_EXOFS_DEBUG struct kobject *k_name, *k_tmp; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 99b6324290d..cee7812cc3c 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, * unusual file system layouts. 
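
The want_discon parameter threaded through __d_find_alias() above encodes a small selection policy: remember an IS_ROOT, DCACHE_DISCONNECTED alias but keep scanning for an ordinary hashed one, unless the caller asked for the disconnected kind only. As a pure-function sketch over a toy array (names invented):

    #include <stdbool.h>
    #include <stddef.h>

    struct alias { bool disconnected; };

    static struct alias *find_alias(struct alias *v, size_t n, bool want_discon)
    {
            struct alias *discon = NULL;

            for (size_t i = 0; i < n; i++) {
                    if (v[i].disconnected)
                            discon = &v[i];         /* remember, keep looking */
                    else if (!want_discon)
                            return &v[i];           /* any ordinary alias wins */
            }
            /* only a disconnected alias (or nothing) was found */
            return discon;
    }

That is also why d_splice_alias() can assert BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)) after calling with want_discon set: anything returned on that path must carry the flag.
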
*/ if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { - block_cluster = EXT4_B2C(sbi, (start - - ext4_block_bitmap(sb, gdp))); + block_cluster = EXT4_B2C(sbi, + ext4_block_bitmap(sb, gdp) - start); if (block_cluster < num_clusters) block_cluster = -1; else if (block_cluster == num_clusters) { @@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { inode_cluster = EXT4_B2C(sbi, - start - ext4_inode_bitmap(sb, gdp)); + ext4_inode_bitmap(sb, gdp) - start); if (inode_cluster < num_clusters) inode_cluster = -1; else if (inode_cluster == num_clusters) { @@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb, itbl_blk = ext4_inode_table(sb, gdp); for (i = 0; i < sbi->s_itb_per_group; i++) { if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { - c = EXT4_B2C(sbi, start - itbl_blk + i); + c = EXT4_B2C(sbi, itbl_blk + i - start); if ((c < num_clusters) || (c == inode_cluster) || (c == block_cluster) || (c == itbl_cluster)) continue; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 8ad112ae0ad..e34deac3f36 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -123,7 +123,6 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) else ext4_clear_inode_flag(inode, i); } - ei->i_flags = flags; ext4_set_inode_flags(inode); inode->i_ctime = ext4_current_time(inode); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8d2fb8c88cf..41a3ccff18d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb, /* Wait for I_SYNC. This function drops i_lock... */ inode_sleep_on_writeback(inode); /* Inode may be gone, start again */ + spin_lock(&wb->list_lock); continue; } inode->i_state |= I_SYNC; diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 42593c587d4..03ff5b1eba9 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, unsigned global_limit) { unsigned long t; - char tmp[32]; unsigned limit = (1 << 16) - 1; int err; - if (*ppos || count >= sizeof(tmp) - 1) - return -EINVAL; - - if (copy_from_user(tmp, buf, count)) + if (*ppos) return -EINVAL; - tmp[count] = '\0'; - - err = strict_strtoul(tmp, 0, &t); + err = kstrtoul_from_user(buf, count, 0, &t); if (err) return err; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index df5ac048dc7..334e0b18a01 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, struct kstat *stat) { + unsigned int blkbits; + stat->dev = inode->i_sb->s_dev; stat->ino = attr->ino; stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); @@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, stat->ctime.tv_nsec = attr->ctimensec; stat->size = attr->size; stat->blocks = attr->blocks; - stat->blksize = (1 << inode->i_blkbits); + + if (attr->blksize != 0) + blkbits = ilog2(attr->blksize); + else + blkbits = inode->i_sb->s_blocksize_bits; + + stat->blksize = 1 << blkbits; } static int fuse_do_getattr(struct inode *inode, struct kstat *stat, @@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, if (stat) { generic_fillattr(inode, stat); stat->mode = fi->orig_i_mode; + stat->ino = fi->orig_ino; } } diff --git a/fs/fuse/file.c 
b/fs/fuse/file.c index 9562109d3a8..b321a688cde 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2173,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, return ret; } +long fuse_file_fallocate(struct file *file, int mode, loff_t offset, + loff_t length) +{ + struct fuse_file *ff = file->private_data; + struct fuse_conn *fc = ff->fc; + struct fuse_req *req; + struct fuse_fallocate_in inarg = { + .fh = ff->fh, + .offset = offset, + .length = length, + .mode = mode + }; + int err; + + if (fc->no_fallocate) + return -EOPNOTSUPP; + + req = fuse_get_req(fc); + if (IS_ERR(req)) + return PTR_ERR(req); + + req->in.h.opcode = FUSE_FALLOCATE; + req->in.h.nodeid = ff->nodeid; + req->in.numargs = 1; + req->in.args[0].size = sizeof(inarg); + req->in.args[0].value = &inarg; + fuse_request_send(fc, req); + err = req->out.h.error; + if (err == -ENOSYS) { + fc->no_fallocate = 1; + err = -EOPNOTSUPP; + } + fuse_put_request(fc, req); + + return err; +} +EXPORT_SYMBOL_GPL(fuse_file_fallocate); + static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, .read = do_sync_read, @@ -2190,6 +2228,7 @@ static const struct file_operations fuse_file_operations = { .unlocked_ioctl = fuse_file_ioctl, .compat_ioctl = fuse_file_compat_ioctl, .poll = fuse_file_poll, + .fallocate = fuse_file_fallocate, }; static const struct file_operations fuse_direct_io_file_operations = { @@ -2206,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = { .unlocked_ioctl = fuse_file_ioctl, .compat_ioctl = fuse_file_compat_ioctl, .poll = fuse_file_poll, + .fallocate = fuse_file_fallocate, /* no splice_read */ }; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 572cefc7801..771fb6322c0 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -82,6 +82,9 @@ struct fuse_inode { preserve the original mode */ umode_t orig_i_mode; + /** 64 bit inode number */ + u64 orig_ino; + /** Version of last attribute change */ u64 attr_version; @@ -478,6 +481,9 @@ struct fuse_conn { /** Are BSD file locking primitives not implemented by fs? */ unsigned no_flock:1; + /** Is fallocate not implemented by fs? */ + unsigned no_fallocate:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 42678a33b7b..1cd61652018 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) fi->nlookup = 0; fi->attr_version = 0; fi->writectr = 0; + fi->orig_ino = 0; INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); @@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) return 0; } +/* + * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down + * so that it will fit. 
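
Before the kernel version of the squash below, here is the same fold as a self-contained userspace demo (ino32_t is an invented alias modelling ino_t on a 32-bit arch). XOR-ing the discarded high half into the low half keeps distinct 64-bit inode numbers distinct more often than plain truncation would:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t ino32_t;       /* models a 32-bit ino_t */

    static ino32_t squash_ino(uint64_t ino64)
    {
            ino32_t ino = (ino32_t)ino64;

            if (sizeof(ino32_t) < sizeof(uint64_t))     /* compiles away on 64-bit */
                    ino ^= ino64 >> (sizeof(uint64_t) - sizeof(ino32_t)) * 8;
            return ino;
    }

    int main(void)
    {
            /* 0x00000001_00000002 folds to 0x00000003, not a bare 0x2 */
            printf("%#x\n", (unsigned)squash_ino(0x0000000100000002ULL));
            return 0;
    }
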
+ */ +static ino_t fuse_squash_ino(u64 ino64) +{ + ino_t ino = (ino_t) ino64; + if (sizeof(ino_t) < sizeof(u64)) + ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8; + return ino; +} + void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, u64 attr_valid) { @@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, fi->attr_version = ++fc->attr_version; fi->i_time = attr_valid; - inode->i_ino = attr->ino; + inode->i_ino = fuse_squash_ino(attr->ino); inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); set_nlink(inode, attr->nlink); inode->i_uid = attr->uid; @@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, fi->orig_i_mode = inode->i_mode; if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) inode->i_mode &= ~S_ISVTX; + + fi->orig_ino = attr->ino; } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, diff --git a/fs/proc/base.c b/fs/proc/base.c index 616f41a7cde..437195f204e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) rcu_read_lock(); file = fcheck_files(files, fd); if (file) { - unsigned i_mode, f_mode = file->f_mode; + unsigned f_mode = file->f_mode; rcu_read_unlock(); put_files_struct(files); @@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) inode->i_gid = GLOBAL_ROOT_GID; } - i_mode = S_IFLNK; - if (f_mode & FMODE_READ) - i_mode |= S_IRUSR | S_IXUSR; - if (f_mode & FMODE_WRITE) - i_mode |= S_IWUSR | S_IXUSR; - inode->i_mode = i_mode; + if (S_ISLNK(inode->i_mode)) { + unsigned i_mode = S_IFLNK; + if (f_mode & FMODE_READ) + i_mode |= S_IRUSR | S_IXUSR; + if (f_mode & FMODE_WRITE) + i_mode |= S_IWUSR | S_IXUSR; + inode->i_mode = i_mode; + } security_task_to_inode(task, inode); put_task_struct(task); @@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir, ei = PROC_I(inode); ei->fd = fd; + inode->i_mode = S_IFLNK; inode->i_op = &proc_pid_link_inode_operations; inode->i_size = 64; ei->op.proc_get_link = proc_fd_link; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 685a83756b2..84a7e6f3c04 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) struct dentry *dent; struct ubifs_debug_info *d = c->dbg; + if (!IS_ENABLED(DEBUG_FS)) + return 0; + n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, c->vi.ubi_num, c->vi.vol_id); if (n == UBIFS_DFS_DIR_LEN) { @@ -3010,7 +3013,8 @@ out: */ void dbg_debugfs_exit_fs(struct ubifs_info *c) { - debugfs_remove_recursive(c->dbg->dfs_dir); + if (IS_ENABLED(DEBUG_FS)) + debugfs_remove_recursive(c->dbg->dfs_dir); } struct ubifs_global_debug_info ubifs_dbg; @@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void) const char *fname; struct dentry *dent; + if (!IS_ENABLED(DEBUG_FS)) + return 0; + fname = "ubifs"; dent = debugfs_create_dir(fname, NULL); if (IS_ERR_OR_NULL(dent)) @@ -3159,7 +3166,8 @@ out: */ void dbg_debugfs_exit(void) { - debugfs_remove_recursive(dfs_rootdir); + if (IS_ENABLED(DEBUG_FS)) + debugfs_remove_recursive(dfs_rootdir); } /** diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b0d62820ada..9e6e1c6eb60 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) #else /* CONFIG_ACPI */ -static int register_acpi_bus_type(struct 
acpi_bus_type *bus) { return 0; } -static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } +static inline int register_acpi_bus_type(void *bus) { return 0; } +static inline int unregister_acpi_bus_type(void *bus) { return 0; } #endif /* CONFIG_ACPI */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 2520a6e241d..9f02005f217 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -2,6 +2,7 @@ #define _ASM_GENERIC_BUG_H #include <linux/compiler.h> +#include <linux/kernel.h> #ifdef CONFIG_BUG diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 73e45600f95..bac55c21511 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -54,7 +54,7 @@ struct drm_mode_object { struct drm_object_properties *properties; }; -#define DRM_OBJECT_MAX_PROPERTY 16 +#define DRM_OBJECT_MAX_PROPERTY 24 struct drm_object_properties { int count; uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 58d0bdab68d..81368ab6c61 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -181,6 +181,7 @@ {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ @@ -198,6 +199,7 @@ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ @@ -229,10 +231,11 @@ {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ - {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ @@ -531,6 +534,7 @@ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -550,6 +554,7 @@ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ @@ -561,11 +566,19 @@ {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0, 0, 0} #define r128_PCI_IDS \ diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index b6d7ce92ead..68733587e70 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h @@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off { * A structure for mapping buffer. * * @handle: a handle to gem object created. + * @pad: just padding to be 64-bit aligned. * @size: memory size to be mapped. 
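
Worth a sketch while the exynos_drm hunk is in view: the @pad member it adds is the standard trick for keeping a u64 at the same offset for 32-bit and 64-bit userspace, by making the alignment padding explicit rather than compiler-chosen. A compile-time check of the idea (struct name invented):

    #include <stddef.h>
    #include <stdint.h>

    struct gem_mmap_like {
            uint32_t handle;
            uint32_t pad;       /* explicit, so the ABI layout never varies */
            uint64_t size;
            uint64_t mapped;
    };

    _Static_assert(offsetof(struct gem_mmap_like, size) == 8,
                   "u64 fields start on an 8-byte boundary everywhere");
    _Static_assert(sizeof(struct gem_mmap_like) == 24, "no hidden tail padding");

Without the pad, an i386 compiler (where u64 alignment is 4) places size at offset 4 while an x86-64 compiler pads it to offset 8, and the ioctl structure no longer matches across the compat boundary.
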
* @mapped: having user virtual address mmaped. * - this variable would be filled by exynos gem module @@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off { */ struct drm_exynos_gem_mmap { unsigned int handle; - unsigned int size; + unsigned int pad; + uint64_t size; uint64_t mapped; }; diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 81e803e90aa..acba894374a 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); +extern void clockevents_config(struct clock_event_device *dev, u32 freq); extern void clockevents_config_and_register(struct clock_event_device *dev, u32 freq, unsigned long min_delta, unsigned long max_delta); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e988037abd2..51a90b7f2d6 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -1,8 +1,6 @@ #ifndef _LINUX_COMPACTION_H #define _LINUX_COMPACTION_H -#include <linux/node.h> - /* Return values for compact_zone() and try_to_compact_pages() */ /* compaction didn't start as it was not possible or direct reclaim was more suitable */ #define COMPACT_SKIPPED 0 @@ -13,23 +11,6 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 3 -/* - * compaction supports three modes - * - * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans - * MIGRATE_MOVABLE pageblocks as migration sources and targets. - * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans - * MIGRATE_MOVABLE pageblocks as migration sources. - * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration - * targets and convers them to MIGRATE_MOVABLE if possible - * COMPACT_SYNC uses synchronous migration and scans all pageblocks - */ -enum compact_mode { - COMPACT_ASYNC_MOVABLE, - COMPACT_ASYNC_UNMOVABLE, - COMPACT_SYNC, -}; - #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h new file mode 100644 index 00000000000..0e4e2eec5c1 --- /dev/null +++ b/include/linux/frontswap.h @@ -0,0 +1,127 @@ +#ifndef _LINUX_FRONTSWAP_H +#define _LINUX_FRONTSWAP_H + +#include <linux/swap.h> +#include <linux/mm.h> +#include <linux/bitops.h> + +struct frontswap_ops { + void (*init)(unsigned); + int (*store)(unsigned, pgoff_t, struct page *); + int (*load)(unsigned, pgoff_t, struct page *); + void (*invalidate_page)(unsigned, pgoff_t); + void (*invalidate_area)(unsigned); +}; + +extern bool frontswap_enabled; +extern struct frontswap_ops + frontswap_register_ops(struct frontswap_ops *ops); +extern void frontswap_shrink(unsigned long); +extern unsigned long frontswap_curr_pages(void); +extern void frontswap_writethrough(bool); + +extern void __frontswap_init(unsigned type); +extern int __frontswap_store(struct page *page); +extern int __frontswap_load(struct page *page); +extern void __frontswap_invalidate_page(unsigned, pgoff_t); +extern void __frontswap_invalidate_area(unsigned); + +#ifdef CONFIG_FRONTSWAP + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + bool ret = false; + + if (frontswap_enabled && sis->frontswap_map) + ret = test_bit(offset, sis->frontswap_map); + return ret; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && 
sis->frontswap_map) + set_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ + if (frontswap_enabled && sis->frontswap_map) + clear_bit(offset, sis->frontswap_map); +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ + p->frontswap_map = map; +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return p->frontswap_map; +} +#else +/* all inline routines become no-ops and all externs are ignored */ + +#define frontswap_enabled (0) + +static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) +{ + return false; +} + +static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset) +{ +} + +static inline void frontswap_map_set(struct swap_info_struct *p, + unsigned long *map) +{ +} + +static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) +{ + return NULL; +} +#endif + +static inline int frontswap_store(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_store(page); + return ret; +} + +static inline int frontswap_load(struct page *page) +{ + int ret = -1; + + if (frontswap_enabled) + ret = __frontswap_load(page); + return ret; +} + +static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + if (frontswap_enabled) + __frontswap_invalidate_page(type, offset); +} + +static inline void frontswap_invalidate_area(unsigned type) +{ + if (frontswap_enabled) + __frontswap_invalidate_area(type); +} + +static inline void frontswap_init(unsigned type) +{ + if (frontswap_enabled) + __frontswap_init(type); +} + +#endif /* _LINUX_FRONTSWAP_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 51978ed43e9..17fd887c798 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -802,13 +802,14 @@ struct inode { unsigned int __i_nlink; }; dev_t i_rdev; + loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; + unsigned int i_blkbits; blkcnt_t i_blocks; - loff_t i_size; #ifdef __NEED_I_SIZE_ORDERED seqcount_t i_size_seqcount; @@ -828,9 +829,8 @@ struct inode { struct list_head i_dentry; struct rcu_head i_rcu; }; - atomic_t i_count; - unsigned int i_blkbits; u64 i_version; + atomic_t i_count; atomic_t i_dio_count; atomic_t i_writecount; const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 8f2ab8fef92..9303348965f 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -54,6 +54,9 @@ * 7.18 * - add FUSE_IOCTL_DIR flag * - add FUSE_NOTIFY_DELETE + * + * 7.19 + * - add FUSE_FALLOCATE */ #ifndef _LINUX_FUSE_H @@ -85,7 +88,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 18 +#define FUSE_KERNEL_MINOR_VERSION 19 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -278,6 +281,7 @@ enum fuse_opcode { FUSE_POLL = 40, FUSE_NOTIFY_REPLY = 41, FUSE_BATCH_FORGET = 42, + FUSE_FALLOCATE = 43, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out { __u64 kh; }; +struct fuse_fallocate_in { + __u64 fh; + __u64 offset; + __u64 length; + __u32 mode; + __u32 padding; +}; + struct fuse_in_header { __u32 len; __u32 opcode; diff --git 
a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h new file mode 100644 index 00000000000..a65c86429e8 --- /dev/null +++ b/include/linux/i2c-mux-pinctrl.h @@ -0,0 +1,41 @@ +/* + * i2c-mux-pinctrl platform data + * + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _LINUX_I2C_MUX_PINCTRL_H +#define _LINUX_I2C_MUX_PINCTRL_H + +/** + * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl + * @parent_bus_num: Parent I2C bus number + * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic. + * @bus_count: Number of child busses. Also the number of elements in + * @pinctrl_states + * @pinctrl_states: The names of the pinctrl state to select for each child bus + * @pinctrl_state_idle: The pinctrl state to select when no child bus is being + * accessed. If NULL, the most recently used pinctrl state will be left + * selected. + */ +struct i2c_mux_pinctrl_platform_data { + int parent_bus_num; + int base_bus_num; + int bus_count; + const char **pinctrl_states; + const char *pinctrl_state_idle; +}; + +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e4baff5f7ff..9e65eff6af3 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -149,6 +149,7 @@ extern struct cred init_cred; .normal_prio = MAX_PRIO-20, \ .policy = SCHED_NORMAL, \ .cpus_allowed = CPU_MASK_ALL, \ + .nr_cpus_allowed= NR_CPUS, \ .mm = NULL, \ .active_mm = &init_mm, \ .se = { \ @@ -157,7 +158,6 @@ extern struct cred init_cred; .rt = { \ .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ .time_slice = RR_TIMESLICE, \ - .nr_cpus_allowed = NR_CPUS, \ }, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ INIT_PUSHABLE_TASKS(tsk) \ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 1b14d25162c..d6a58065c09 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -128,7 +128,7 @@ struct kparam_array * The ops can have NULL set or get functions. */ #define module_param_cb(name, ops, arg, perm) \ - __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) + __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) /** * <level>_param_cb - general callback for a module/cmdline parameter @@ -192,7 +192,7 @@ struct kparam_array { (void *)set, (void *)get }; \ __module_param_call(MODULE_PARAM_PREFIX, \ name, &__param_ops_##name, arg, \ - (perm) + sizeof(__check_old_set_param(set))*0, 0) + (perm) + sizeof(__check_old_set_param(set))*0, -1) /* We don't get oldget: it's often a new-style param_get_uint, etc. 
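
Stepping back to the i2c-mux-pinctrl header earlier in this hunk: a hypothetical board file would wire its platform data up roughly as follows (bus numbers and state names are made up for illustration):

    #include <linux/i2c-mux-pinctrl.h>

    static const char *mux_states[] = { "mux-a", "mux-b" };

    static struct i2c_mux_pinctrl_platform_data mux_pdata = {
            .parent_bus_num     = 1,            /* mux hangs off i2c-1 */
            .base_bus_num       = 0,            /* 0: dynamic child bus numbers */
            .bus_count          = 2,            /* matches mux_states[] */
            .pinctrl_states     = mux_states,
            .pinctrl_state_idle = "idle",       /* or NULL: park on last state */
    };

Per the kernel-doc above, bus_count must equal the number of entries in pinctrl_states, one child bus per named state.
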
*/ static inline int @@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void) */ #define core_param(name, var, type, perm) \ param_check_##type(name, &(var)); \ - __module_param_call("", name, ¶m_ops_##type, &var, perm, 0) + __module_param_call("", name, ¶m_ops_##type, &var, perm, -1) #endif /* !MODULE */ /** @@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void) = { len, string }; \ __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_ops_string, \ - .str = &__param_string_##name, perm, 0); \ + .str = &__param_string_##name, perm, -1); \ __MODULE_PARM_TYPE(name, "string") /** @@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp); __module_param_call(MODULE_PARAM_PREFIX, name, \ ¶m_array_ops, \ .arr = &__param_arr_##name, \ - perm, 0); \ + perm, -1); \ __MODULE_PARM_TYPE(name, "array of " #type) extern struct kernel_param_ops param_array_ops; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f32578634d9..45db49f64bb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -555,6 +555,8 @@ enum perf_event_type { PERF_RECORD_MAX, /* non-ABI */ }; +#define PERF_MAX_STACK_DEPTH 127 + enum perf_callchain_context { PERF_CONTEXT_HV = (__u64)-32, PERF_CONTEXT_KERNEL = (__u64)-128, @@ -609,8 +611,6 @@ struct perf_guest_info_callbacks { #include <linux/sysfs.h> #include <asm/local.h> -#define PERF_MAX_STACK_DEPTH 255 - struct perf_callchain_entry { __u64 nr; __u64 ip[PERF_MAX_STACK_DEPTH]; diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 711e0a30aac..3988012255d 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -127,8 +127,8 @@ #define PR_SET_PTRACER 0x59616d61 # define PR_SET_PTRACER_ANY ((unsigned long)-1) -#define PR_SET_CHILD_SUBREAPER 36 -#define PR_GET_CHILD_SUBREAPER 37 +#define PR_SET_CHILD_SUBREAPER 36 +#define PR_GET_CHILD_SUBREAPER 37 /* * If no_new_privs is set, then operations that grant new privileges (i.e. @@ -142,7 +142,9 @@ * asking selinux for a specific new context (e.g. with runcon) will result * in execve returning -EPERM. 
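
The comment block above is essentially the whole contract of no_new_privs, so a minimal userspace exercise of the two prctls is short; the constants are re-defined defensively in case the libc headers predate this interface:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_NO_NEW_PRIVS
    #define PR_SET_NO_NEW_PRIVS 38
    #endif
    #ifndef PR_GET_NO_NEW_PRIVS
    #define PR_GET_NO_NEW_PRIVS 39
    #endif

    int main(void)
    {
            /* one-way switch: inherited across fork/execve, never unset */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    perror("PR_SET_NO_NEW_PRIVS");

            printf("no_new_privs=%d\n",
                   prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
            return 0;
    }

With the bit set, a later execve() of a setuid or file-capability binary must not grant the extra privileges, per the comment above.
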
*/ -#define PR_SET_NO_NEW_PRIVS 38 -#define PR_GET_NO_NEW_PRIVS 39 +#define PR_SET_NO_NEW_PRIVS 38 +#define PR_GET_NO_NEW_PRIVS 39 + +#define PR_GET_TID_ADDRESS 40 #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 0d04cd69ab9..ffc444c38b0 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) iter->index++; if (likely(*slot)) return slot; - if (flags & RADIX_TREE_ITER_CONTIG) + if (flags & RADIX_TREE_ITER_CONTIG) { + /* forbid switching to the next chunk */ + iter->next_index = 0; break; + } } } return NULL; diff --git a/include/linux/sched.h b/include/linux/sched.h index f34437e835a..4059c0f33f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void); extern void calc_global_load(unsigned long ticks); +extern void update_cpu_load_nohz(void); extern unsigned long get_parent_ip(unsigned long addr); @@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm); /* leave room for more dump flags */ #define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ +#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) @@ -875,6 +877,8 @@ struct sched_group_power { * Number of busy cpus in this group. */ atomic_t nr_busy_cpus; + + unsigned long cpumask[0]; /* iteration mask */ }; struct sched_group { @@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg) return to_cpumask(sg->cpumask); } +/* + * cpumask masking which cpus in the group are allowed to iterate up the domain + * tree. + */ +static inline struct cpumask *sched_group_mask(struct sched_group *sg) +{ + return to_cpumask(sg->sgp->cpumask); +} + /** * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. * @group: The group whose first cpu is to be returned. 
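
The cpumask[0] member appended to sched_group_power above is the trailing-array idiom: storage for the mask lives in the same allocation as the struct, sized at runtime. A standalone sketch (allocator and names invented):

    #include <stdlib.h>
    #include <string.h>

    struct group_power_like {
            unsigned int power;
            unsigned long cpumask[];    /* C99 spelling of the [0] idiom */
    };

    static struct group_power_like *alloc_group_power(size_t mask_longs)
    {
            struct group_power_like *gp =
                    malloc(sizeof(*gp) + mask_longs * sizeof(unsigned long));

            if (gp)
                    memset(gp->cpumask, 0, mask_longs * sizeof(unsigned long));
            return gp;
    }

One allocation instead of two also keeps the mask adjacent to the fields read alongside it, which matters for a structure the scheduler touches on hot paths.
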
@@ -1187,7 +1200,6 @@ struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned int time_slice; - int nr_cpus_allowed; struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED @@ -1252,6 +1264,7 @@ struct task_struct { #endif unsigned int policy; + int nr_cpus_allowed; cpumask_t cpus_allowed; #ifdef CONFIG_PREEMPT_RCU diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b534a1be540..642cb7355df 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -225,14 +225,11 @@ enum { /* device driver is going to provide hardware time stamp */ SKBTX_IN_PROGRESS = 1 << 2, - /* ensure the originating sk reference is available on driver level */ - SKBTX_DRV_NEEDS_SK_REF = 1 << 3, - /* device driver supports TX zero-copy buffers */ - SKBTX_DEV_ZEROCOPY = 1 << 4, + SKBTX_DEV_ZEROCOPY = 1 << 3, /* generate wifi status information (where possible) */ - SKBTX_WIFI_STATUS = 1 << 5, + SKBTX_WIFI_STATUS = 1 << 4, }; /* diff --git a/include/linux/swap.h b/include/linux/swap.h index b6661933e25..c84ec68eaec 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -197,6 +197,10 @@ struct swap_info_struct { struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ unsigned int old_block_size; /* seldom referenced */ +#ifdef CONFIG_FRONTSWAP + unsigned long *frontswap_map; /* frontswap in-use, one bit per page */ + atomic_t frontswap_pages; /* frontswap pages in-use counter */ +#endif }; struct swap_list_t { diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h new file mode 100644 index 00000000000..e282624e8c1 --- /dev/null +++ b/include/linux/swapfile.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_SWAPFILE_H +#define _LINUX_SWAPFILE_H + +/* + * these were static in swapfile.c but frontswap.c needs them and we don't + * want to expose them to the dozens of source files that include swap.h + */ +extern spinlock_t swap_lock; +extern struct swap_list_t swap_list; +extern struct swap_info_struct *swap_info[]; +extern int try_to_unuse(unsigned int, bool, unsigned long); + +#endif /* _LINUX_SWAPFILE_H */ diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b455c7c212e..60da41fe9dc 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -12,6 +12,9 @@ enum vga_switcheroo_state { VGA_SWITCHEROO_OFF, VGA_SWITCHEROO_ON, + /* below are referred only from vga_switcheroo_get_client_state() */ + VGA_SWITCHEROO_INIT, + VGA_SWITCHEROO_NOT_FOUND, }; enum vga_switcheroo_client_id { @@ -50,6 +53,8 @@ void vga_switcheroo_unregister_handler(void); int vga_switcheroo_process_delayed_switch(void); +int vga_switcheroo_get_client_state(struct pci_dev *dev); + #else static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} @@ -62,5 +67,7 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, int id, bool active) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } +static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } + #endif diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index a192f780765..7fdbad52254 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -302,6 +302,8 @@ extern void fib6_run_gc(unsigned long expires, extern void fib6_gc_cleanup(void); extern int fib6_init(void); +extern int fib6_init_late(void); +extern void fib6_cleanup_late(void); #ifdef 
CONFIG_IPV6_MULTIPLE_TABLES extern int fib6_rules_init(void); diff --git a/init/main.c b/init/main.c index 1ca6b32c482..b5cc0a7c470 100644 --- a/init/main.c +++ b/init/main.c @@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void) parse_early_param(); parse_args("Booting kernel", static_command_line, __start___param, __stop___param - __start___param, - 0, 0, &unknown_bootoption); + -1, -1, &unknown_bootoption); jump_label_init(); @@ -755,13 +755,8 @@ static void __init do_initcalls(void) { int level; - for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { - pr_info("initlevel:%d=%s, %d registered initcalls\n", - level, initcall_level_names[level], - (int) (initcall_levels[level+1] - - initcall_levels[level])); + for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) do_initcall_level(level); - } } /* diff --git a/ipc/shm.c b/ipc/shm.c index 5e2cbfdab6f..41c1285d697 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } +static long shm_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) +{ + struct shm_file_data *sfd = shm_file_data(file); + + if (!sfd->file->f_op->fallocate) + return -EOPNOTSUPP; + return sfd->file->f_op->fallocate(file, mode, offset, len); +} + static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = { .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, + .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { @@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = { .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, + .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0f3527d6184..72fcd3069a9 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -896,10 +896,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) mutex_unlock(&cgroup_mutex); /* - * Drop the active superblock reference that we took when we - * created the cgroup + * We want to drop the active superblock reference from the + * cgroup creation after all the dentry refs are gone - + * kill_sb gets mighty unhappy otherwise. Mark + * dentry->d_fsdata with cgroup_diput() to tell + * cgroup_d_release() to call deactivate_super(). */ - deactivate_super(cgrp->root->sb); + dentry->d_fsdata = cgroup_diput; /* * if we're getting rid of the cgroup, refcount should ensure @@ -925,6 +928,13 @@ static int cgroup_delete(const struct dentry *d) return 1; } +static void cgroup_d_release(struct dentry *dentry) +{ + /* did cgroup_diput() tell me to deactivate super? 
*/ + if (dentry->d_fsdata == cgroup_diput) + deactivate_super(dentry->d_sb); +} + static void remove_dir(struct dentry *d) { struct dentry *parent = dget(d->d_parent); @@ -1532,6 +1542,7 @@ static int cgroup_get_rootdir(struct super_block *sb) static const struct dentry_operations cgroup_dops = { .d_iput = cgroup_diput, .d_delete = cgroup_delete, + .d_release = cgroup_d_release, }; struct inode *inode = diff --git a/kernel/events/core.c b/kernel/events/core.c index 5b06cbbf693..f85c0154b33 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3181,7 +3181,6 @@ static void perf_event_for_each(struct perf_event *event, event = event->group_leader; perf_event_for_each_child(event, func); - func(event); list_for_each_entry(sibling, &event->sibling_list, group_entry) perf_event_for_each_child(sibling, func); mutex_unlock(&ctx->mutex); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index fc275e4f629..eebd6d5cfb4 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq) kstat_incr_irqs_this_cpu(irq, desc); action = desc->action; - if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) + if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; goto out_unlock; + } irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock_irq(&desc->lock); @@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); kstat_incr_irqs_this_cpu(irq, desc); - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { + desc->istate |= IRQS_PENDING; goto out_unlock; + } handle_irq_event(desc); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 8e5c56b3b7d..001fa5bab49 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); extern void irq_set_thread_affinity(struct irq_desc *desc); +extern int irq_do_set_affinity(struct irq_data *data, + const struct cpumask *dest, bool force); + /* Inline functions for support of irq chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ea0c6c2ae6f..8c548232ba3 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -142,6 +142,25 @@ static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } #endif +int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + struct irq_desc *desc = irq_data_to_desc(data); + struct irq_chip *chip = irq_data_get_irq_chip(data); + int ret; + + ret = chip->irq_set_affinity(data, mask, false); + switch (ret) { + case IRQ_SET_MASK_OK: + cpumask_copy(data->affinity, mask); + case IRQ_SET_MASK_OK_NOCOPY: + irq_set_thread_affinity(desc); + ret = 0; + } + + return ret; +} + int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) { struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -152,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) return -EINVAL; if (irq_can_move_pcntxt(data)) { - ret = chip->irq_set_affinity(data, mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(data->affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - ret = 0; - } + ret = irq_do_set_affinity(data, mask, false); } else { 
irqd_set_move_pending(data); irq_copy_pending(desc, mask); @@ -283,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); static int setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) { - struct irq_chip *chip = irq_desc_get_chip(desc); struct cpumask *set = irq_default_affinity; - int ret, node = desc->irq_data.node; + int node = desc->irq_data.node; /* Excludes PER_CPU and NO_BALANCE interrupts */ if (!irq_can_set_affinity(irq)) @@ -311,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) if (cpumask_intersects(mask, nodemask)) cpumask_and(mask, mask, nodemask); } - ret = chip->irq_set_affinity(&desc->irq_data, mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(desc->irq_data.affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - } + irq_do_set_affinity(&desc->irq_data, mask, false); return 0; } #else diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index c3c89751b32..ca3f4aaff70 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata) * For correct operation this depends on the caller * masking the irqs. */ - if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) - < nr_cpu_ids)) { - int ret = chip->irq_set_affinity(&desc->irq_data, - desc->pending_mask, false); - switch (ret) { - case IRQ_SET_MASK_OK: - cpumask_copy(desc->irq_data.affinity, desc->pending_mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); - } - } + if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) + irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); cpumask_clear(desc->pending_mask); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 39eb6011bc3..d5594a4268d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features = #define SCHED_FEAT(name, enabled) \ #name , -static __read_mostly char *sched_feat_names[] = { +static const char * const sched_feat_names[] = { #include "features.h" - NULL }; #undef SCHED_FEAT @@ -2517,25 +2516,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, sched_avg_update(this_rq); } +#ifdef CONFIG_NO_HZ +/* + * There is no sane way to deal with nohz on smp when using jiffies because the + * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading + * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}. + * + * Therefore we cannot use the delta approach from the regular tick since that + * would seriously skew the load calculation. However we'll make do for those + * updates happening while idle (nohz_idle_balance) or coming out of idle + * (tick_nohz_idle_exit). + * + * This means we might still be one tick off for nohz periods. + */ + /* * Called from nohz_idle_balance() to update the load ratings before doing the * idle balance. */ void update_idle_cpu_load(struct rq *this_rq) { - unsigned long curr_jiffies = jiffies; + unsigned long curr_jiffies = ACCESS_ONCE(jiffies); unsigned long load = this_rq->load.weight; unsigned long pending_updates; /* - * Bloody broken means of dealing with nohz, but better than nothing.. - * jiffies is updated by one cpu, another cpu can drift wrt the jiffy - * update and see 0 difference the one time and 2 the next, even though - * we ticked at roughtly the same rate. 
- * - * Hence we only use this from nohz_idle_balance() and skip this - * nonsense when called from the scheduler_tick() since that's - * guaranteed a stable rate. + * bail if there's load or we're actually up-to-date. */ if (load || curr_jiffies == this_rq->last_load_update_tick) return; @@ -2547,12 +2553,38 @@ void update_idle_cpu_load(struct rq *this_rq) } /* + * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed. + */ +void update_cpu_load_nohz(void) +{ + struct rq *this_rq = this_rq(); + unsigned long curr_jiffies = ACCESS_ONCE(jiffies); + unsigned long pending_updates; + + if (curr_jiffies == this_rq->last_load_update_tick) + return; + + raw_spin_lock(&this_rq->lock); + pending_updates = curr_jiffies - this_rq->last_load_update_tick; + if (pending_updates) { + this_rq->last_load_update_tick = curr_jiffies; + /* + * We were idle, this means load 0, the current load might be + * !0 due to remote wakeups and the sort. + */ + __update_cpu_load(this_rq, 0, pending_updates); + } + raw_spin_unlock(&this_rq->lock); +} +#endif /* CONFIG_NO_HZ */ + +/* * Called from scheduler_tick() */ static void update_cpu_load_active(struct rq *this_rq) { /* - * See the mess in update_idle_cpu_load(). + * See the mess around update_idle_cpu_load() / update_cpu_load_nohz(). */ this_rq->last_load_update_tick = jiffies; __update_cpu_load(this_rq, this_rq->load.weight, 1); @@ -4982,7 +5014,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) p->sched_class->set_cpus_allowed(p, new_mask); cpumask_copy(&p->cpus_allowed, new_mask); - p->rt.nr_cpus_allowed = cpumask_weight(new_mask); + p->nr_cpus_allowed = cpumask_weight(new_mask); } /* @@ -5524,15 +5556,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ #ifdef CONFIG_SCHED_DEBUG -static __read_mostly int sched_domain_debug_enabled; +static __read_mostly int sched_debug_enabled; -static int __init sched_domain_debug_setup(char *str) +static int __init sched_debug_setup(char *str) { - sched_domain_debug_enabled = 1; + sched_debug_enabled = 1; return 0; } -early_param("sched_debug", sched_domain_debug_setup); +early_param("sched_debug", sched_debug_setup); + +static inline bool sched_debug(void) +{ + return sched_debug_enabled; +} static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct cpumask *groupmask) @@ -5572,7 +5609,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, break; } - if (!group->sgp->power) { + /* + * Even though we initialize ->power to something semi-sane, + * we leave power_orig unset. This allows us to detect if + * domain iteration is still funny without causing /0 traps. + */ + if (!group->sgp->power_orig) { printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); @@ -5620,7 +5662,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) { int level = 0; - if (!sched_domain_debug_enabled) + if (!sched_debug_enabled) return; if (!sd) { @@ -5641,6 +5683,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) } #else /* !CONFIG_SCHED_DEBUG */ # define sched_domain_debug(sd, cpu) do { } while (0) +static inline bool sched_debug(void) +{ + return false; +} #endif /* CONFIG_SCHED_DEBUG */ static int sd_degenerate(struct sched_domain *sd) @@ -5962,6 +6008,44 @@ struct sched_domain_topology_level { struct sd_data data; }; +/* + * Build an iteration mask that can exclude certain CPUs from the upwards + * domain traversal. 
+ * + * Asymmetric node setups can result in situations where the domain tree is of + * unequal depth, make sure to skip domains that already cover the entire + * range. + * + * In that case build_sched_domains() will have terminated the iteration early + * and our sibling sd spans will be empty. Domains should always include the + * cpu they're built on, so check that. + * + */ +static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) +{ + const struct cpumask *span = sched_domain_span(sd); + struct sd_data *sdd = sd->private; + struct sched_domain *sibling; + int i; + + for_each_cpu(i, span) { + sibling = *per_cpu_ptr(sdd->sd, i); + if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + continue; + + cpumask_set_cpu(i, sched_group_mask(sg)); + } +} + +/* + * Return the canonical balance cpu for this group, this is the first cpu + * of this group that's also in the iteration mask. + */ +int group_balance_cpu(struct sched_group *sg) +{ + return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); +} + static int build_overlap_sched_groups(struct sched_domain *sd, int cpu) { @@ -5980,6 +6064,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) if (cpumask_test_cpu(i, covered)) continue; + child = *per_cpu_ptr(sdd->sd, i); + + /* See the comment near build_group_mask(). */ + if (!cpumask_test_cpu(i, sched_domain_span(child))) + continue; + sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, cpu_to_node(cpu)); @@ -5987,8 +6077,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) goto fail; sg_span = sched_group_cpus(sg); - - child = *per_cpu_ptr(sdd->sd, i); if (child->child) { child = child->child; cpumask_copy(sg_span, sched_domain_span(child)); @@ -5997,10 +6085,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) cpumask_or(covered, covered, sg_span); - sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); - atomic_inc(&sg->sgp->ref); + sg->sgp = *per_cpu_ptr(sdd->sgp, i); + if (atomic_inc_return(&sg->sgp->ref) == 1) + build_group_mask(sd, sg); - if (cpumask_test_cpu(cpu, sg_span)) + /* + * Initialize sgp->power such that even if we mess up the + * domains and no possible iteration will get us here, we won't + * die on a /0 trap. + */ + sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span); + + /* + * Make sure the first group of this domain contains the + * canonical balance cpu. Otherwise the sched_domain iteration + * breaks. See update_sg_lb_stats(). 
+ */ + if ((!groups && cpumask_test_cpu(cpu, sg_span)) || + group_balance_cpu(sg) == cpu) groups = sg; if (!first) @@ -6074,6 +6176,7 @@ build_sched_groups(struct sched_domain *sd, int cpu) cpumask_clear(sched_group_cpus(sg)); sg->sgp->power = 0; + cpumask_setall(sched_group_mask(sg)); for_each_cpu(j, span) { if (get_group(j, sdd, NULL) != group) @@ -6115,7 +6218,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) sg = sg->next; } while (sg != sd->groups); - if (cpu != group_first_cpu(sg)) + if (cpu != group_balance_cpu(sg)) return; update_group_power(sd, cpu); @@ -6165,11 +6268,8 @@ int sched_domain_level_max; static int __init setup_relax_domain_level(char *str) { - unsigned long val; - - val = simple_strtoul(str, NULL, 0); - if (val < sched_domain_level_max) - default_relax_domain_level = val; + if (kstrtoint(str, 0, &default_relax_domain_level)) + pr_warn("Unable to set relax_domain_level\n"); return 1; } @@ -6279,14 +6379,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol #ifdef CONFIG_NUMA static int sched_domains_numa_levels; -static int sched_domains_numa_scale; static int *sched_domains_numa_distance; static struct cpumask ***sched_domains_numa_masks; static int sched_domains_curr_level; static inline int sd_local_flags(int level) { - if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) + if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE) return 0; return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; @@ -6344,6 +6443,42 @@ static const struct cpumask *sd_numa_mask(int cpu) return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; } +static void sched_numa_warn(const char *str) +{ + static int done = false; + int i,j; + + if (done) + return; + + done = true; + + printk(KERN_WARNING "ERROR: %s\n\n", str); + + for (i = 0; i < nr_node_ids; i++) { + printk(KERN_WARNING " "); + for (j = 0; j < nr_node_ids; j++) + printk(KERN_CONT "%02d ", node_distance(i,j)); + printk(KERN_CONT "\n"); + } + printk(KERN_WARNING "\n"); +} + +static bool find_numa_distance(int distance) +{ + int i; + + if (distance == node_distance(0, 0)) + return true; + + for (i = 0; i < sched_domains_numa_levels; i++) { + if (sched_domains_numa_distance[i] == distance) + return true; + } + + return false; +} + static void sched_init_numa(void) { int next_distance, curr_distance = node_distance(0, 0); @@ -6351,7 +6486,6 @@ static void sched_init_numa(void) int level = 0; int i, j, k; - sched_domains_numa_scale = curr_distance; sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); if (!sched_domains_numa_distance) return; @@ -6362,23 +6496,41 @@ static void sched_init_numa(void) * * Assumes node_distance(0,j) includes all distances in * node_distance(i,j) in order to avoid cubic time. - * - * XXX: could be optimized to O(n log n) by using sort() */ next_distance = curr_distance; for (i = 0; i < nr_node_ids; i++) { for (j = 0; j < nr_node_ids; j++) { - int distance = node_distance(0, j); - if (distance > curr_distance && - (distance < next_distance || - next_distance == curr_distance)) - next_distance = distance; + for (k = 0; k < nr_node_ids; k++) { + int distance = node_distance(i, k); + + if (distance > curr_distance && + (distance < next_distance || + next_distance == curr_distance)) + next_distance = distance; + + /* + * While not a strong assumption it would be nice to know + * about cases where if node A is connected to B, B is not + * equally connected to A. 
+ */ + if (sched_debug() && node_distance(k, i) != distance) + sched_numa_warn("Node-distance not symmetric"); + + if (sched_debug() && i && !find_numa_distance(distance)) + sched_numa_warn("Node-0 not representative"); + } + if (next_distance != curr_distance) { + sched_domains_numa_distance[level++] = next_distance; + sched_domains_numa_levels = level; + curr_distance = next_distance; + } else break; } - if (next_distance != curr_distance) { - sched_domains_numa_distance[level++] = next_distance; - sched_domains_numa_levels = level; - curr_distance = next_distance; - } else break; + + /* + * In case of sched_debug() we verify the above assumption. + */ + if (!sched_debug()) + break; } /* * 'level' contains the number of unique distances, excluding the @@ -6403,7 +6555,7 @@ static void sched_init_numa(void) return; for (j = 0; j < nr_node_ids; j++) { - struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j); + struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); if (!mask) return; @@ -6490,7 +6642,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) *per_cpu_ptr(sdd->sg, j) = sg; - sgp = kzalloc_node(sizeof(struct sched_group_power), + sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(), GFP_KERNEL, cpu_to_node(j)); if (!sgp) return -ENOMEM; @@ -6543,7 +6695,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, if (!sd) return child; - set_domain_attribute(sd, attr); cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); if (child) { sd->level = child->level + 1; @@ -6551,6 +6702,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, child->parent = sd; } sd->child = child; + set_domain_attribute(sd, attr); return sd; } @@ -6691,7 +6843,6 @@ static int init_sched_domains(const struct cpumask *cpu_map) if (!doms_cur) doms_cur = &fallback_doms; cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); - dattr_cur = NULL; err = build_sched_domains(doms_cur[0], NULL); register_sched_domain_sysctl(); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 940e6d17cf9..c099cc6eebe 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) int want_sd = 1; int sync = wake_flags & WF_SYNC; - if (p->rt.nr_cpus_allowed == 1) + if (p->nr_cpus_allowed == 1) return prev_cpu; if (sd_flag & SD_BALANCE_WAKE) { @@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) unsigned long scale_rt_power(int cpu) { struct rq *rq = cpu_rq(cpu); - u64 total, available; + u64 total, available, age_stamp, avg; - total = sched_avg_period() + (rq->clock - rq->age_stamp); + /* + * Since we're reading these variables without serialization make sure + * we read them once before doing sanity checks on them. 
+ */ + age_stamp = ACCESS_ONCE(rq->age_stamp); + avg = ACCESS_ONCE(rq->rt_avg); + + total = sched_avg_period() + (rq->clock - age_stamp); - if (unlikely(total < rq->rt_avg)) { + if (unlikely(total < avg)) { /* Ensures that power won't end up being negative */ available = 0; } else { - available = total - rq->rt_avg; + available = total - avg; } if (unlikely((s64)total < SCHED_POWER_SCALE)) @@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu) power = 0; - group = child->groups; - do { - power += group->sgp->power; - group = group->next; - } while (group != child->groups); + if (child->flags & SD_OVERLAP) { + /* + * SD_OVERLAP domains cannot assume that child groups + * span the current group. + */ - sdg->sgp->power = power; + for_each_cpu(cpu, sched_group_cpus(sdg)) + power += power_of(cpu); + } else { + /* + * !SD_OVERLAP domains can assume that child groups + * span the current group. + */ + + group = child->groups; + do { + power += group->sgp->power; + group = group->next; + } while (group != child->groups); + } + + sdg->sgp->power_orig = sdg->sgp->power = power; } /* @@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. - * @sd: The sched_domain whose statistics are to be updated. + * @env: The load balancing environment. * @group: sched_group whose statistics are to be updated. * @load_idx: Load index of sched_domain of this_cpu for load calc. * @local_group: Does group contain this_cpu. @@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, int i; if (local_group) - balance_cpu = group_first_cpu(group); + balance_cpu = group_balance_cpu(group); /* Tally up the load of all CPUs in the group */ max_cpu_load = 0; @@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env, /* Bias balancing toward cpus of our domain */ if (local_group) { - if (idle_cpu(i) && !first_idle_cpu) { + if (idle_cpu(i) && !first_idle_cpu && + cpumask_test_cpu(i, sched_group_mask(group))) { first_idle_cpu = 1; balance_cpu = i; } @@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env, /** * update_sd_pick_busiest - return 1 on busiest group - * @sd: sched_domain whose statistics are to be checked + * @env: The load balancing environment. * @sds: sched_domain statistics * @sg: sched_group candidate to be checked for being the busiest * @sgs: sched_group statistics - * @this_cpu: the current cpu * * Determine if @sg is a busier group than the previously selected * busiest group. @@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, /** * update_sd_lb_stats - Update sched_domain's statistics for load balancing. - * @sd: sched_domain whose statistics are to be updated. - * @this_cpu: Cpu for which load balance is currently performed. - * @idle: Idle status of this_cpu + * @env: The load balancing environment. * @cpus: Set of cpus considered for load balancing. * @balance: Should we balance. * @sds: variable to hold the statistics for this sched_domain. @@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, * Returns 1 when packing is required and a task should be moved to * this CPU. The amount of the imbalance is returned in *imbalance. * - * @sd: The sched_domain whose packing is to be checked. + * @env: The load balancing environment. 
* @sds: Statistics of the sched_domain which is to be packed - * @this_cpu: The cpu at whose sched_domain we're performing load-balance. - * @imbalance: returns amount of imbalanced due to packing. */ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) { @@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) * fix_small_imbalance - Calculate the minor imbalance that exists * amongst the groups of a sched_domain, during * load balancing. + * @env: The load balancing environment. * @sds: Statistics of the sched_domain whose imbalance is to be calculated. - * @this_cpu: The cpu at whose sched_domain we're performing load-balance. - * @imbalance: Variable to store the imbalance. */ static inline void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) @@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * Also calculates the amount of weighted load which should be moved * to restore balance. * - * @sd: The sched_domain whose busiest group is to be returned. - * @this_cpu: The cpu for which load balancing is currently being performed. - * @imbalance: Variable which stores amount of weighted load which should - * be moved to restore balance/put a group to idle. - * @idle: The idle status of this_cpu. + * @env: The load balancing environment. * @cpus: The set of CPUs under consideration for load-balancing. * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c5565c3c515..573e1ca0110 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq) static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { + struct task_struct *p; + if (!rt_entity_is_task(rt_se)) return; + p = rt_task_of(rt_se); rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total++; - if (rt_se->nr_cpus_allowed > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory++; update_rt_migration(rt_rq); @@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { + struct task_struct *p; + if (!rt_entity_is_task(rt_se)) return; + p = rt_task_of(rt_se); rt_rq = &rq_of_rt_rq(rt_rq)->rt; rt_rq->rt_nr_total--; - if (rt_se->nr_cpus_allowed > 1) + if (p->nr_cpus_allowed > 1) rt_rq->rt_nr_migratory--; update_rt_migration(rt_rq); @@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); - if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) + if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); inc_nr_running(rq); @@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) cpu = task_cpu(p); - if (p->rt.nr_cpus_allowed == 1) + if (p->nr_cpus_allowed == 1) goto out; /* For anything but wake ups, just return the task_cpu */ @@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) * will have to sort it out. 
*/ if (curr && unlikely(rt_task(curr)) && - (curr->rt.nr_cpus_allowed < 2 || + (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio) && - (p->rt.nr_cpus_allowed > 1)) { + (p->nr_cpus_allowed > 1)) { int target = find_lowest_rq(p); if (target != -1) @@ -1276,10 +1282,10 @@ out: static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) { - if (rq->curr->rt.nr_cpus_allowed == 1) + if (rq->curr->nr_cpus_allowed == 1) return; - if (p->rt.nr_cpus_allowed != 1 + if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) return; @@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) * The previous task needs to be made eligible for pushing * if it is still active */ - if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) + if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); } @@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && - (p->rt.nr_cpus_allowed > 1)) + (p->nr_cpus_allowed > 1)) return 1; return 0; } @@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task) if (unlikely(!lowest_mask)) return -1; - if (task->rt.nr_cpus_allowed == 1) + if (task->nr_cpus_allowed == 1) return -1; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) @@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) task_running(rq, task) || !task->on_rq)) { - raw_spin_unlock(&lowest_rq->lock); + double_unlock_balance(rq, lowest_rq); lowest_rq = NULL; break; } @@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq) BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); - BUG_ON(p->rt.nr_cpus_allowed <= 1); + BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!p->on_rq); BUG_ON(!rt_task(p)); @@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && - p->rt.nr_cpus_allowed > 1 && + p->nr_cpus_allowed > 1 && rt_task(rq->curr) && - (rq->curr->rt.nr_cpus_allowed < 2 || + (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio)) push_rt_tasks(rq); } @@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, * Only update if the process changes its state from whether it * can migrate or not. 
*/ - if ((p->rt.nr_cpus_allowed > 1) == (weight > 1)) + if ((p->nr_cpus_allowed > 1) == (weight > 1)) return; rq = task_rq(p); @@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p) static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) { + struct sched_rt_entity *rt_se = &p->rt; + update_curr_rt(rq); watchdog(rq, p); @@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) p->rt.time_slice = RR_TIMESLICE; /* - * Requeue to the end of queue if we are not the only element - * on the queue: + * Requeue to the end of queue if we (and all of our ancestors) are the + * only element on the queue */ - if (p->rt.run_list.prev != p->rt.run_list.next) { - requeue_task_rt(rq, p, 0); - set_tsk_need_resched(p); + for_each_sched_rt_entity(rt_se) { + if (rt_se->run_list.prev != rt_se->run_list.next) { + requeue_task_rt(rq, p, 0); + set_tsk_need_resched(p); + return; + } } } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ba9dccfd24c..6d52cea7f33 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) DECLARE_PER_CPU(struct sched_domain *, sd_llc); DECLARE_PER_CPU(int, sd_llc_id); +extern int group_balance_cpu(struct sched_group *sg); + #endif /* CONFIG_SMP */ #include "stats.h" diff --git a/kernel/smpboot.c b/kernel/smpboot.c index e1a797e028a..98f60c5caa1 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void) per_cpu(idle_threads, smp_processor_id()) = current; } +/** + * idle_init - Initialize the idle thread for a cpu + * @cpu: The cpu for which the idle thread should be initialized + * + * Creates the thread if it does not exist. + */ static inline void idle_init(unsigned int cpu) { struct task_struct *tsk = per_cpu(idle_threads, cpu); @@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu) } /** - * idle_thread_init - Initialize the idle thread for a cpu - * @cpu: The cpu for which the idle thread should be initialized - * - * Creates the thread if it does not exist. + * idle_threads_init - Initialize idle threads for all cpus */ void __init idle_threads_init(void) { - unsigned int cpu; + unsigned int cpu, boot_cpu; + + boot_cpu = smp_processor_id(); for_each_possible_cpu(cpu) { - if (cpu != smp_processor_id()) + if (cpu != boot_cpu) idle_init(cpu); } } diff --git a/kernel/sys.c b/kernel/sys.c index 9ff89cb9657..f0ec44dcd41 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1786,27 +1786,13 @@ SYSCALL_DEFINE1(umask, int, mask) } #ifdef CONFIG_CHECKPOINT_RESTORE -static bool vma_flags_mismatch(struct vm_area_struct *vma, - unsigned long required, - unsigned long banned) -{ - return (vma->vm_flags & required) != required || - (vma->vm_flags & banned); -} - static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) { + struct vm_area_struct *vma; struct file *exe_file; struct dentry *dentry; int err; - /* - * Setting new mm::exe_file is only allowed when no VM_EXECUTABLE vma's - * remain. So perform a quick test first. - */ - if (mm->num_exe_file_vmas) - return -EBUSY; - exe_file = fget(fd); if (!exe_file) return -EBADF; @@ -1827,17 +1813,30 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) if (err) goto exit; + down_write(&mm->mmap_sem); + + /* + * Forbid mm->exe_file change if there are mapped other files. 
+ */ + err = -EBUSY; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (vma->vm_file && !path_equal(&vma->vm_file->f_path, + &exe_file->f_path)) + goto exit_unlock; + } + /* * The symlink can be changed only once, just to disallow arbitrary * transitions malicious software might bring in. This means one * could make a snapshot over all processes running and monitor * /proc/pid/exe changes to notice unusual activity if needed. */ - down_write(&mm->mmap_sem); - if (likely(!mm->exe_file)) - set_mm_exe_file(mm, exe_file); - else - err = -EBUSY; + err = -EPERM; + if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags)) + goto exit_unlock; + + set_mm_exe_file(mm, exe_file); +exit_unlock: up_write(&mm->mmap_sem); exit: @@ -1862,7 +1861,7 @@ static int prctl_set_mm(int opt, unsigned long addr, if (opt == PR_SET_MM_EXE_FILE) return prctl_set_mm_exe_file(mm, (unsigned int)addr); - if (addr >= TASK_SIZE) + if (addr >= TASK_SIZE || addr < mmap_min_addr) return -EINVAL; error = -EINVAL; @@ -1924,12 +1923,6 @@ static int prctl_set_mm(int opt, unsigned long addr, error = -EFAULT; goto out; } -#ifdef CONFIG_STACK_GROWSUP - if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSUP, 0)) -#else - if (vma_flags_mismatch(vma, VM_READ | VM_WRITE | VM_GROWSDOWN, 0)) -#endif - goto out; if (opt == PR_SET_MM_START_STACK) mm->start_stack = addr; else if (opt == PR_SET_MM_ARG_START) @@ -1981,12 +1974,22 @@ out: up_read(&mm->mmap_sem); return error; } + +static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) +{ + return put_user(me->clear_child_tid, tid_addr); +} + #else /* CONFIG_CHECKPOINT_RESTORE */ static int prctl_set_mm(int opt, unsigned long addr, unsigned long arg4, unsigned long arg5) { return -EINVAL; } +static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) +{ + return -EINVAL; +} #endif SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, @@ -2124,6 +2127,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, else return -EINVAL; break; + case PR_GET_TID_ADDRESS: + error = prctl_get_tid_address(me, (int __user **)arg2); + break; default: return -EINVAL; } diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 9cd928f7a7c..7e1ce012a85 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev) } EXPORT_SYMBOL_GPL(clockevents_register_device); -static void clockevents_config(struct clock_event_device *dev, - u32 freq) +void clockevents_config(struct clock_event_device *dev, u32 freq) { u64 sec; diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6a3a5b9ff56..da70c6db496 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -576,6 +576,7 @@ void tick_nohz_idle_exit(void) /* Update jiffies first */ select_nohz_load_balancer(0); tick_do_update_jiffies64(now); + update_cpu_load_nohz(); #ifndef CONFIG_VIRT_CPU_ACCOUNTING /* @@ -814,6 +815,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) return HRTIMER_RESTART; } +static int sched_skew_tick; + +static int __init skew_tick(char *str) +{ + get_option(&str, &sched_skew_tick); + + return 0; +} +early_param("skew_tick", skew_tick); + /** * tick_setup_sched_timer - setup the tick emulation timer */ @@ -831,6 +842,14 @@ void tick_setup_sched_timer(void) /* Get the next period (per cpu) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); + /* Offset the tick to avert 
xtime_lock contention. */ + if (sched_skew_tick) { + u64 offset = ktime_to_ns(tick_period) >> 1; + do_div(offset, num_possible_cpus()); + offset *= smp_processor_id(); + hrtimer_add_expires_ns(&ts->sched_timer, offset); + } + for (;;) { hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_start_expires(&ts->sched_timer, diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 6e46cacf596..6f46a00a1e8 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -962,6 +962,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) timekeeper.xtime.tv_sec++; leap = second_overflow(timekeeper.xtime.tv_sec); timekeeper.xtime.tv_sec += leap; + timekeeper.wall_to_monotonic.tv_sec -= leap; } /* Accumulate raw time */ @@ -1077,6 +1078,7 @@ static void update_wall_time(void) timekeeper.xtime.tv_sec++; leap = second_overflow(timekeeper.xtime.tv_sec); timekeeper.xtime.tv_sec += leap; + timekeeper.wall_to_monotonic.tv_sec -= leap; } timekeeping_update(false); diff --git a/lib/btree.c b/lib/btree.c index e5ec1e9c1aa..f9a484676cb 100644 --- a/lib/btree.c +++ b/lib/btree.c @@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, if (head->height == 0) return NULL; -retry: longcpy(key, __key, geo->keylen); +retry: dec_key(geo, key); node = head->node; @@ -351,7 +351,7 @@ retry: } miss: if (retry_key) { - __key = retry_key; + longcpy(key, retry_key, geo->keylen); retry_key = NULL; goto retry; } @@ -509,6 +509,7 @@ retry: int btree_insert(struct btree_head *head, struct btree_geo *geo, unsigned long *key, void *val, gfp_t gfp) { + BUG_ON(!val); return btree_insert_level(head, geo, key, val, 1, gfp); } EXPORT_SYMBOL_GPL(btree_insert); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index d7c878cc006..e7964296fd5 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, * during iterating; it can be zero only at the beginning. * And we cannot overflow iter->next_index in a single step, * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. + * + * This condition is also used by radix_tree_next_slot() to stop + * contiguous iterating, and forbid switching to the next chunk. */ index = iter->next_index; if (!index && iter->index) diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c index 1805a5cc5da..a95bccb8497 100644 --- a/lib/raid6/recov.c +++ b/lib/raid6/recov.c @@ -22,8 +22,8 @@ #include <linux/raid/pq.h> /* Recover two failed data blocks. 
*/ -void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, - void **ptrs) +static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, + int failb, void **ptrs) { u8 *p, *q, *dp, *dq; u8 px, qx, db; @@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, } /* Recover failure of one data block plus the P block */ -void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) +static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, + void **ptrs) { u8 *p, *q, *dq; const u8 *qmul; /* Q multiplier table */ diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c index 37ae6193055..ecb710c0b4d 100644 --- a/lib/raid6/recov_ssse3.c +++ b/lib/raid6/recov_ssse3.c @@ -19,8 +19,8 @@ static int raid6_has_ssse3(void) boot_cpu_has(X86_FEATURE_SSSE3); } -void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, - void **ptrs) +static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, + int failb, void **ptrs) { u8 *p, *q, *dp, *dq; const u8 *pbmul; /* P multiplier table for B data */ @@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, } -void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) +static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, + void **ptrs) { u8 *p, *q, *dq; const u8 *qmul; /* Q multiplier table */ diff --git a/mm/Kconfig b/mm/Kconfig index b2176374b98..82fed4eb2b6 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -389,3 +389,20 @@ config CLEANCACHE in a negligible performance hit. If unsure, say Y to enable cleancache + +config FRONTSWAP + bool "Enable frontswap to cache swap pages if tmem is present" + depends on SWAP + default n + help + Frontswap is so named because it can be thought of as the opposite + of a "backing" store for a swap device. The data is stored into + "transcendent memory", memory that is not directly accessible or + addressable by the kernel and is of unknown and possibly + time-varying size. When space in transcendent memory is available, + a significant swap I/O reduction may be achieved. When none is + available, all frontswap calls are reduced to a single pointer- + compare-against-NULL resulting in a negligible performance hit + and swap data is stored as normal on the matching swap device. + + If unsure, say Y to enable frontswap. 
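The FRONTSWAP help text above describes the frontend side only. For a rough sense of the backend side this option enables, a hypothetical skeleton is sketched below; the callback names and signatures are inferred from the indirect calls in mm/frontswap.c further down in this diff ((*frontswap_ops.init)(type), (*frontswap_ops.store)(type, offset, page), and so on), and every dummy_* identifier is invented for illustration rather than taken from a real driver.

    /*
     * Hypothetical minimal frontswap backend.  The ops member names and
     * signatures follow the calls made by mm/frontswap.c in this diff;
     * the dummy_* identifiers are placeholders.
     */
    #include <linux/module.h>
    #include <linux/frontswap.h>

    static void dummy_init(unsigned type)
    {
            /* set up per-swap-device state for this swap type */
    }

    static int dummy_store(unsigned type, pgoff_t offset, struct page *page)
    {
            /* copy the page into backend storage; return 0 on success,
             * anything else makes swap fall back to the real device */
            return -1;
    }

    static int dummy_load(unsigned type, pgoff_t offset, struct page *page)
    {
            /* refill the page from a previously successful store */
            return -1;
    }

    static void dummy_invalidate_page(unsigned type, pgoff_t offset)
    {
            /* drop one stored page */
    }

    static void dummy_invalidate_area(unsigned type)
    {
            /* drop everything stored for this swap type (swapoff) */
    }

    static struct frontswap_ops dummy_ops = {
            .init            = dummy_init,
            .store           = dummy_store,
            .load            = dummy_load,
            .invalidate_page = dummy_invalidate_page,
            .invalidate_area = dummy_invalidate_area,
    };

    static int __init dummy_frontswap_init(void)
    {
            /* the previous ops are returned so nesting can be detected */
            struct frontswap_ops old = frontswap_register_ops(&dummy_ops);

            (void)old;
            return 0;
    }
    module_init(dummy_frontswap_init);

A real backend would move page contents to and from its own storage in store/load instead of failing unconditionally; returning failure from store is always safe, since the page is then simply written to the matching swap device as the help text describes.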
diff --git a/mm/Makefile b/mm/Makefile index a156285ce88..2e2fbbefb99 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o +obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o diff --git a/mm/compaction.c b/mm/compaction.c index 4ac338af512..7ea259d82a9 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, */ while (unlikely(too_many_isolated(zone))) { /* async migration should just abort */ - if (cc->mode != COMPACT_SYNC) + if (!cc->sync) return 0; congestion_wait(BLK_RW_ASYNC, HZ/10); @@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, * satisfies the allocation */ pageblock_nr = low_pfn >> pageblock_order; - if (cc->mode != COMPACT_SYNC && - last_pageblock_nr != pageblock_nr && + if (!cc->sync && last_pageblock_nr != pageblock_nr && !migrate_async_suitable(get_pageblock_migratetype(page))) { low_pfn += pageblock_nr_pages; low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; @@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, continue; } - if (cc->mode != COMPACT_SYNC) + if (!cc->sync) mode |= ISOLATE_ASYNC_MIGRATE; lruvec = mem_cgroup_page_lruvec(page, zone); @@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION -/* - * Returns true if MIGRATE_UNMOVABLE pageblock was successfully - * converted to MIGRATE_MOVABLE type, false otherwise. - */ -static bool rescue_unmovable_pageblock(struct page *page) -{ - unsigned long pfn, start_pfn, end_pfn; - struct page *start_page, *end_page; - - pfn = page_to_pfn(page); - start_pfn = pfn & ~(pageblock_nr_pages - 1); - end_pfn = start_pfn + pageblock_nr_pages; - - start_page = pfn_to_page(start_pfn); - end_page = pfn_to_page(end_pfn); - - /* Do not deal with pageblocks that overlap zones */ - if (page_zone(start_page) != page_zone(end_page)) - return false; - - for (page = start_page, pfn = start_pfn; page < end_page; pfn++, - page++) { - if (!pfn_valid_within(pfn)) - continue; - - if (PageBuddy(page)) { - int order = page_order(page); - - pfn += (1 << order) - 1; - page += (1 << order) - 1; - - continue; - } else if (page_count(page) == 0 || PageLRU(page)) - continue; - - return false; - } - - set_pageblock_migratetype(page, MIGRATE_MOVABLE); - move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE); - return true; -} -enum smt_result { - GOOD_AS_MIGRATION_TARGET, - FAIL_UNMOVABLE_TARGET, - FAIL_BAD_TARGET, -}; - -/* - * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block - * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page - * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise. 
- */ -static enum smt_result suitable_migration_target(struct page *page, - struct compact_control *cc) +/* Returns true if the page is within a block suitable for migration to */ +static bool suitable_migration_target(struct page *page) { int migratetype = get_pageblock_migratetype(page); /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) - return FAIL_BAD_TARGET; + return false; /* If the page is a large free page, then allow migration */ if (PageBuddy(page) && page_order(page) >= pageblock_order) - return GOOD_AS_MIGRATION_TARGET; + return true; /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ - if (cc->mode != COMPACT_ASYNC_UNMOVABLE && - migrate_async_suitable(migratetype)) - return GOOD_AS_MIGRATION_TARGET; - - if (cc->mode == COMPACT_ASYNC_MOVABLE && - migratetype == MIGRATE_UNMOVABLE) - return FAIL_UNMOVABLE_TARGET; - - if (cc->mode != COMPACT_ASYNC_MOVABLE && - migratetype == MIGRATE_UNMOVABLE && - rescue_unmovable_pageblock(page)) - return GOOD_AS_MIGRATION_TARGET; + if (migrate_async_suitable(migratetype)) + return true; /* Otherwise skip the block */ - return FAIL_BAD_TARGET; + return false; } /* @@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone, zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; /* - * isolate_freepages() may be called more than once during - * compact_zone_order() run and we want only the most recent - * count. - */ - cc->nr_pageblocks_skipped = 0; - - /* * Isolate free pages until enough are available to migrate the * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. @@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone, for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; pfn -= pageblock_nr_pages) { unsigned long isolated; - enum smt_result ret; if (!pfn_valid(pfn)) continue; @@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone, continue; /* Check the block is suitable for migration */ - ret = suitable_migration_target(page, cc); - if (ret != GOOD_AS_MIGRATION_TARGET) { - if (ret == FAIL_UNMOVABLE_TARGET) - cc->nr_pageblocks_skipped++; + if (!suitable_migration_target(page)) continue; - } + /* * Found a block suitable for isolating free pages from. Now * we disabled interrupts, double check things are ok and @@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone, */ isolated = 0; spin_lock_irqsave(&zone->lock, flags); - ret = suitable_migration_target(page, cc); - if (ret == GOOD_AS_MIGRATION_TARGET) { + if (suitable_migration_target(page)) { end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); isolated = isolate_freepages_block(pfn, end_pfn, freelist, false); nr_freepages += isolated; - } else if (ret == FAIL_UNMOVABLE_TARGET) - cc->nr_pageblocks_skipped++; + } spin_unlock_irqrestore(&zone->lock, flags); /* @@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) nr_migrate = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, - (unsigned long)&cc->freepages, false, - (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT - : MIGRATE_ASYNC); + (unsigned long)cc, false, + cc->sync ? 
MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); update_nr_listpages(cc); nr_remaining = cc->nr_migratepages; @@ -793,8 +715,7 @@ out: static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - enum compact_mode mode, - unsigned long *nr_pageblocks_skipped) + bool sync) { struct compact_control cc = { .nr_freepages = 0, @@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone, .order = order, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, - .mode = mode, + .sync = sync, }; - unsigned long rc; - INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); - rc = compact_zone(zone, &cc); - *nr_pageblocks_skipped = cc.nr_pageblocks_skipped; - - return rc; + return compact_zone(zone, &cc); } int sysctl_extfrag_threshold = 500; @@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; int rc = COMPACT_SKIPPED; - unsigned long nr_pageblocks_skipped; - enum compact_mode mode; /* * Check whether it is worth even starting compaction. The order check is @@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; -retry: - status = compact_zone_order(zone, order, gfp_mask, mode, - &nr_pageblocks_skipped); + status = compact_zone_order(zone, order, gfp_mask, sync); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) break; - - if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) { - if (nr_pageblocks_skipped) { - mode = COMPACT_ASYNC_UNMOVABLE; - goto retry; - } - } } return rc; @@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) if (ok && cc->order > zone->compact_order_failed) zone->compact_order_failed = cc->order + 1; /* Currently async compaction is never deferred. */ - else if (!ok && cc->mode == COMPACT_SYNC) + else if (!ok && cc->sync) defer_compaction(zone, cc->order); } @@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order) { struct compact_control cc = { .order = order, - .mode = COMPACT_ASYNC_MOVABLE, + .sync = false, }; return __compact_pgdat(pgdat, &cc); @@ -929,7 +833,7 @@ static int compact_node(int nid) { struct compact_control cc = { .order = -1, - .mode = COMPACT_SYNC, + .sync = true, }; return __compact_pgdat(NODE_DATA(nid), &cc); diff --git a/mm/frontswap.c b/mm/frontswap.c new file mode 100644 index 00000000000..e25025574a0 --- /dev/null +++ b/mm/frontswap.c @@ -0,0 +1,314 @@ +/* + * Frontswap frontend + * + * This code provides the generic "frontend" layer to call a matching + * "backend" driver implementation of frontswap. See + * Documentation/vm/frontswap.txt for more information. + * + * Copyright (C) 2009-2012 Oracle Corp. All rights reserved. + * Author: Dan Magenheimer + * + * This work is licensed under the terms of the GNU GPL, version 2. + */ + +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <linux/proc_fs.h> +#include <linux/security.h> +#include <linux/capability.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/frontswap.h> +#include <linux/swapfile.h> + +/* + * frontswap_ops is set by frontswap_register_ops to contain the pointers + * to the frontswap "backend" implementation functions. 
+ */ +static struct frontswap_ops frontswap_ops __read_mostly; + +/* + * This global enablement flag reduces overhead on systems where frontswap_ops + * has not been registered, so is preferred to the slower alternative: a + * function call that checks a non-global. + */ +bool frontswap_enabled __read_mostly; +EXPORT_SYMBOL(frontswap_enabled); + +/* + * If enabled, frontswap_store will return failure even on success. As + * a result, the swap subsystem will always write the page to swap, in + * effect converting frontswap into a writethrough cache. In this mode, + * there is no direct reduction in swap writes, but a frontswap backend + * can unilaterally "reclaim" any pages in use with no data loss, thus + * providing increased control over maximum memory usage due to frontswap. + */ +static bool frontswap_writethrough_enabled __read_mostly; + +#ifdef CONFIG_DEBUG_FS +/* + * Counters available via /sys/kernel/debug/frontswap (if debugfs is + * properly configured). These are for information only so are not protected + * against increment races. + */ +static u64 frontswap_loads; +static u64 frontswap_succ_stores; +static u64 frontswap_failed_stores; +static u64 frontswap_invalidates; + +static inline void inc_frontswap_loads(void) { + frontswap_loads++; +} +static inline void inc_frontswap_succ_stores(void) { + frontswap_succ_stores++; +} +static inline void inc_frontswap_failed_stores(void) { + frontswap_failed_stores++; +} +static inline void inc_frontswap_invalidates(void) { + frontswap_invalidates++; +} +#else +static inline void inc_frontswap_loads(void) { } +static inline void inc_frontswap_succ_stores(void) { } +static inline void inc_frontswap_failed_stores(void) { } +static inline void inc_frontswap_invalidates(void) { } +#endif +/* + * Register operations for frontswap, returning previous thus allowing + * detection of multiple backends and possible nesting. + */ +struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops) +{ + struct frontswap_ops old = frontswap_ops; + + frontswap_ops = *ops; + frontswap_enabled = true; + return old; +} +EXPORT_SYMBOL(frontswap_register_ops); + +/* + * Enable/disable frontswap writethrough (see above). + */ +void frontswap_writethrough(bool enable) +{ + frontswap_writethrough_enabled = enable; +} +EXPORT_SYMBOL(frontswap_writethrough); + +/* + * Called when a swap device is swapon'd. + */ +void __frontswap_init(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + if (frontswap_enabled) + (*frontswap_ops.init)(type); +} +EXPORT_SYMBOL(__frontswap_init); + +/* + * "Store" data from a page to frontswap and associate it with the page's + * swaptype and offset. Page must be locked and in the swap cache. + * If frontswap already contains a page with matching swaptype and + * offset, the frontswap implementation may either overwrite the data and + * return success or invalidate the page from frontswap and return failure. 
+ */ +int __frontswap_store(struct page *page) +{ + int ret = -1, dup = 0; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + dup = 1; + ret = (*frontswap_ops.store)(type, offset, page); + if (ret == 0) { + frontswap_set(sis, offset); + inc_frontswap_succ_stores(); + if (!dup) + atomic_inc(&sis->frontswap_pages); + } else if (dup) { + /* + failed dup always results in automatic invalidate of + the (older) page from frontswap + */ + frontswap_clear(sis, offset); + atomic_dec(&sis->frontswap_pages); + inc_frontswap_failed_stores(); + } else + inc_frontswap_failed_stores(); + if (frontswap_writethrough_enabled) + /* report failure so swap also writes to swap device */ + ret = -1; + return ret; +} +EXPORT_SYMBOL(__frontswap_store); + +/* + * "Get" data from frontswap associated with swaptype and offset that were + * specified when the data was put to frontswap and use it to fill the + * specified page with data. Page must be locked and in the swap cache. + */ +int __frontswap_load(struct page *page) +{ + int ret = -1; + swp_entry_t entry = { .val = page_private(page), }; + int type = swp_type(entry); + struct swap_info_struct *sis = swap_info[type]; + pgoff_t offset = swp_offset(entry); + + BUG_ON(!PageLocked(page)); + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) + ret = (*frontswap_ops.load)(type, offset, page); + if (ret == 0) + inc_frontswap_loads(); + return ret; +} +EXPORT_SYMBOL(__frontswap_load); + +/* + * Invalidate any data from frontswap associated with the specified swaptype + * and offset so that a subsequent "get" will fail. + */ +void __frontswap_invalidate_page(unsigned type, pgoff_t offset) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (frontswap_test(sis, offset)) { + (*frontswap_ops.invalidate_page)(type, offset); + atomic_dec(&sis->frontswap_pages); + frontswap_clear(sis, offset); + inc_frontswap_invalidates(); + } +} +EXPORT_SYMBOL(__frontswap_invalidate_page); + +/* + * Invalidate all data from frontswap associated with all offsets for the + * specified swaptype. + */ +void __frontswap_invalidate_area(unsigned type) +{ + struct swap_info_struct *sis = swap_info[type]; + + BUG_ON(sis == NULL); + if (sis->frontswap_map == NULL) + return; + (*frontswap_ops.invalidate_area)(type); + atomic_set(&sis->frontswap_pages, 0); + memset(sis->frontswap_map, 0, sis->max / sizeof(long)); +} +EXPORT_SYMBOL(__frontswap_invalidate_area); + +/* + * Frontswap, like a true swap device, may unnecessarily retain pages + * under certain circumstances; "shrink" frontswap is essentially a + * "partial swapoff" and works by calling try_to_unuse to attempt to + * unuse enough frontswap pages to attempt to -- subject to memory + * constraints -- reduce the number of pages in frontswap to the + * number given in the parameter target_pages. 
+ */ +void frontswap_shrink(unsigned long target_pages) +{ + struct swap_info_struct *si = NULL; + int si_frontswap_pages; + unsigned long total_pages = 0, total_pages_to_unuse; + unsigned long pages = 0, pages_to_unuse = 0; + int type; + bool locked = false; + + /* + * we don't want to hold swap_lock while doing a very + * lengthy try_to_unuse, but swap_list may change + * so restart scan from swap_list.head each time + */ + spin_lock(&swap_lock); + locked = true; + total_pages = 0; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + total_pages += atomic_read(&si->frontswap_pages); + } + if (total_pages <= target_pages) + goto out; + total_pages_to_unuse = total_pages - target_pages; + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + si_frontswap_pages = atomic_read(&si->frontswap_pages); + if (total_pages_to_unuse < si_frontswap_pages) + pages = pages_to_unuse = total_pages_to_unuse; + else { + pages = si_frontswap_pages; + pages_to_unuse = 0; /* unuse all */ + } + /* ensure there is enough RAM to fetch pages from frontswap */ + if (security_vm_enough_memory_mm(current->mm, pages)) + continue; + vm_unacct_memory(pages); + break; + } + if (type < 0) + goto out; + locked = false; + spin_unlock(&swap_lock); + try_to_unuse(type, true, pages_to_unuse); +out: + if (locked) + spin_unlock(&swap_lock); + return; +} +EXPORT_SYMBOL(frontswap_shrink); + +/* + * Count and return the number of frontswap pages across all + * swap devices. This is exported so that backend drivers can + * determine current usage without reading debugfs. + */ +unsigned long frontswap_curr_pages(void) +{ + int type; + unsigned long totalpages = 0; + struct swap_info_struct *si = NULL; + + spin_lock(&swap_lock); + for (type = swap_list.head; type >= 0; type = si->next) { + si = swap_info[type]; + totalpages += atomic_read(&si->frontswap_pages); + } + spin_unlock(&swap_lock); + return totalpages; +} +EXPORT_SYMBOL(frontswap_curr_pages); + +static int __init init_frontswap(void) +{ +#ifdef CONFIG_DEBUG_FS + struct dentry *root = debugfs_create_dir("frontswap", NULL); + if (root == NULL) + return -ENXIO; + debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); + debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores); + debugfs_create_u64("failed_stores", S_IRUGO, root, + &frontswap_failed_stores); + debugfs_create_u64("invalidates", S_IRUGO, + root, &frontswap_invalidates); +#endif + return 0; +} + +module_init(init_frontswap); diff --git a/mm/internal.h b/mm/internal.h index 5cbb7819004..2ba87fbfb75 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page); /* * in mm/page_alloc.c */ -extern void set_pageblock_migratetype(struct page *page, int migratetype); -extern int move_freepages_block(struct zone *zone, struct page *page, - int migratetype); extern void __free_pages_bootmem(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE @@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page); #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA -#include <linux/compaction.h> /* * in mm/compaction.c @@ -123,14 +119,11 @@ struct compact_control { unsigned long nr_migratepages; /* Number of pages to migrate */ unsigned long free_pfn; /* isolate_freepages search base */ unsigned long migrate_pfn; /* isolate_migratepages search base */ - enum compact_mode mode; /* Compaction mode */ 
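frontswap_shrink() above selects its victim in two passes: total the frontswap pages across all active swap devices, then walk them again and take the first device whose pages can be accounted for, unusing either just the surplus or the whole device (pages_to_unuse == 0 means "all"). A simplified sketch of that selection, with the security_vm_enough_memory_mm() check reduced to a stub:

    #include <stdbool.h>
    #include <stdio.h>

    #define NDEV 3

    static bool can_account(unsigned long pages)
    {
        (void)pages;
        return true;    /* stands in for security_vm_enough_memory_mm() */
    }

    /* returns the device index to unuse, or -1 if at/below target */
    static int pick_device(const unsigned long dev_pages[NDEV],
                           unsigned long target, unsigned long *to_unuse)
    {
        unsigned long total = 0, surplus;
        int i;

        for (i = 0; i < NDEV; i++)
            total += dev_pages[i];
        if (total <= target)
            return -1;

        surplus = total - target;
        for (i = 0; i < NDEV; i++) {
            if (!can_account(dev_pages[i]))
                continue;
            /* partial unuse only if this device alone covers the surplus */
            *to_unuse = (surplus < dev_pages[i]) ? surplus : 0;
            return i;
        }
        return -1;      /* no device had accountable room */
    }

    int main(void)
    {
        unsigned long pages[NDEV] = { 300, 500, 200 };
        unsigned long to_unuse = 0;
        int dev = pick_device(pages, 600, &to_unuse);

        /* surplus 400 >= 300, so device 0 is unused entirely (0 == all) */
        printf("dev=%d to_unuse=%lu\n", dev, to_unuse);
        return 0;
    }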
+ bool sync; /* Synchronous migration */ int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - - /* Number of UNMOVABLE destination pageblocks skipped during scan */ - unsigned long nr_pageblocks_skipped; }; unsigned long diff --git a/mm/migrate.c b/mm/migrate.c index ab81d482ae6..be26d5cbe56 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page) * is actually a signal that all of the page has become dirty. * Whereas only part of our page may be dirty. */ - __set_page_dirty_nobuffers(newpage); + if (PageSwapBacked(page)) + SetPageDirty(newpage); + else + __set_page_dirty_nobuffers(newpage); } mlock_migrate_page(newpage, page); diff --git a/mm/nommu.c b/mm/nommu.c index c4acfbc0997..d4b0c10872d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1486,7 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - ret = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); + retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); if (file) fput(file); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ed0e1967736..416637f0e92 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -183,7 +183,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages) { - unsigned long points; + long points; if (oom_unkillable_task(p, memcg, nodemask)) return 0; @@ -223,7 +223,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, * Never return 0 for an eligible task regardless of the root bonus and * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). */ - return points ? points : 1; + return points > 0 ? 
points : 1; } /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6092f331b32..44030096da6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; -void set_pageblock_migratetype(struct page *page, int migratetype) +static void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled)) @@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone, return pages_moved; } -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype) +static int move_freepages_block(struct zone *zone, struct page *page, + int migratetype) { unsigned long start_pfn, end_pfn; struct page *start_page, *end_page; @@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end) .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(start)), - .mode = COMPACT_SYNC, + .sync = true, }; INIT_LIST_HEAD(&cc.migratepages); diff --git a/mm/page_io.c b/mm/page_io.c index dc76b4d0611..34f02923744 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -18,6 +18,7 @@ #include <linux/bio.h> #include <linux/swapops.h> #include <linux/writeback.h> +#include <linux/frontswap.h> #include <asm/pgtable.h> static struct bio *get_swap_bio(gfp_t gfp_flags, @@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) unlock_page(page); goto out; } + if (frontswap_store(page) == 0) { + set_page_writeback(page); + unlock_page(page); + end_page_writeback(page); + goto out; + } bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); if (bio == NULL) { set_page_dirty(page); @@ -122,6 +129,11 @@ int swap_readpage(struct page *page) VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(PageUptodate(page)); + if (frontswap_load(page) == 0) { + SetPageUptodate(page); + unlock_page(page); + goto out; + } bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); if (bio == NULL) { unlock_page(page); diff --git a/mm/shmem.c b/mm/shmem.c index 585bd220a21..a15a466d0d1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, mutex_lock(&shmem_swaplist_mutex); /* * We needed to drop mutex to make that restrictive page - * allocation; but the inode might already be freed by now, - * and we cannot refer to inode or mapping or info to check. - * However, we do hold page lock on the PageSwapCache page, - * so can check if that still has our reference remaining. + * allocation, but the inode might have been freed while we + * dropped it: although a racing shmem_evict_inode() cannot + * complete without emptying the radix_tree, our page lock + * on this swapcache page is not enough to prevent that - + * free_swap_and_cache() of our swap entry will only + * trylock_page(), removing swap from radix_tree whatever. + * + * We must not proceed to shmem_add_to_page_cache() if the + * inode has been freed, but of course we cannot rely on + * inode or mapping or info to check that. However, we can + * safely check if our swap entry is still in use (and here + * it can't have got reused for another page): if it's still + * in use, then the inode cannot have been freed yet, and we + * can safely proceed (if it's no longer in use, that tells + * nothing about the inode, but we don't need to unuse swap). 
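The oom_badness() hunk above is a signedness fix: the deductions applied to points (the root-task bonus, a negative oom_score_adj) can push it below zero, and with an unsigned type that underflow wraps to a huge score, making precisely the wrong task look most eligible to kill. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long upoints = 10;
        long spoints = 10;

        upoints -= 30;  /* bonus larger than the score */
        spoints -= 30;

        printf("unsigned: %lu\n", upoints);                     /* wraps: huge */
        printf("signed:   %ld\n", spoints > 0 ? spoints : 1L);  /* clamps: 1 */
        return 0;
    }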
*/ if (!page_swapcount(*pagep)) error = -ENOENT; @@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page) /* * There's a faint possibility that swap page was replaced before - * caller locked it: it will come back later with the right page. + * caller locked it: caller will come back later with the right page. */ - if (unlikely(!PageSwapCache(page))) + if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) goto out; /* @@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, newpage = shmem_alloc_page(gfp, info, index); if (!newpage) return -ENOMEM; - VM_BUG_ON(shmem_should_replace_page(newpage, gfp)); - *pagep = newpage; page_cache_get(newpage); copy_highpage(newpage, oldpage); + flush_dcache_page(newpage); - VM_BUG_ON(!PageLocked(oldpage)); __set_page_locked(newpage); - VM_BUG_ON(!PageUptodate(oldpage)); SetPageUptodate(newpage); - VM_BUG_ON(!PageSwapBacked(oldpage)); SetPageSwapBacked(newpage); - VM_BUG_ON(!swap_index); set_page_private(newpage, swap_index); - VM_BUG_ON(!PageSwapCache(oldpage)); SetPageSwapCache(newpage); /* @@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, spin_lock_irq(&swap_mapping->tree_lock); error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, newpage); - __inc_zone_page_state(newpage, NR_FILE_PAGES); - __dec_zone_page_state(oldpage, NR_FILE_PAGES); + if (!error) { + __inc_zone_page_state(newpage, NR_FILE_PAGES); + __dec_zone_page_state(oldpage, NR_FILE_PAGES); + } spin_unlock_irq(&swap_mapping->tree_lock); - BUG_ON(error); - mem_cgroup_replace_page_cache(oldpage, newpage); - lru_cache_add_anon(newpage); + if (unlikely(error)) { + /* + * Is this possible? I think not, now that our callers check + * both PageSwapCache and page_private after getting page lock; + * but be defensive. Reverse old to newpage for clear and free. 
+ */ + oldpage = newpage; + } else { + mem_cgroup_replace_page_cache(oldpage, newpage); + lru_cache_add_anon(newpage); + *pagep = newpage; + } ClearPageSwapCache(oldpage); set_page_private(oldpage, 0); @@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, unlock_page(oldpage); page_cache_release(oldpage); page_cache_release(oldpage); - return 0; + return error; } /* @@ -1107,7 +1123,8 @@ repeat: /* We have to do this with page locked to prevent races */ lock_page(page); - if (!PageSwapCache(page) || page->mapping) { + if (!PageSwapCache(page) || page_private(page) != swap.val || + page->mapping) { error = -EEXIST; /* try again */ goto failed; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 457b10baef5..de5bc51c4a6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -31,6 +31,8 @@ #include <linux/memcontrol.h> #include <linux/poll.h> #include <linux/oom.h> +#include <linux/frontswap.h> +#include <linux/swapfile.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> @@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, static void free_swap_count_continuations(struct swap_info_struct *); static sector_t map_swap_entry(swp_entry_t, struct block_device**); -static DEFINE_SPINLOCK(swap_lock); +DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; long nr_swap_pages; long total_swap_pages; @@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; -static struct swap_list_t swap_list = {-1, -1}; +struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct *swap_info[MAX_SWAPFILES]; +struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p, swap_list.next = p->type; nr_swap_pages++; p->inuse_pages--; + frontswap_invalidate_page(p->type, offset); if ((p->flags & SWP_BLKDEV) && disk->fops->swap_slot_free_notify) disk->fops->swap_slot_free_notify(p->bdev, offset); @@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm, } /* - * Scan swap_map from current position to next entry still in use. + * Scan swap_map (or frontswap_map if frontswap parameter is true) + * from current position to next entry still in use. * Recycle to start on reaching the end, returning 0 when empty. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, - unsigned int prev) + unsigned int prev, bool frontswap) { unsigned int max = si->max; unsigned int i = prev; @@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, prev = 0; i = 1; } + if (frontswap) { + if (frontswap_test(si, i)) + break; + else + continue; + } count = si->swap_map[i]; if (count && swap_count(count) != SWAP_MAP_BAD) break; @@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si, * We completely avoid races by reading each swap page in advance, * and then search for the process using it. All the necessary * page table adjustments can then be made atomically. 
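The find_next_to_unuse() change above threads a single flag through the scan: with frontswap set, a slot counts as in use when its frontswap bit is set rather than when swap_map holds a live count. A userspace sketch of the scan (wrap-around kept, SWAP_MAP_BAD handling dropped, types simplified):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SLOTS 8

    struct toy_si {
        unsigned char swap_map[MAX_SLOTS];  /* 0 == slot free */
        unsigned char fs_map[MAX_SLOTS];    /* frontswap bit per slot */
        unsigned int max;
    };

    static unsigned int next_to_unuse(const struct toy_si *si,
                                      unsigned int prev, bool frontswap)
    {
        bool wrapped = false;
        unsigned int i;

        for (i = prev + 1; ; i++) {
            if (i >= si->max) {
                if (wrapped)
                    return 0;   /* nothing left in use */
                wrapped = true;
                i = 0;          /* becomes 1: slot 0 is the header page */
                continue;
            }
            if (frontswap ? si->fs_map[i] : si->swap_map[i])
                return i;
        }
    }

    int main(void)
    {
        struct toy_si si = { .max = MAX_SLOTS };

        si.swap_map[3] = 1;     /* in use on the device */
        si.fs_map[5] = 1;       /* cached in frontswap */

        printf("%u %u\n", next_to_unuse(&si, 0, false),
               next_to_unuse(&si, 0, true));    /* prints: 3 5 */
        return 0;
    }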
+ * + * if the boolean frontswap is true, only unuse pages_to_unuse pages; + * pages_to_unuse==0 means all pages; ignored if frontswap is false */ -static int try_to_unuse(unsigned int type) +int try_to_unuse(unsigned int type, bool frontswap, + unsigned long pages_to_unuse) { struct swap_info_struct *si = swap_info[type]; struct mm_struct *start_mm; @@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type) * one pass through swap_map is enough, but not necessarily: * there are races when an instance of an entry might be missed. */ - while ((i = find_next_to_unuse(si, i)) != 0) { + while ((i = find_next_to_unuse(si, i, frontswap)) != 0) { if (signal_pending(current)) { retval = -EINTR; break; @@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type) * interactive performance. */ cond_resched(); + if (frontswap && pages_to_unuse > 0) { + if (!--pages_to_unuse) + break; + } } mmput(start_mm); @@ -1486,7 +1504,8 @@ bad_bmap: } static void enable_swap_info(struct swap_info_struct *p, int prio, - unsigned char *swap_map) + unsigned char *swap_map, + unsigned long *frontswap_map) { int i, prev; @@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, else p->prio = --least_priority; p->swap_map = swap_map; + frontswap_map_set(p, frontswap_map); p->flags |= SWP_WRITEOK; nr_swap_pages += p->pages; total_swap_pages += p->pages; @@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio, swap_list.head = swap_list.next = p->type; else swap_info[prev]->next = p->type; + frontswap_init(p->type); spin_unlock(&swap_lock); } @@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&swap_lock); oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); - err = try_to_unuse(type); + err = try_to_unuse(type, false, 0); /* force all pages to be unused */ compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); if (err) { @@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) * sys_swapoff for this swap_info_struct at this point. */ /* re-insert swap space back into swap_list */ - enable_swap_info(p, p->prio, p->swap_map); + enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p)); goto out_dput; } @@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) swap_map = p->swap_map; p->swap_map = NULL; p->flags = 0; + frontswap_invalidate_area(type); spin_unlock(&swap_lock); mutex_unlock(&swapon_mutex); vfree(swap_map); + vfree(frontswap_map_get(p)); /* Destroy swap account informatin */ swap_cgroup_swapoff(type); @@ -1988,6 +2011,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; + unsigned long *frontswap_map = NULL; struct page *page = NULL; struct inode *inode = NULL; @@ -2071,6 +2095,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) error = nr_extents; goto bad_swap; } + /* frontswap enabled? set up bit-per-page map for frontswap */ + if (frontswap_enabled) + frontswap_map = vzalloc(maxpages / sizeof(long)); if (p->bdev) { if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { @@ -2086,14 +2113,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) if (swap_flags & SWAP_FLAG_PREFER) prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; - enable_swap_info(p, prio, swap_map); + enable_swap_info(p, prio, swap_map, frontswap_map); printk(KERN_INFO "Adding %uk swap on %s. 
" - "Priority:%d extents:%d across:%lluk %s%s\n", + "Priority:%d extents:%d across:%lluk %s%s%s\n", p->pages<<(PAGE_SHIFT-10), name, p->prio, nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), (p->flags & SWP_SOLIDSTATE) ? "SS" : "", - (p->flags & SWP_DISCARDABLE) ? "D" : ""); + (p->flags & SWP_DISCARDABLE) ? "D" : "", + (frontswap_map) ? "FS" : ""); mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); diff --git a/net/can/raw.c b/net/can/raw.c index cde1b4a20f7..46cca3a91d1 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, if (err < 0) goto free_skb; - /* to be able to check the received tx sock reference in raw_rcv() */ - skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF; - skb->dev = dev; skb->sk = sk; diff --git a/net/core/dev.c b/net/core/dev.c index c6e29ea65bd..57c4f9bc685 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2089,25 +2089,6 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) return 0; } -/* - * Try to orphan skb early, right before transmission by the device. - * We cannot orphan skb if tx timestamp is requested or the sk-reference - * is needed on driver level for other reasons, e.g. see net/can/raw.c - */ -static inline void skb_orphan_try(struct sk_buff *skb) -{ - struct sock *sk = skb->sk; - - if (sk && !skb_shinfo(skb)->tx_flags) { - /* skb_tx_hash() wont be able to get sk. - * We copy sk_hash into skb->rxhash - */ - if (!skb->rxhash) - skb->rxhash = sk->sk_hash; - skb_orphan(skb); - } -} - static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) { return ((features & NETIF_F_GEN_CSUM) || @@ -2193,8 +2174,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, if (!list_empty(&ptype_all)) dev_queue_xmit_nit(skb, dev); - skb_orphan_try(skb); - features = netif_skb_features(skb); if (vlan_tx_tag_present(skb) && @@ -2304,7 +2283,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else - hash = (__force u16) skb->protocol ^ skb->rxhash; + hash = (__force u16) skb->protocol; hash = jhash_1word(hash, hashrnd); return (u16) (((u64) hash * qcount) >> 32) + qoffset; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3d84fb9d887..f9f40b932e4 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev); void netpoll_send_udp(struct netpoll *np, const char *msg, int len) { - int total_len, eth_len, ip_len, udp_len; + int total_len, ip_len, udp_len; struct sk_buff *skb; struct udphdr *udph; struct iphdr *iph; struct ethhdr *eth; udp_len = len + sizeof(*udph); - ip_len = eth_len = udp_len + sizeof(*iph); - total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; + ip_len = udp_len + sizeof(*iph); + total_len = ip_len + LL_RESERVED_SPACE(np->dev); - skb = find_skb(np, total_len, total_len - len); + skb = find_skb(np, total_len + np->dev->needed_tailroom, + total_len - len); if (!skb) return; skb_copy_to_linear_data(skb, msg, len); - skb->len += len; + skb_put(skb, len); skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 215afc74d8a..a83bf067bdc 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1697,21 +1697,25 @@ int __init fib6_init(void) ret = register_pernet_subsys(&fib6_net_ops); if (ret) goto out_kmem_cache_create; - - ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib, - NULL); 
- if (ret) - goto out_unregister_subsys; out: return ret; -out_unregister_subsys: - unregister_pernet_subsys(&fib6_net_ops); out_kmem_cache_create: kmem_cache_destroy(fib6_node_kmem); goto out; } +int __init fib6_init_late(void) +{ + return __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib, + NULL); +} + +void fib6_cleanup_late(void) +{ + rtnl_unregister(PF_INET6, RTM_GETROUTE); +} + void fib6_gc_cleanup(void) { unregister_pernet_subsys(&fib6_net_ops); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 0d41f68daff..c7ccc36ba63 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2974,13 +2974,17 @@ int __init ip6_route_init(void) if (ret) goto out_kmem_cache; - ret = register_pernet_subsys(&ip6_route_net_ops); + ret = fib6_init(); if (ret) goto out_dst_entries; ret = register_pernet_subsys(&ipv6_inetpeer_ops); if (ret) - goto out_register_subsys; + goto out_fib6_init; + + ret = register_pernet_subsys(&ip6_route_net_ops); + if (ret) + goto out_register_inetpeer; ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; @@ -2995,13 +2999,13 @@ int __init ip6_route_init(void) init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); #endif - ret = fib6_init(); + ret = fib6_init_late(); if (ret) - goto out_register_inetpeer; + goto out_register_subsys; ret = xfrm6_init(); if (ret) - goto out_fib6_init; + goto out_fib6_init_late; ret = fib6_rules_init(); if (ret) @@ -3024,12 +3028,14 @@ fib6_rules_init: fib6_rules_cleanup(); xfrm6_init: xfrm6_fini(); -out_fib6_init: - fib6_gc_cleanup(); -out_register_inetpeer: - unregister_pernet_subsys(&ipv6_inetpeer_ops); +out_fib6_init_late: + fib6_cleanup_late(); out_register_subsys: unregister_pernet_subsys(&ip6_route_net_ops); +out_register_inetpeer: + unregister_pernet_subsys(&ipv6_inetpeer_ops); +out_fib6_init: + fib6_gc_cleanup(); out_dst_entries: dst_entries_destroy(&ip6_dst_blackhole_ops); out_kmem_cache: diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 07d7d55a1b9..cd6f7a991d8 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock, skb_trim(skb, skb->dev->mtu); } skb->protocol = ETH_P_AF_IUCV; - skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index a68aed7fce0..ec2118d0e27 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream) if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) return -EPERM; retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); - if (!retval) { + if (!retval) stream->runtime->state = SNDRV_PCM_STATE_PAUSED; - wake_up(&stream->runtime->sleep); - } return retval; } @@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream) if (!retval) { stream->runtime->state = SNDRV_PCM_STATE_SETUP; wake_up(&stream->runtime->sleep); + stream->runtime->hw_pointer = 0; + stream->runtime->app_pointer = 0; + stream->runtime->total_bytes_available = 0; + stream->runtime->total_bytes_transferred = 0; } return retval; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2b6392be451..02763827dde 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx 
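The ip6_route_init() reshuffle above preserves one invariant: the error-label ladder must tear subsystems down in exactly the reverse of the order they were brought up, so a failure at any step unwinds only what has already been initialized. The generic shape of that pattern, with hypothetical subsystems a, b and c:

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static void exit_a(void) { }
    static int init_b(void) { return 0; }
    static void exit_b(void) { }
    static int init_c(void) { return -1; }  /* force the unwind path */

    static int subsys_init(void)
    {
        int ret;

        ret = init_a();
        if (ret)
            goto out;
        ret = init_b();
        if (ret)
            goto out_a;
        ret = init_c();
        if (ret)
            goto out_b;     /* labels mirror init order, reversed */
        return 0;

    out_b:
        exit_b();
    out_a:
        exit_a();
    out:
        return ret;
    }

    int main(void)
    {
        printf("%d\n", subsys_init());  /* -1, with b then a unwound */
        return 0;
    }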
*chip) static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); +#ifdef SUPPORT_VGA_SWITCHEROO static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); -#ifdef SUPPORT_VGA_SWITCHEROO static void azx_vs_set_state(struct pci_dev *pci, enum vga_switcheroo_state state) { @@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip) #else #define init_vga_switcheroo(chip) /* NOP */ #define register_vga_switcheroo(chip) 0 +#define check_hdmi_disabled(pci) false #endif /* SUPPORT_VGA_SWITCHER */ /* @@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device) return azx_free(device->device_data); } +#ifdef SUPPORT_VGA_SWITCHEROO /* * Check of disabled HDMI controller by vga-switcheroo */ @@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci) struct pci_dev *p = get_bound_vga(pci); if (p) { - if (vga_default_device() && p != vga_default_device()) + if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF) vga_inactive = true; pci_dev_put(p); } return vga_inactive; } +#endif /* SUPPORT_VGA_SWITCHEROO */ /* * white/black-listing for position_fix @@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, /* Creative X-Fi (CA0110-IBG) */ + /* CTHDA chips */ + { PCI_DEVICE(0x1102, 0x0010), + .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, + { PCI_DEVICE(0x1102, 0x0012), + .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) /* the following entry conflicts with snd-ctxfi driver, * as ctxfi driver mutates from HD-audio to native mode with @@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, #endif - /* CTHDA chips */ - { PCI_DEVICE(0x1102, 0x0010), - .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, - { PCI_DEVICE(0x1102, 0x0012), - .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA }, /* Vortex86MX */ { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, /* VMware HDAudio */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 3acb5824ad3..172370b3793 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -4061,7 +4061,7 @@ static void cx_auto_init_digital(struct hda_codec *codec) static int cx_auto_init(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; - /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ + snd_hda_gen_apply_verbs(codec); cx_auto_init_output(codec); cx_auto_init_input(codec); cx_auto_init_digital(codec); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 224410e8e9e..f8f4906e498 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec) alc_fix_pll(codec); alc_auto_init_amp(codec, spec->init_amp); + snd_hda_gen_apply_verbs(codec); alc_init_special_input_src(codec); alc_auto_init_std(codec); @@ -6439,6 +6440,7 @@ enum { ALC662_FIXUP_ASUS_MODE7, ALC662_FIXUP_ASUS_MODE8, ALC662_FIXUP_NO_JACK_DETECT, + ALC662_FIXUP_ZOTAC_Z68, }; static const struct alc_fixup alc662_fixups[] = { @@ -6588,6 +6590,13 @@ static const struct alc_fixup alc662_fixups[] = { .type = ALC_FIXUP_FUNC, .v.func = alc_fixup_no_jack_detect, }, + 
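The new check_hdmi_disabled() stub above follows the usual compile-out idiom: build the real function only when the feature is configured, and let a macro supply a harmless constant otherwise, so call sites need no #ifdefs of their own. A minimal sketch of the idiom (the feature flag and values are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define HAVE_SWITCHEROO 0   /* flip to 1 for the full build */

    #if HAVE_SWITCHEROO
    static bool check_hdmi_disabled(int pci)
    {
        /* real probe of the bound VGA device would go here */
        return pci == 42;
    }
    #else
    /* feature compiled out: callers still compile and always see false */
    #define check_hdmi_disabled(pci) false
    #endif

    int main(void)
    {
        if (!check_hdmi_disabled(7))
            printf("HDMI controller enabled, continue probing\n");
        return 0;
    }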
[ALC662_FIXUP_ZOTAC_Z68] = { + .type = ALC_FIXUP_PINS, + .v.pins = (const struct alc_pincfg[]) { + { 0x1b, 0x02214020 }, /* Front HP */ + { } + } + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -6601,6 +6610,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), + SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), #if 0 diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c index a75c3766aed..0418fa11e6b 100644 --- a/sound/soc/codecs/wm2000.c +++ b/sound/soc/codecs/wm2000.c @@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000) } static int wm2000_poll_bit(struct i2c_client *i2c, - unsigned int reg, u8 mask, int timeout) + unsigned int reg, u8 mask) { + int timeout = 4000; int val; val = wm2000_read(i2c, reg); @@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c, static int wm2000_power_up(struct i2c_client *i2c, int analogue) { struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); - int ret, timeout; + int ret; BUG_ON(wm2000->anc_mode != ANC_OFF); @@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) /* Wait for ANC engine to become ready */ if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, - WM2000_ANC_ENG_IDLE, 1)) { + WM2000_ANC_ENG_IDLE)) { dev_err(&i2c->dev, "ANC engine failed to reset\n"); return -ETIMEDOUT; } if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_BOOT_COMPLETE, 1)) { + WM2000_STATUS_BOOT_COMPLETE)) { dev_err(&i2c->dev, "ANC engine failed to initialise\n"); return -ETIMEDOUT; } @@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) dev_dbg(&i2c->dev, "Download complete\n"); if (analogue) { - timeout = 248; - wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); + wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_ANA_SEQ_INCLUDE | WM2000_MODE_MOUSE_ENABLE | WM2000_MODE_THERMAL_ENABLE); } else { - timeout = 10; - wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_MOUSE_ENABLE | WM2000_MODE_THERMAL_ENABLE); @@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_MOUSE_ACTIVE, timeout)) { - dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", - timeout * 10); + WM2000_STATUS_MOUSE_ACTIVE)) { + dev_err(&i2c->dev, "Timed out waiting for device\n"); return -ETIMEDOUT; } @@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue) static int wm2000_power_down(struct i2c_client *i2c, int analogue) { struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); - int timeout; if (analogue) { - timeout = 248; - wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); + wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_ANA_SEQ_INCLUDE | WM2000_MODE_POWER_DOWN); } else { - timeout = 10; wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_POWER_DOWN); } if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { + WM2000_STATUS_POWER_DOWN_COMPLETE)) { 
dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); return -ETIMEDOUT; } if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, - WM2000_ANC_ENG_IDLE, 1)) { + WM2000_ANC_ENG_IDLE)) { dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); return -ETIMEDOUT; } @@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue) } if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_ANC_DISABLED, 10)) { + WM2000_STATUS_ANC_DISABLED)) { dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); return -ETIMEDOUT; } if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, - WM2000_ANC_ENG_IDLE, 1)) { + WM2000_ANC_ENG_IDLE)) { dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); return -ETIMEDOUT; } @@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_MOUSE_ACTIVE, 10)) { + WM2000_STATUS_MOUSE_ACTIVE)) { dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); return -ETIMEDOUT; } @@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue) static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) { struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); - int timeout; BUG_ON(wm2000->anc_mode != ANC_ACTIVE); if (analogue) { - timeout = 248; - wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4); + wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4); wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_ANA_SEQ_INCLUDE | WM2000_MODE_THERMAL_ENABLE | WM2000_MODE_STANDBY_ENTRY); } else { - timeout = 10; - wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_THERMAL_ENABLE | WM2000_MODE_STANDBY_ENTRY); } if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_ANC_DISABLED, timeout)) { + WM2000_STATUS_ANC_DISABLED)) { dev_err(&i2c->dev, "Timed out waiting for ANC disable after 1ms\n"); return -ETIMEDOUT; } - if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, - 1)) { + if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) { dev_err(&i2c->dev, - "Timed out waiting for standby after %dms\n", - timeout * 10); + "Timed out waiting for standby\n"); return -ETIMEDOUT; } @@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) { struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); - int timeout; BUG_ON(wm2000->anc_mode != ANC_STANDBY); wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); if (analogue) { - timeout = 248; - wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4); + wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4); wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_ANA_SEQ_INCLUDE | WM2000_MODE_THERMAL_ENABLE | WM2000_MODE_MOUSE_ENABLE); } else { - timeout = 10; - wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, WM2000_MODE_THERMAL_ENABLE | WM2000_MODE_MOUSE_ENABLE); @@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, - WM2000_STATUS_MOUSE_ACTIVE, timeout)) { - dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", - timeout * 10); + WM2000_STATUS_MOUSE_ACTIVE)) { + dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); return -ETIMEDOUT; } diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 993639d694c..aa8c98b628d 100644 
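The wm2000 rework above drops the per-caller timeout plumbing: every wait now shares one retry budget of 4000 attempts inside wm2000_poll_bit() itself, since callers only care whether the bit arrived. A userspace sketch of the reworked helper, assuming roughly a 1 ms sleep between reads and a fake register that becomes ready on the third poll:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static unsigned int fake_reads;

    /* stands in for wm2000_read(); reports the bit set on the third read */
    static unsigned int read_reg(unsigned int reg)
    {
        (void)reg;
        return ++fake_reads >= 3 ? 0x01 : 0x00;
    }

    static bool poll_bit(unsigned int reg, unsigned int mask)
    {
        int timeout = 4000;     /* the budget now lives in the helper */
        unsigned int val = read_reg(reg);

        while (!(val & mask) && --timeout) {
            usleep(1000);       /* assumed ~1 ms pause between reads */
            val = read_reg(reg);
        }
        return timeout != 0;
    }

    int main(void)
    {
        printf("%s\n", poll_bit(0x40, 0x01) ? "ready" : "timed out");
        return 0;
    }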
--- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -46,6 +46,39 @@ #define WM8994_NUM_DRC 3 #define WM8994_NUM_EQ 3 +static struct { + unsigned int reg; + unsigned int mask; +} wm8994_vu_bits[] = { + { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, + { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU }, + { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, + { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU }, + { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU }, + { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU }, + { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, + { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU }, + { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU }, + { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU }, + + { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU }, + { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU }, + { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU }, + { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU }, + { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU }, + { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU }, + { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU }, + { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU }, + { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU }, + { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, + { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU }, + { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU }, + { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU }, + { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU }, + { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU }, + { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU }, +}; + static int wm8994_drc_base[] = { WM8994_AIF1_DRC1_1, WM8994_AIF1_DRC2_1, @@ -989,6 +1022,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, struct snd_soc_codec *codec = w->codec; struct wm8994 *control = codec->control_data; int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; + int i; int dac; int adc; int val; @@ -1047,6 +1081,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, WM8994_AIF1DAC2L_ENA); break; + case SND_SOC_DAPM_POST_PMU: + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) + snd_soc_write(codec, wm8994_vu_bits[i].reg, + snd_soc_read(codec, + wm8994_vu_bits[i].reg)); + break; + case SND_SOC_DAPM_PRE_PMD: case SND_SOC_DAPM_POST_PMD: snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, @@ -1072,6 +1113,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; + int i; int dac; int adc; int val; @@ -1122,6 +1164,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, WM8994_AIF2DACR_ENA); break; + case SND_SOC_DAPM_POST_PMU: + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) + snd_soc_write(codec, wm8994_vu_bits[i].reg, + snd_soc_read(codec, + wm8994_vu_bits[i].reg)); + break; + case SND_SOC_DAPM_PRE_PMD: case SND_SOC_DAPM_POST_PMD: snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, @@ -1190,17 +1239,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_PRE_PMU: if (wm8994->aif1clk_enable) { - aif1clk_ev(w, kcontrol, event); + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, WM8994_AIF1CLK_ENA_MASK, WM8994_AIF1CLK_ENA); + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); wm8994->aif1clk_enable = 0; } if (wm8994->aif2clk_enable) { - aif2clk_ev(w, kcontrol, event); + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU); snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, WM8994_AIF2CLK_ENA_MASK, 
WM8994_AIF2CLK_ENA); + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU); wm8994->aif2clk_enable = 0; } break; @@ -1221,15 +1272,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_POST_PMD: if (wm8994->aif1clk_disable) { + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, WM8994_AIF1CLK_ENA_MASK, 0); - aif1clk_ev(w, kcontrol, event); + aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); wm8994->aif1clk_disable = 0; } if (wm8994->aif2clk_disable) { + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD); snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, WM8994_AIF2CLK_ENA_MASK, 0); - aif2clk_ev(w, kcontrol, event); + aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD); wm8994->aif2clk_disable = 0; } break; @@ -1527,9 +1580,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), @@ -3879,39 +3934,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) pm_runtime_put(codec->dev); - /* Latch volume updates (right only; we always do left then right). */ - snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, - WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); - snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, - WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); - snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME, - WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); - snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME, - WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU); - snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME, - WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); - snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME, - WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU); - snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME, - WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); - snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME, - WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU); - snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME, - WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); - snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME, - WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU); - snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME, - WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); - snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME, - WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU); - snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME, - WM8994_DAC1_VU, WM8994_DAC1_VU); - snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME, - WM8994_DAC1_VU, WM8994_DAC1_VU); - snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME, - WM8994_DAC2_VU, WM8994_DAC2_VU); - snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME, - WM8994_DAC2_VU, WM8994_DAC2_VU); + /* Latch volume update bits */ + for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++) + snd_soc_update_bits(codec, wm8994_vu_bits[i].reg, + wm8994_vu_bits[i].mask, + wm8994_vu_bits[i].mask); /* Set the low bit of the 3D stereo depth so TLV matches */ snd_soc_update_bits(codec, 
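The wm8994_vu_bits[] table above lets a single loop replace sixteen open-coded updates at probe time, and the same table drives the new POST_PMU handlers, which re-write each register so pending volume settings latch again once the AIF clocks are powered. A sketch of the table-driven pattern (register numbers are hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    struct reg_bit {
        unsigned int reg;
        unsigned int mask;
    };

    /* hypothetical register/bit pairs standing in for wm8994_vu_bits[] */
    static const struct reg_bit vu_bits[] = {
        { 0x18, 1u << 8 },
        { 0x19, 1u << 8 },
        { 0x1a, 1u << 8 },
    };

    static unsigned int regs[0x100];    /* stand-in register file */

    static unsigned int read_reg(unsigned int reg) { return regs[reg]; }
    static void write_reg(unsigned int reg, unsigned int val) { regs[reg] = val; }

    int main(void)
    {
        size_t i;

        /* probe time: set each VU bit so later volume writes are latched */
        for (i = 0; i < sizeof(vu_bits) / sizeof(vu_bits[0]); i++)
            write_reg(vu_bits[i].reg,
                      read_reg(vu_bits[i].reg) | vu_bits[i].mask);

        /* POST_PMU: re-write current values so they latch after power-up */
        for (i = 0; i < sizeof(vu_bits) / sizeof(vu_bits[0]); i++)
            write_reg(vu_bits[i].reg, read_reg(vu_bits[i].reg));

        printf("0x%x\n", read_reg(0x18));   /* 0x100 */
        return 0;
    }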
WM8994_AIF1_DAC1_FILTERS_2, diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index f23700359c6..080327414c6 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c @@ -26,6 +26,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/pinctrl/consumer.h> #include "imx-audmux.h" @@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port); static int __devinit imx_audmux_probe(struct platform_device *pdev) { struct resource *res; + struct pinctrl *pinctrl; const struct of_device_id *of_id = of_match_device(imx_audmux_dt_ids, &pdev->dev); @@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev) if (!audmux_base) return -EADDRNOTAVAIL; + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) { + dev_err(&pdev->dev, "setup pinctrl failed!"); + return PTR_ERR(pinctrl); + } + audmux_clk = clk_get(&pdev->dev, "audmux"); if (IS_ERR(audmux_clk)) { dev_dbg(&pdev->dev, "cannot get clock: %ld\n", diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 90ee77d2409..89eae93445c 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, /* do we need to add this widget to the list ? */ if (list) { int err; - err = dapm_list_add_widget(list, path->sink); + err = dapm_list_add_widget(list, path->source); if (err < 0) { dev_err(widget->dapm->dev, "could not add widget %s\n", widget->name); @@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, if (stream == SNDRV_PCM_STREAM_PLAYBACK) paths = is_connected_output_ep(dai->playback_widget, list); else - paths = is_connected_input_ep(dai->playback_widget, list); + paths = is_connected_input_ep(dai->capture_widget, list); trace_snd_soc_dapm_connected(paths, stream); dapm_clear_walk(&card->dapm); diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index bedd1717a37..48fd15b312c 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, for (i = 0; i < card->num_links; i++) { be = &card->rtd[i]; + if (!be->dai_link->no_pcm) + continue; + if (be->cpu_dai->playback_widget == widget || be->codec_dai->playback_widget == widget) return be; @@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, for (i = 0; i < card->num_links; i++) { be = &card->rtd[i]; + if (!be->dai_link->no_pcm) + continue; + if (be->cpu_dai->capture_widget == widget || be->codec_dai->capture_widget == widget) return be; diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 57cd419f743..f43edb364a1 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>"); MODULE_DESCRIPTION("Tegra30 AHUB driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); +MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match); diff --git a/sound/usb/card.h b/sound/usb/card.h index 0d37238b845..2b9fffff23b 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -119,6 +119,7 @@ struct snd_usb_substream { unsigned long unlink_mask; /* bitmask of unlinked urbs */ /* data and sync endpoints for this stream */ + unsigned int ep_num; /* the endpoint number */ struct snd_usb_endpoint *data_endpoint; struct snd_usb_endpoint *sync_endpoint; unsigned long flags; diff --git 
a/sound/usb/stream.c b/sound/usb/stream.c index 6b7d7a2b7ba..083ed81160e 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as, subs->formats |= fp->formats; subs->num_formats++; subs->fmt_type = fp->fmt_type; + subs->ep_num = fp->endpoint; } /* @@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; - if (!subs->data_endpoint) - continue; - if (subs->data_endpoint->ep_num == fp->endpoint) { + if (subs->ep_num == fp->endpoint) { list_add_tail(&fp->list, &subs->fmt_list); subs->num_formats++; subs->formats |= fp->formats; @@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; - if (subs->data_endpoint) + if (subs->ep_num) continue; err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 5476bc0a1ea..b4b572e8c10 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -1,4 +1,6 @@ tools/perf +tools/scripts +tools/lib/traceevent include/linux/const.h include/linux/perf_event.h include/linux/rbtree.h diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 8c767c6bca9..25249f76329 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, if (symbol_conf.use_callchain) { err = callchain_append(he->callchain, - &evsel->hists.callchain_cursor, + &callchain_cursor, sample->period); if (err) return err; @@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, * so we don't allocated the extra space needed because the stdio * code will not use it. 
*/ - if (al->sym != NULL && use_browser > 0) { + if (he->ms.sym != NULL && use_browser > 0) { struct annotation *notes = symbol__annotation(he->ms.sym); assert(evsel != NULL); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 62ae30d34fa..262589991ea 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1129,7 +1129,7 @@ static int add_default_attributes(void) return 0; if (!evsel_list->nr_entries) { - if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) + if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) return -1; } @@ -1139,21 +1139,21 @@ static int add_default_attributes(void) return 0; /* Append detailed run extra attributes: */ - if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) + if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) return -1; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ - if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) + if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) return -1; if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ - return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); + return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); } int cmd_stat(int argc, const char **argv, const char *prefix __used) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 871b540293e..6bb0277b7df 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool, } if (symbol_conf.use_callchain) { - err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, + err = callchain_append(he->callchain, &callchain_cursor, sample->period); if (err) return; diff --git a/tools/perf/design.txt b/tools/perf/design.txt index bd0bb1b1279..67e5d0cace8 100644 --- a/tools/perf/design.txt +++ b/tools/perf/design.txt @@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via prctl. When a counter is disabled, it doesn't count or generate events but does continue to exist and maintain its count value. -An individual counter or counter group can be enabled with +An individual counter can be enabled with - ioctl(fd, PERF_EVENT_IOC_ENABLE); + ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); or disabled with - ioctl(fd, PERF_EVENT_IOC_DISABLE); + ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); +For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. Enabling or disabling the leader of a group enables or disables the whole group; that is, while the group leader is disabled, none of the counters in the group will count. 
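The design.txt correction above documents the third ioctl argument that the perf tool changes below now pass explicitly: 0 scopes the request to one counter, while PERF_IOC_FLAG_GROUP applies it to the whole group led by the fd. For example:

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* enable a single counter */
    static void enable_counter(int fd)
    {
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    }

    /* enable every counter in the group led by this fd */
    static void enable_group(int leader_fd)
    {
        ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
    }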
Enabling or disabling a member of a diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 4deea6aaf92..34b1c46eaf4 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx, "q/ESC/CTRL+C Exit\n\n" "-> Go to target\n" "<- Exit\n" - "h Cycle thru hottest instructions\n" + "H Cycle thru hottest instructions\n" "j Toggle showing jump to target arrows\n" "J Toggle showing number of jump sources on targets\n" "n Search next string\n" diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN index ad73300f7ba..95264f30417 100755 --- a/tools/perf/util/PERF-VERSION-GEN +++ b/tools/perf/util/PERF-VERSION-GEN @@ -12,7 +12,7 @@ LF=' # First check if there is a .git to get the version from git describe # otherwise try to get the version from the kernel makefile if test -d ../../.git -o -f ../../.git && - VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && + VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && case "$VN" in *$LF*) (exit 1) ;; v[0-9]*) diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 9f7106a8d9a..3a6bff47614 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -18,6 +18,8 @@ #include "util.h" #include "callchain.h" +__thread struct callchain_cursor callchain_cursor; + bool ip_callchain__valid(struct ip_callchain *chain, const union perf_event *event) { diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index 7f9c0f1ae3a..3bdb407f9cd 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -76,6 +76,8 @@ struct callchain_cursor { struct callchain_cursor_node *curr; }; +extern __thread struct callchain_cursor callchain_cursor; + static inline void callchain_init(struct callchain_root *root) { INIT_LIST_HEAD(&root->node.siblings); diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 4ac5f5ae4ce..7400fb3fc50 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -159,6 +159,17 @@ out_delete_partial_list: return -1; } +int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs) +{ + size_t i; + + for (i = 0; i < nr_attrs; i++) + event_attr_init(attrs + i); + + return perf_evlist__add_attrs(evlist, attrs, nr_attrs); +} + static int trace_event__id(const char *evname) { char *filename, *colon; @@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist) for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { list_for_each_entry(pos, &evlist->entries, node) { for (thread = 0; thread < evlist->threads->nr; thread++) - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); + ioctl(FD(pos, cpu, thread), + PERF_EVENT_IOC_DISABLE, 0); } } } @@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist) for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { list_for_each_entry(pos, &evlist->entries, node) { for (thread = 0; thread < evlist->threads->nr; thread++) - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); + ioctl(FD(pos, cpu, thread), + PERF_EVENT_IOC_ENABLE, 0); } } } diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 58abb63ac13..989bee9624c 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); int perf_evlist__add_default(struct perf_evlist *evlist); int 
perf_evlist__add_attrs(struct perf_evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs); +int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, + struct perf_event_attr *attrs, size_t nr_attrs); int perf_evlist__add_tracepoints(struct perf_evlist *evlist, const char *tracepoints[], size_t nr_tracepoints); int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, @@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, #define perf_evlist__add_attrs_array(evlist, array) \ perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) +#define perf_evlist__add_default_attrs(evlist, array) \ + __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) #define perf_evlist__add_tracepoints_array(evlist, array) \ perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 91d19138f3e..9f6cebd798e 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -494,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel, } static int perf_event__parse_id_sample(const union perf_event *event, u64 type, - struct perf_sample *sample) + struct perf_sample *sample, + bool swapped) { const u64 *array = event->sample.array; + union u64_swap u; array += ((event->header.size - sizeof(event->header)) / sizeof(u64)) - 1; if (type & PERF_SAMPLE_CPU) { - u32 *p = (u32 *)array; - sample->cpu = *p; + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + } + + sample->cpu = u.val32[0]; array--; } @@ -523,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type, } if (type & PERF_SAMPLE_TID) { - u32 *p = (u32 *)array; - sample->pid = p[0]; - sample->tid = p[1]; + u.val64 = *array; + if (swapped) { + /* undo swap of u64, then swap on individual u32s */ + u.val64 = bswap_64(u.val64); + u.val32[0] = bswap_32(u.val32[0]); + u.val32[1] = bswap_32(u.val32[1]); + } + + sample->pid = u.val32[0]; + sample->tid = u.val32[1]; } return 0; @@ -562,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, if (event->header.type != PERF_RECORD_SAMPLE) { if (!sample_id_all) return 0; - return perf_event__parse_id_sample(event, type, data); + return perf_event__parse_id_sample(event, type, data, swapped); } array = event->sample.array; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 1293b5ebea4..514e2a4b367 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he) * collapse the histogram */ -static bool hists__collapse_insert_entry(struct hists *hists, +static bool hists__collapse_insert_entry(struct hists *hists __used, struct rb_root *root, struct hist_entry *he) { @@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists, iter->period += he->period; iter->nr_events += he->nr_events; if (symbol_conf.use_callchain) { - callchain_cursor_reset(&hists->callchain_cursor); - callchain_merge(&hists->callchain_cursor, iter->callchain, + callchain_cursor_reset(&callchain_cursor); + callchain_merge(&callchain_cursor, + iter->callchain, he->callchain); } hist_entry__free(he); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index cfc64e293f9..34bb556d621 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -67,8 +67,6 @@ struct hists { struct events_stats stats; u64 event_stream; 
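The perf_event__parse_id_sample() change above deals with fields that are two u32 values packed into one u64: on a cross-endian file the generic pass byte-swaps whole u64s, which also exchanges the two halves, so the parser undoes that swap and then swaps each half in place. A standalone demonstration using glibc's byteswap.h:

    #include <byteswap.h>
    #include <stdint.h>
    #include <stdio.h>

    union u64_swap {
        uint64_t val64;
        uint32_t val32[2];
    };

    int main(void)
    {
        uint32_t pid = 0x01020304, tid = 0x05060708;
        union u64_swap u;

        /* the bytes as a foreign-endian writer left them */
        u.val32[0] = bswap_32(pid);
        u.val32[1] = bswap_32(tid);

        /* the generic pass blanket-swaps the u64, exchanging the halves */
        u.val64 = bswap_64(u.val64);

        /* the fix: undo the u64 swap, then swap each u32 in place */
        u.val64 = bswap_64(u.val64);
        u.val32[0] = bswap_32(u.val32[0]);
        u.val32[1] = bswap_32(u.val32[1]);

        printf("pid=%x tid=%x\n", u.val32[0], u.val32[1]);  /* restored */
        return 0;
    }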
u16 col_len[HISTC_NR_COLS]; - /* Best would be to reuse the session callchain cursor */ - struct callchain_cursor callchain_cursor; }; struct hist_entry *__hists__add_entry(struct hists *self, diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c index 1915de20dca..3322b8446e8 100644 --- a/tools/perf/util/pager.c +++ b/tools/perf/util/pager.c @@ -57,6 +57,10 @@ void setup_pager(void) } if (!pager) pager = getenv("PAGER"); + if (!pager) { + if (!access("/usr/bin/pager", X_OK)) + pager = "/usr/bin/pager"; + } if (!pager) pager = "less"; else if (!*pager || !strcmp(pager, "cat")) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 59dccc98b55..0dda25d82d0 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist) error: if (kfd >= 0) { - if (namelist) - strlist__delete(namelist); - + strlist__delete(namelist); close(kfd); } if (ufd >= 0) { - if (unamelist) - strlist__delete(unamelist); - + strlist__delete(unamelist); close(ufd); } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 93d355d2710..2600916efa8 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self, return bi; } -int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, +int machine__resolve_callchain(struct machine *self, + struct perf_evsel *evsel __used, struct thread *thread, struct ip_callchain *chain, struct symbol **parent) @@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, unsigned int i; int err; - callchain_cursor_reset(&evsel->hists.callchain_cursor); + callchain_cursor_reset(&callchain_cursor); + + if (chain->nr > PERF_MAX_STACK_DEPTH) { + pr_warning("corrupted callchain. skipping...\n"); + return 0; + } for (i = 0; i < chain->nr; i++) { u64 ip; @@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, case PERF_CONTEXT_USER: cpumode = PERF_RECORD_MISC_USER; break; default: - break; + pr_debug("invalid callchain context: " + "%"PRId64"\n", (s64) ip); + /* + * It seems the callchain is corrupted. + * Discard all. 
+ */ + callchain_cursor_reset(&callchain_cursor); + return 0; } continue; } @@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, break; } - err = callchain_cursor_append(&evsel->hists.callchain_cursor, + err = callchain_cursor_append(&callchain_cursor, ip, al.map, al.sym); if (err) return err; @@ -441,37 +454,65 @@ void mem_bswap_64(void *src, int byte_size) } } -static void perf_event__all64_swap(union perf_event *event) +static void swap_sample_id_all(union perf_event *event, void *data) +{ + void *end = (void *) event + event->header.size; + int size = end - data; + + BUG_ON(size % sizeof(u64)); + mem_bswap_64(data, size); +} + +static void perf_event__all64_swap(union perf_event *event, + bool sample_id_all __used) { struct perf_event_header *hdr = &event->header; mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); } -static void perf_event__comm_swap(union perf_event *event) +static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) { event->comm.pid = bswap_32(event->comm.pid); event->comm.tid = bswap_32(event->comm.tid); + + if (sample_id_all) { + void *data = &event->comm.comm; + + data += ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } } -static void perf_event__mmap_swap(union perf_event *event) +static void perf_event__mmap_swap(union perf_event *event, + bool sample_id_all) { event->mmap.pid = bswap_32(event->mmap.pid); event->mmap.tid = bswap_32(event->mmap.tid); event->mmap.start = bswap_64(event->mmap.start); event->mmap.len = bswap_64(event->mmap.len); event->mmap.pgoff = bswap_64(event->mmap.pgoff); + + if (sample_id_all) { + void *data = &event->mmap.filename; + + data += ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } } -static void perf_event__task_swap(union perf_event *event) +static void perf_event__task_swap(union perf_event *event, bool sample_id_all) { event->fork.pid = bswap_32(event->fork.pid); event->fork.tid = bswap_32(event->fork.tid); event->fork.ppid = bswap_32(event->fork.ppid); event->fork.ptid = bswap_32(event->fork.ptid); event->fork.time = bswap_64(event->fork.time); + + if (sample_id_all) + swap_sample_id_all(event, &event->fork + 1); } -static void perf_event__read_swap(union perf_event *event) +static void perf_event__read_swap(union perf_event *event, bool sample_id_all) { event->read.pid = bswap_32(event->read.pid); event->read.tid = bswap_32(event->read.tid); @@ -479,6 +520,9 @@ static void perf_event__read_swap(union perf_event *event) event->read.time_enabled = bswap_64(event->read.time_enabled); event->read.time_running = bswap_64(event->read.time_running); event->read.id = bswap_64(event->read.id); + + if (sample_id_all) + swap_sample_id_all(event, &event->read + 1); } static u8 revbyte(u8 b) @@ -530,7 +574,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr) swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); } -static void perf_event__hdr_attr_swap(union perf_event *event) +static void perf_event__hdr_attr_swap(union perf_event *event, + bool sample_id_all __used) { size_t size; @@ -541,18 +586,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event) mem_bswap_64(event->attr.id, size); } -static void perf_event__event_type_swap(union perf_event *event) +static void perf_event__event_type_swap(union perf_event *event, + bool sample_id_all __used) { event->event_type.event_type.event_id = bswap_64(event->event_type.event_type.event_id); } -static void 
perf_event__tracing_data_swap(union perf_event *event) +static void perf_event__tracing_data_swap(union perf_event *event, + bool sample_id_all __used) { event->tracing_data.size = bswap_32(event->tracing_data.size); } -typedef void (*perf_event__swap_op)(union perf_event *event); +typedef void (*perf_event__swap_op)(union perf_event *event, + bool sample_id_all); static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_MMAP] = perf_event__mmap_swap, @@ -986,6 +1034,15 @@ static int perf_session__process_user_event(struct perf_session *session, union } } +static void event_swap(union perf_event *event, bool sample_id_all) +{ + perf_event__swap_op swap; + + swap = perf_event__swap_ops[event->header.type]; + if (swap) + swap(event, sample_id_all); +} + static int perf_session__process_event(struct perf_session *session, union perf_event *event, struct perf_tool *tool, @@ -994,9 +1051,8 @@ static int perf_session__process_event(struct perf_session *session, struct perf_sample sample; int ret; - if (session->header.needs_swap && - perf_event__swap_ops[event->header.type]) - perf_event__swap_ops[event->header.type](event); + if (session->header.needs_swap) + event_swap(event, session->sample_id_all); if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; @@ -1428,7 +1484,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, int print_sym, int print_dso, int print_symoffset) { struct addr_location al; - struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; struct callchain_cursor_node *node; if (perf_event__preprocess_sample(event, machine, &al, sample, @@ -1446,10 +1501,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, error("Failed to resolve callchain. Skipping\n"); return; } - callchain_cursor_commit(cursor); + callchain_cursor_commit(&callchain_cursor); while (1) { - node = callchain_cursor_current(cursor); + node = callchain_cursor_current(&callchain_cursor); if (!node) break; @@ -1460,12 +1515,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, } if (print_dso) { printf(" ("); - map__fprintf_dsoname(al.map, stdout); + map__fprintf_dsoname(node->map, stdout); printf(")"); } printf("\n"); - callchain_cursor_advance(cursor); + callchain_cursor_advance(&callchain_cursor); } } else { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e2ba8858f3e..3e2e5ea0f03 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -323,6 +323,7 @@ struct dso *dso__new(const char *name) dso->sorted_by_name = 0; dso->has_build_id = 0; dso->kernel = DSO_TYPE_USER; + dso->needs_swap = DSO_SWAP__UNSET; INIT_LIST_HEAD(&dso->node); } @@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } +static int dso__swap_init(struct dso *dso, unsigned char eidata) +{ + static unsigned int const endian = 1; + + dso->needs_swap = DSO_SWAP__NO; + + switch (eidata) { + case ELFDATA2LSB: + /* We are big endian, DSO is little endian. */ + if (*(unsigned char const *)&endian != 1) + dso->needs_swap = DSO_SWAP__YES; + break; + + case ELFDATA2MSB: + /* We are little endian, DSO is big endian. 
*/ + if (*(unsigned char const *)&endian != 0) + dso->needs_swap = DSO_SWAP__YES; + break; + + default: + pr_err("unrecognized DSO data encoding %d\n", eidata); + return -EINVAL; + } + + return 0; +} + static int dso__load_sym(struct dso *dso, struct map *map, const char *name, int fd, symbol_filter_t filter, int kmodule, int want_symtab) @@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, goto out_elf_end; } + if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) + goto out_elf_end; + /* Always reject images with a mismatched build-id: */ if (dso->has_build_id) { u8 build_id[BUILD_ID_SIZE]; @@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, if (opdsec && sym.st_shndx == opdidx) { u32 offset = sym.st_value - opdshdr.sh_addr; u64 *opd = opddata->d_buf + offset; - sym.st_value = *opd; + sym.st_value = DSO__SWAP(dso, u64, *opd); sym.st_shndx = elf_addr_to_index(elf, sym.st_value); } @@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type, struct map *dso__new_map(const char *name) { + struct map *map = NULL; struct dso *dso = dso__new(name); - struct map *map = map__new2(0, dso, MAP__FUNCTION); + + if (dso) + map = map__new2(0, dso, MAP__FUNCTION); return map; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 5649d63798c..af0752b1aca 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -9,6 +9,7 @@ #include <linux/list.h> #include <linux/rbtree.h> #include <stdio.h> +#include <byteswap.h> #ifdef HAVE_CPLUS_DEMANGLE extern char *cplus_demangle(const char *, int); @@ -160,11 +161,18 @@ enum dso_kernel_type { DSO_TYPE_GUEST_KERNEL }; +enum dso_swap_type { + DSO_SWAP__UNSET, + DSO_SWAP__NO, + DSO_SWAP__YES, +}; + struct dso { struct list_head node; struct rb_root symbols[MAP__NR_TYPES]; struct rb_root symbol_names[MAP__NR_TYPES]; enum dso_kernel_type kernel; + enum dso_swap_type needs_swap; u8 adjust_symbols:1; u8 has_build_id:1; u8 hit:1; @@ -182,6 +190,28 @@ struct dso { char name[0]; }; +#define DSO__SWAP(dso, type, val) \ +({ \ + type ____r = val; \ + BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ + if (dso->needs_swap == DSO_SWAP__YES) { \ + switch (sizeof(____r)) { \ + case 2: \ + ____r = bswap_16(val); \ + break; \ + case 4: \ + ____r = bswap_32(val); \ + break; \ + case 8: \ + ____r = bswap_64(val); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + } \ + ____r; \ +}) + struct dso *dso__new(const char *name); void dso__delete(struct dso *dso); diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index ab2f682fd44..16de7ad4850 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -73,8 +73,8 @@ int backwards_count; char *progname; int num_cpus; -cpu_set_t *cpu_mask; -size_t cpu_mask_size; +cpu_set_t *cpu_present_set, *cpu_mask; +size_t cpu_present_setsize, cpu_mask_size; struct counters { unsigned long long tsc; /* per thread */ @@ -103,6 +103,12 @@ struct timeval tv_even; struct timeval tv_odd; struct timeval tv_delta; +int mark_cpu_present(int pkg, int core, int cpu) +{ + CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set); + return 0; +} + /* * cpu_mask_init(ncpus) * @@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus) } cpu_mask_size = CPU_ALLOC_SIZE(ncpus); CPU_ZERO_S(cpu_mask_size, cpu_mask); + + /* + * Allocate and initialize cpu_present_set + */ + cpu_present_set = CPU_ALLOC(ncpus); + if (cpu_present_set == NULL) { + 
perror("CPU_ALLOC"); + exit(3); + } + cpu_present_setsize = CPU_ALLOC_SIZE(ncpus); + CPU_ZERO_S(cpu_present_setsize, cpu_present_set); + for_all_cpus(mark_cpu_present); } void cpu_mask_uninit() @@ -125,6 +143,9 @@ void cpu_mask_uninit() CPU_FREE(cpu_mask); cpu_mask = NULL; cpu_mask_size = 0; + CPU_FREE(cpu_present_set); + cpu_present_set = NULL; + cpu_present_setsize = 0; } int cpu_migrate(int cpu) @@ -912,6 +933,8 @@ int is_snb(unsigned int family, unsigned int model) switch (model) { case 0x2A: case 0x2D: + case 0x3A: /* IVB */ + case 0x3D: /* IVB Xeon */ return 1; } return 0; @@ -1047,6 +1070,9 @@ int fork_it(char **argv) int retval; pid_t child_pid; get_counters(cnt_even); + + /* clear affinity side-effect of get_counters() */ + sched_setaffinity(0, cpu_present_setsize, cpu_present_set); gettimeofday(&tv_even, (struct timezone *)NULL); child_pid = fork(); diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index a6a0365475e..5afb4311402 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, */ hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) if (ei->type == KVM_IRQ_ROUTING_MSI || + ue->type == KVM_IRQ_ROUTING_MSI || ue->u.irqchip.irqchip == ei->irqchip.irqchip) return r; |
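
The perf_event__parse_id_sample() change in tools/perf/util/evsel.c above deals with a subtlety of cross-endian perf.data files: the generic swap pass flips every u64 in the event wholesale, which leaves any pair of u32 values sharing one u64 slot (cpu, or pid/tid) in the wrong halves. The fix undoes the 64-bit swap and then swaps each 32-bit half individually. A minimal standalone sketch of that recovery step, using the same union layout as perf's u64_swap; the input value below is invented for illustration:

	#include <byteswap.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Two 32-bit fields (e.g. pid/tid) packed into one 64-bit slot. */
	union u64_swap {
		uint64_t val64;
		uint32_t val32[2];
	};

	int main(void)
	{
		union u64_swap u;

		/* Pretend this slot arrived from a file of the opposite
		 * byte order and was already bswap_64'd by the blanket
		 * event swap: pid and tid now sit in each other's halves. */
		u.val64 = 0x0000006f000000c8ULL;	/* invented value */

		/* Undo the u64 swap, then swap the individual u32s, as
		 * perf_event__parse_id_sample() does when 'swapped'. */
		u.val64 = bswap_64(u.val64);
		u.val32[0] = bswap_32(u.val32[0]);
		u.val32[1] = bswap_32(u.val32[1]);

		printf("pid=%" PRIu32 " tid=%" PRIu32 "\n",
		       u.val32[0], u.val32[1]);
		return 0;
	}

The net effect of the three swaps is to exchange the two 32-bit halves while leaving each half in host byte order, which is exactly the damage the blanket u64 swap inflicts on packed u32 pairs.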
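The new swap_sample_id_all() helper in tools/perf/util/session.c works because the optional sample_id trailer begins at the first 8-byte-aligned offset after an event's variable-length payload (the comm string, the mmap filename) and consists purely of u64s from there to event->header.size. A small sketch of the offset arithmetic used by perf_event__comm_swap(), with an invented string; the ALIGN() macro here is a local stand-in for the kernel's:

	#include <stdio.h>
	#include <string.h>

	/* Round x up to the next multiple of a (a power of two). */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		const char comm[] = "bash";	/* invented COMM payload */

		/* The string is NUL-terminated and padded so whatever
		 * follows (the sample_id_all trailer of u64s) starts
		 * 8-byte aligned; the swap code skips the same amount. */
		size_t len  = strlen(comm) + 1;
		size_t skip = ALIGN(len, 8);

		printf("payload %zu bytes, trailer starts at offset %zu\n",
		       len, skip);
		return 0;
	}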
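dso__swap_init() in tools/perf/util/symbol.c compares the ELF image's EI_DATA byte against the host's byte order, probing the host with the classic one-integer trick: store 1 in an int and inspect its first byte. A standalone sketch of the same probe; the function name is illustrative, not from the perf source:

	#include <stdio.h>

	/* On a little-endian host the least significant byte is stored
	 * first, so the first byte of (unsigned int)1 reads back as 1;
	 * on a big-endian host it reads back as 0. */
	static int host_is_little_endian(void)
	{
		static const unsigned int endian = 1;

		return *(const unsigned char *)&endian == 1;
	}

	int main(void)
	{
		/* <elf.h> defines ELFDATA2LSB (1) and ELFDATA2MSB (2); a
		 * DSO needs swapping when its EI_DATA disagrees with this
		 * probe, which is the state dso__swap_init() records. */
		printf("host is %s-endian\n",
		       host_is_little_endian() ? "little" : "big");
		return 0;
	}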
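The DSO__SWAP() macro added to tools/perf/util/symbol.h picks bswap_16/32/64 by operand size inside a GNU statement expression, so one macro serves every load width; the .opd fixup above uses it as sym.st_value = DSO__SWAP(dso, u64, *opd). A self-contained variant of the same dispatch idea, with the dso state reduced to a plain flag (gcc/clang only, since it relies on statement expressions; the macro name is illustrative):

	#include <byteswap.h>
	#include <stdint.h>
	#include <stdio.h>

	/* 'needs_swap' stands in for dso->needs_swap == DSO_SWAP__YES;
	 * sizeof() selects the right bswap variant at compile time. */
	#define MAYBE_SWAP(needs_swap, type, val)		\
	({							\
		type ____r = (val);				\
		if (needs_swap) {				\
			switch (sizeof(____r)) {		\
			case 2: ____r = bswap_16(val); break;	\
			case 4: ____r = bswap_32(val); break;	\
			case 8: ____r = bswap_64(val); break;	\
			}					\
		}						\
		____r;						\
	})

	int main(void)
	{
		uint64_t opd_entry = 0x1122334455667788ULL; /* invented */

		printf("%#llx\n", (unsigned long long)
		       MAYBE_SWAP(1, uint64_t, opd_entry));
		return 0;
	}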
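The turbostat change above exists because get_counters() migrates the process from CPU to CPU to read per-CPU counters, leaving it pinned to whichever CPU it visited last; fork_it() must widen affinity again before forking the measured workload. turbostat rebuilds the full mask from its topology walk (for_all_cpus()), but the save-and-restore shape of the fix looks roughly like this sketch, which substitutes sched_getaffinity() for the topology walk and trims error reporting:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		int ncpus = sysconf(_SC_NPROCESSORS_CONF);
		size_t setsize = CPU_ALLOC_SIZE(ncpus);
		cpu_set_t *saved = CPU_ALLOC(ncpus);

		/* Remember the affinity mask we started with. */
		if (!saved || sched_getaffinity(0, setsize, saved))
			exit(3);

		/* ... pin to each CPU in turn to sample its counters,
		 * as turbostat's get_counters() does ... */

		/* Undo the pinning so a forked child is not stuck on
		 * the last CPU visited. */
		if (sched_setaffinity(0, setsize, saved))
			exit(3);

		CPU_FREE(saved);
		return 0;
	}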