-rw-r--r--  CREDITS  9
-rw-r--r--  Documentation/kernel-parameters.txt  2
-rw-r--r--  Documentation/sound/alsa/soc/machine.txt  6
-rw-r--r--  MAINTAINERS  34
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/configs/omap1_defconfig  5
-rw-r--r--  arch/arm/mach-at91/at91rm9200_devices.c  2
-rw-r--r--  arch/arm/mach-at91/at91sam9260.c  6
-rw-r--r--  arch/arm/mach-at91/at91sam9260_devices.c  2
-rw-r--r--  arch/arm/mach-at91/at91sam9261_devices.c  2
-rw-r--r--  arch/arm/mach-at91/at91sam9263_devices.c  2
-rw-r--r--  arch/arm/mach-at91/include/mach/system_rev.h  2
-rw-r--r--  arch/arm/mach-davinci/board-da850-evm.c  2
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c  2
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c  6
-rw-r--r--  arch/arm/mach-davinci/dm646x.c  1
-rw-r--r--  arch/arm/mach-davinci/include/mach/psc.h  5
-rw-r--r--  arch/arm/mach-davinci/psc.c  18
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c  9
-rw-r--r--  arch/arm/mach-msm/devices-iommu.c  1
-rw-r--r--  arch/arm/mach-mx5/imx51-dt.c  12
-rw-r--r--  arch/arm/mach-mx5/imx53-dt.c  12
-rw-r--r--  arch/arm/mach-mxs/include/mach/mx28.h  4
-rw-r--r--  arch/arm/mach-mxs/include/mach/mxs.h  1
-rw-r--r--  arch/arm/mach-mxs/mach-m28evk.c  2
-rw-r--r--  arch/arm/mach-mxs/mach-stmp378x_devb.c  2
-rw-r--r--  arch/arm/mach-mxs/module-tx28.c  4
-rw-r--r--  arch/arm/mach-omap1/clock_data.c  12
-rw-r--r--  arch/arm/mach-prima2/pm.c  1
-rw-r--r--  arch/arm/mach-prima2/prima2.c  1
-rw-r--r--  arch/arm/mach-s3c64xx/dev-spi.c  1
-rw-r--r--  arch/arm/mach-s3c64xx/s3c6400.c  2
-rw-r--r--  arch/arm/mach-s3c64xx/setup-fb-24bpp.c  2
-rw-r--r--  arch/arm/mach-sa1100/Makefile.boot  4
-rw-r--r--  arch/m68k/include/asm/unistd.h  4
-rw-r--r--  arch/m68k/kernel/syscalltable.S  2
-rw-r--r--  arch/tile/include/asm/irq.h  10
-rw-r--r--  arch/tile/kernel/irq.c  16
-rw-r--r--  arch/tile/kernel/pci-dma.c  1
-rw-r--r--  arch/tile/kernel/pci.c  1
-rw-r--r--  arch/tile/kernel/sysfs.c  1
-rw-r--r--  arch/tile/lib/exports.c  3
-rw-r--r--  arch/tile/mm/homecache.c  9
-rw-r--r--  arch/x86/Kconfig  8
-rw-r--r--  arch/x86/include/asm/e820.h  8
-rw-r--r--  arch/x86/include/asm/efi.h  5
-rw-r--r--  arch/x86/kernel/e820.c  3
-rw-r--r--  arch/x86/kernel/hpet.c  21
-rw-r--r--  arch/x86/kernel/setup.c  21
-rw-r--r--  arch/x86/mm/gup.c  2
-rw-r--r--  arch/x86/platform/efi/efi.c  29
-rw-r--r--  arch/x86/platform/efi/efi_64.c  17
-rw-r--r--  drivers/base/core.c  6
-rw-r--r--  drivers/md/bitmap.c  4
-rw-r--r--  drivers/md/md.c  27
-rw-r--r--  drivers/md/raid5.c  8
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c  8
-rw-r--r--  drivers/ptp/ptp_clock.c  4
-rw-r--r--  drivers/rapidio/devices/tsi721.c  41
-rw-r--r--  drivers/rapidio/devices/tsi721.h  2
-rw-r--r--  drivers/rtc/rtc-s3c.c  2
-rw-r--r--  drivers/spi/Kconfig  4
-rw-r--r--  drivers/spi/spi-ath79.c  1
-rw-r--r--  drivers/spi/spi-gpio.c  4
-rw-r--r--  drivers/spi/spi-nuc900.c  1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c  26
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c  6
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h  3
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c  3
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c  13
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c  3
-rw-r--r--  drivers/target/loopback/tcm_loop.c  41
-rw-r--r--  drivers/target/target_core_alua.c  27
-rw-r--r--  drivers/target/target_core_cdb.c  20
-rw-r--r--  drivers/target/target_core_configfs.c  11
-rw-r--r--  drivers/target/target_core_device.c  30
-rw-r--r--  drivers/target/target_core_file.c  20
-rw-r--r--  drivers/target/target_core_iblock.c  16
-rw-r--r--  drivers/target/target_core_pr.c  240
-rw-r--r--  drivers/target/target_core_pscsi.c  28
-rw-r--r--  drivers/target/target_core_rd.c  258
-rw-r--r--  drivers/target/target_core_tmr.c  4
-rw-r--r--  drivers/target/target_core_transport.c  260
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c  2
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c  3
-rw-r--r--  fs/btrfs/extent-tree.c  100
-rw-r--r--  fs/btrfs/extent_io.c  24
-rw-r--r--  fs/btrfs/volumes.c  2
-rw-r--r--  fs/cifs/connect.c  2
-rw-r--r--  fs/cifs/file.c  26
-rw-r--r--  fs/cifs/readdir.c  10
-rw-r--r--  fs/cifs/smbencrypt.c  6
-rw-r--r--  fs/proc/meminfo.c  7
-rw-r--r--  fs/proc/stat.c  4
-rw-r--r--  fs/xfs/xfs_bmap.c  20
-rw-r--r--  fs/xfs/xfs_export.c  8
-rw-r--r--  fs/xfs/xfs_log.c  348
-rw-r--r--  fs/xfs/xfs_trace.h  12
-rw-r--r--  include/asm-generic/unistd.h  8
-rw-r--r--  include/linux/compat.h  9
-rw-r--r--  include/linux/fs.h  2
-rw-r--r--  include/linux/mm.h  1
-rw-r--r--  include/linux/shrinker.h  2
-rw-r--r--  include/target/target_core_base.h  46
-rw-r--r--  include/target/target_core_transport.h  24
-rw-r--r--  kernel/events/core.c  4
-rw-r--r--  kernel/lockdep.c  8
-rw-r--r--  kernel/printk.c  3
-rw-r--r--  kernel/time/alarmtimer.c  2
-rw-r--r--  kernel/timer.c  2
-rw-r--r--  lib/dma-debug.c  2
-rw-r--r--  mm/huge_memory.c  16
-rw-r--r--  mm/hugetlb.c  1
-rw-r--r--  mm/migrate.c  2
-rw-r--r--  mm/page_alloc.c  10
-rw-r--r--  mm/vmalloc.c  2
-rw-r--r--  mm/vmscan.c  26
-rw-r--r--  security/tomoyo/realpath.c  10
-rw-r--r--  sound/pci/hda/patch_realtek.c  65
-rw-r--r--  sound/pci/hda/patch_sigmatel.c  47
-rw-r--r--  sound/pci/sis7019.c  64
-rw-r--r--  sound/soc/codecs/uda1380.c  4
-rw-r--r--  sound/soc/codecs/wm8994.c  12
-rw-r--r--  sound/soc/imx/Kconfig  2
-rw-r--r--  sound/soc/kirkwood/Kconfig  3
-rw-r--r--  sound/soc/pxa/Kconfig  3
-rw-r--r--  sound/soc/soc-utils.c  31
-rw-r--r--  tools/perf/builtin-stat.c  3
-rw-r--r--  tools/perf/util/header.c  2
129 files changed, 1237 insertions, 1213 deletions
diff --git a/CREDITS b/CREDITS
index 07e32a87d95..44fce988eaa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
N: Kees Cook
E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
S: (ask for current address)
+S: Portland, Oregon
S: USA
N: Robin Cornelius
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5e22c3f1f8b..81c287fad79 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,7 +320,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes
- amd_iommu= [HW,X86-84]
+ amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
fullflush - enable flushing of IO/TLB entries when
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 3e2ec9cbf39..d50c14df341 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -50,8 +50,7 @@ Machine DAI Configuration
The machine DAI configuration glues all the codec and CPU DAIs together. It can
also be used to set up the DAI system clock and for any machine related DAI
initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
@@ -83,8 +82,7 @@ Machine Power Map
The machine driver can optionally extend the codec power map and to become an
audio power map of the audio subsystem. This allows for automatic power up/down
of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
Machine Controls
diff --git a/MAINTAINERS b/MAINTAINERS
index 44756028499..b9db108f01c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -511,8 +511,8 @@ M: Joerg Roedel <joerg.roedel@amd.com>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
-F: arch/x86/kernel/amd_iommu*.c
-F: arch/x86/include/asm/amd_iommu*.h
+F: drivers/iommu/amd_iommu*.[ch]
+F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: arch/arm/mach-s3c24*/
+F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2440/
-F: arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c64xx/
+F: drivers/spi/spi-s3c*
+F: sound/soc/samsung/*
ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -4319,8 +4302,9 @@ F: include/linux/mm.h
F: mm/
MEMORY RESOURCE CONTROLLER
+M: Johannes Weiner <hannes@cmpxchg.org>
+M: Michal Hocko <mhocko@suse.cz>
M: Balbir Singh <bsingharora@gmail.com>
-M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
diff --git a/Makefile b/Makefile
index 12aafc20efb..d1ea73f74c2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a7e77758137..945a34f2a34 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
# CONFIG_ARM_THUMB is not set
CONFIG_PCCARD=y
CONFIG_OMAP_CF=y
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 66591fa53e0..ad930688358 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index b84a9f642f5..0d20677fbef 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
- CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
- CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 25e3464fb07..629fa977497 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index ae78f4d03b7..a178b58b0b9 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index ad017eb1f8d..d5fbac9ff4f 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h
index 8f4866045b4..ec164a4124c 100644
--- a/arch/arm/mach-at91/include/mach/system_rev.h
+++ b/arch/arm/mach-at91/include/mach/system_rev.h
@@ -19,7 +19,7 @@
#define BOARD_HAVE_NAND_16BIT (1 << 31)
static inline int board_have_nand_16bit(void)
{
- return system_rev & BOARD_HAVE_NAND_16BIT;
+ return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
}
#endif /* __ARCH_SYSTEM_REV_H__ */
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1d7d2499522..6659a90dbca 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
.tdm_slots = 2,
.serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_1,
+ .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
.txnumevt = 1,
.rxnumevt = 1,
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 1918ae71142..46e1f4173b9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
- .size = 28 * NAND_BLOCK_SIZE,
+ .size = 30 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index e574d7f837a..635bf774015 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
int val;
u32 value;
- if (!vpif_vsclkdis_reg || !cpld_client)
+ if (!vpif_vidclkctl_reg || !cpld_client)
return -ENXIO;
val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
return val;
spin_lock_irqsave(&vpif_reg_lock, flags);
- value = __raw_readl(vpif_vsclkdis_reg);
+ value = __raw_readl(vpif_vidclkctl_reg);
if (mux_mode) {
val &= VPIF_INPUT_TWO_CHANNEL;
value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
val |= VPIF_INPUT_ONE_CHANNEL;
value &= ~VIDCH1CLK;
}
- __raw_writel(value, vpif_vsclkdis_reg);
+ __raw_writel(value, vpif_vidclkctl_reg);
spin_unlock_irqrestore(&vpif_reg_lock, flags);
err = i2c_smbus_write_byte(cpld_client, val);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 0b68ed534f8..af27c130595 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DM646X_LPSC_C64X_CPU,
- .flags = PSC_DSP,
.usecount = 1, /* REVISIT how to disable? */
};
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index fa59c097223..8bc3fc25617 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -233,7 +233,7 @@
#define PTCMD 0x120
#define PTSTAT 0x128
#define PDSTAT 0x200
-#define PDCTL1 0x304
+#define PDCTL 0x300
#define MDSTAT 0x800
#define MDCTL 0xA00
@@ -244,7 +244,10 @@
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x3f
+#define PDSTAT_STATE_MASK 0x1f
#define MDCTL_FORCE BIT(31)
+#define PDCTL_NEXT BIT(1)
+#define PDCTL_EPCGOOD BIT(8)
#ifndef __ASSEMBLER__
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 1fb6bdff38c..d7e210f4b55 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
unsigned int id, bool enable, u32 flags)
{
- u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+ u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
mdctl |= MDCTL_FORCE;
__raw_writel(mdctl, psc_base + MDCTL + 4 * id);
- pdstat = __raw_readl(psc_base + PDSTAT);
- if ((pdstat & 0x00000001) == 0) {
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x1;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_NEXT;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
epcpr = __raw_readl(psc_base + EPCPR);
} while ((((epcpr >> domain) & 1) == 0));
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x100;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_EPCGOOD;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
} else {
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 9cd860a27af..8deb012189b 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -37,14 +37,15 @@ static void __init imx6q_map_io(void)
imx6q_clock_map_io();
}
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx6q gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx6q_irq_match[] __initconst = {
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index 24030d0da6e..0fb7a17df39 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
#include <mach/irqs.h>
#include <mach/iommu.h>
diff --git a/arch/arm/mach-mx5/imx51-dt.c b/arch/arm/mach-mx5/imx51-dt.c
index ccc61585659..596edd967db 100644
--- a/arch/arm/mach-mx5/imx51-dt.c
+++ b/arch/arm/mach-mx5/imx51-dt.c
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 4; /* imx51 gets 4 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx51_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mx5/imx53-dt.c b/arch/arm/mach-mx5/imx53-dt.c
index ccaa0b81b76..85bfd5ff21b 100644
--- a/arch/arm/mach-mx5/imx53-dt.c
+++ b/arch/arm/mach-mx5/imx53-dt.c
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx53 gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx53_irq_match[] __initconst = {
diff --git a/arch/arm/mach-mxs/include/mach/mx28.h b/arch/arm/mach-mxs/include/mach/mx28.h
index 75d86118b76..30c7990f3c0 100644
--- a/arch/arm/mach-mxs/include/mach/mx28.h
+++ b/arch/arm/mach-mxs/include/mach/mx28.h
@@ -104,8 +104,8 @@
#define MX28_INT_CAN1 9
#define MX28_INT_LRADC_TOUCH 10
#define MX28_INT_HSADC 13
-#define MX28_INT_IRADC_THRESH0 14
-#define MX28_INT_IRADC_THRESH1 15
+#define MX28_INT_LRADC_THRESH0 14
+#define MX28_INT_LRADC_THRESH1 15
#define MX28_INT_LRADC_CH0 16
#define MX28_INT_LRADC_CH1 17
#define MX28_INT_LRADC_CH2 18
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index 0d2d2b47099..bde5f663474 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -30,6 +30,7 @@
*/
#define cpu_is_mx23() ( \
machine_is_mx23evk() || \
+ machine_is_stmp378x() || \
0)
#define cpu_is_mx28() ( \
machine_is_mx28evk() || \
diff --git a/arch/arm/mach-mxs/mach-m28evk.c b/arch/arm/mach-mxs/mach-m28evk.c
index 3b1681e4f49..6b00577b702 100644
--- a/arch/arm/mach-mxs/mach-m28evk.c
+++ b/arch/arm/mach-mxs/mach-m28evk.c
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
MACHINE_START(M28EVK, "DENX M28 EVK")
.map_io = mx28_map_io,
.init_irq = mx28_init_irq,
- .init_machine = m28evk_init,
.timer = &m28evk_timer,
+ .init_machine = m28evk_init,
MACHINE_END
diff --git a/arch/arm/mach-mxs/mach-stmp378x_devb.c b/arch/arm/mach-mxs/mach-stmp378x_devb.c
index 177e53123a0..6834dea38c0 100644
--- a/arch/arm/mach-mxs/mach-stmp378x_devb.c
+++ b/arch/arm/mach-mxs/mach-stmp378x_devb.c
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
MACHINE_START(STMP378X, "STMP378X")
.map_io = mx23_map_io,
.init_irq = mx23_init_irq,
- .init_machine = stmp378x_dvb_init,
.timer = &stmp378x_dvb_timer,
+ .init_machine = stmp378x_dvb_init,
MACHINE_END
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
index 0fcff47009c..9a7b08b2a92 100644
--- a/arch/arm/mach-mxs/module-tx28.c
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
MX28_PAD_ENET0_CRS__ENET1_RX_EN,
};
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index 1297bb58869..9ff90a744a2 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -16,6 +16,8 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <asm/mach-types.h> /* for machine_is_* */
@@ -927,16 +929,22 @@ int __init omap1_clk_init(void)
void __init omap1_clk_late_init(void)
{
- if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+ unsigned long rate = ck_dpll1.rate;
+
+ if (rate >= OMAP1_DPLL1_SANE_VALUE)
return;
+ /* System booting at unusable rate, force reprogramming of DPLL1 */
+ ck_dpll1_p->rate = 0;
+
/* Find the highest supported frequency and enable it */
if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
pr_err("System frequencies not set, using default. Check your config.\n");
omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+ omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
}
propagate_rate(&ck_dpll1);
omap1_show_rates();
+ loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
}
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index cb53160f6c5..26ebb57719d 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index ef555c04196..a12b689a870 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 5e6b42089eb..3341fd11872 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 7a3bc32df42..51c00f2453c 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
}
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
.name = "s3c6400-core",
};
diff --git a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
index 83d2afb79e9..2cf80026c58 100644
--- a/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
+++ b/arch/arm/mach-s3c64xx/setup-fb-24bpp.c
@@ -20,7 +20,7 @@
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
{
s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
diff --git a/arch/arm/mach-sa1100/Makefile.boot b/arch/arm/mach-sa1100/Makefile.boot
index 5a616f6e561..f7951aa0456 100644
--- a/arch/arm/mach-sa1100/Makefile.boot
+++ b/arch/arm/mach-sa1100/Makefile.boot
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+ zreladdr-y += 0xc0208000
else
zreladdr-y += 0xc0008000
endif
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 43f984e9397..303192fc926 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -350,10 +350,12 @@
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
#define __NR_setns 344
+#define __NR_process_vm_readv 345
+#define __NR_process_vm_writev 346
#ifdef __KERNEL__
-#define NR_syscalls 345
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index c468f2edaa8..ce827b37611 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns
+ .long sys_process_vm_readv /* 345 */
+ .long sys_process_vm_writev
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 94e9a511de8..f80f8ceabc6 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -74,16 +74,6 @@ enum {
*/
void tile_irq_activate(unsigned int irq, int tile_irq_type);
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core. We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
void setup_irq_regs(void);
#endif /* _ASM_TILE_IRQ_H */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index aa0134db2dd..02e62806501 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* Remove an irq from the disabled mask. If we're in an interrupt
* context, defer enabling the HW interrupt until we leave.
*/
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+ get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
if (__get_cpu_var(irq_depth) == 0)
- unmask_irqs(1UL << irq);
+ unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(enable_percpu_irq);
/*
* Add an irq to the disabled mask. We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
* in an interrupt context, the return path is careful to avoid
* unmasking a newly disabled interrupt.
*/
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) |= (1UL << irq);
- mask_irqs(1UL << irq);
+ get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+ mask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(disable_percpu_irq);
/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
static struct irq_chip tile_irq_chip = {
.name = "tile_irq_chip",
+ .irq_enable = tile_irq_chip_enable,
+ .irq_disable = tile_irq_chip_disable,
.irq_ack = tile_irq_chip_ack,
.irq_eoi = tile_irq_chip_eoi,
.irq_mask = tile_irq_chip_mask,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658f2ce426a..b3ed19f8779 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 2a8014cb1ff..9d610d3fb11 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/sections.h>
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index b671a86f451..60290826809 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/stat.h>
#include <hv/hypervisor.h>
/* Return a string queried from the hypervisor, truncated to page size. */
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index a87d2a859ba..2a81d32de0d 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index cbe6f4f9eca..1cc6ae477c9 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
VM_BUG_ON(!virt_addr_valid((void *)addr));
page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
- int pages = (1 << order);
homecache_change_page_home(page, order, initial_page_home());
- while (pages--)
- __free_page(page++);
+ if (order == 0) {
+ free_hot_cold_page(page, 0);
+ } else {
+ init_page_count(page);
+ __free_pages(page, order);
+ }
}
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cb9a1044a77..efb42949cc0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -390,7 +390,7 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
systems which do not have the PCI legacy interfaces (Moorestown,
Medfield). If you are building for a PC class system say N here.
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+ bool
config X86_MRST
bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
select SPI
select INTEL_SCU_IPC
select X86_PLATFORM_DEVICES
+ select X86_INTEL_MID
---help---
Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
Internet Device(MID) platform. Moorestown consists of two chips:
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 908b96957d8..c9547033e38 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -53,6 +53,13 @@
*/
#define E820_RESERVED_KERN 128
+/*
+ * Address ranges that need to be mapped by the kernel direct
+ * mapping. This is used to make sure regions such as
+ * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
+ */
+#define E820_RESERVED_EFI 129
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct e820entry {
@@ -115,6 +122,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
}
#endif
+extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 7093e4a6a0b..b8d8bfcd44a 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,8 +33,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
-
#else /* !CONFIG_X86_32 */
extern u64 efi_call0(void *fp);
@@ -84,9 +82,6 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
- u32 type);
-
#endif /* CONFIG_X86_32 */
extern int add_efi_memmap;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 303a0e48f07..65ffd110a81 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -135,6 +135,7 @@ static void __init e820_print_type(u32 type)
printk(KERN_CONT "(usable)");
break;
case E820_RESERVED:
+ case E820_RESERVED_EFI:
printk(KERN_CONT "(reserved)");
break;
case E820_ACPI:
@@ -783,7 +784,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
/*
* Find the highest page frame number we have available
*/
-static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
int i;
unsigned long last_pfn = 0;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b946a9eac7d..1bb0bf4d92c 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+static void hpet_disable_rtc_channel(void)
+{
+ unsigned long cfg;
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+}
+
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
return 0;
hpet_rtc_flags &= ~bit_mask;
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
+
return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, delta;
+ unsigned int delta;
int lost_ints = -1;
- if (unlikely(!hpet_rtc_flags)) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cf0ef986cb6..9a9e40fb091 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -691,6 +691,8 @@ early_param("reservelow", parse_reservelow);
void __init setup_arch(char **cmdline_p)
{
+ unsigned long end_pfn;
+
#ifdef CONFIG_X86_32
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
visws_early_detect();
@@ -932,7 +934,24 @@ void __init setup_arch(char **cmdline_p)
init_gbpages();
/* max_pfn_mapped is updated here */
- max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+ end_pfn = max_low_pfn;
+
+#ifdef CONFIG_X86_64
+ /*
+ * There may be regions after the last E820_RAM region that we
+ * want to include in the kernel direct mapping, such as
+ * EFI_RUNTIME_SERVICES_DATA.
+ */
+ if (efi_enabled) {
+ unsigned long efi_end;
+
+ efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
+ if (efi_end > max_low_pfn)
+ end_pfn = efi_end;
+ }
+#endif
+
+ max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
#ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ea305856151..dd74e46828c 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 37718f0f053..c9718a16be1 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -323,10 +323,13 @@ static void __init do_add_efi_memmap(void)
case EFI_UNUSABLE_MEMORY:
e820_type = E820_UNUSABLE;
break;
+ case EFI_RUNTIME_SERVICES_DATA:
+ e820_type = E820_RESERVED_EFI;
+ break;
default:
/*
* EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
- * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+ * EFI_MEMORY_MAPPED_IO
* EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
*/
e820_type = E820_RESERVED;
@@ -671,10 +674,21 @@ void __init efi_enter_virtual_mode(void)
end_pfn = PFN_UP(end);
if (end_pfn <= max_low_pfn_mapped
|| (end_pfn > (1UL << (32 - PAGE_SHIFT))
- && end_pfn <= max_pfn_mapped))
+ && end_pfn <= max_pfn_mapped)) {
va = __va(md->phys_addr);
- else
- va = efi_ioremap(md->phys_addr, size, md->type);
+
+ if (!(md->attribute & EFI_MEMORY_WB)) {
+ addr = (u64) (unsigned long)va;
+ npages = md->num_pages;
+ memrange_efi_to_native(&addr, &npages);
+ set_memory_uc(addr, npages);
+ }
+ } else {
+ if (!(md->attribute & EFI_MEMORY_WB))
+ va = ioremap_nocache(md->phys_addr, size);
+ else
+ va = ioremap_cache(md->phys_addr, size);
+ }
md->virt_addr = (u64) (unsigned long) va;
@@ -684,13 +698,6 @@ void __init efi_enter_virtual_mode(void)
continue;
}
- if (!(md->attribute & EFI_MEMORY_WB)) {
- addr = md->virt_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&addr, &npages);
- set_memory_uc(addr, npages);
- }
-
systab = (u64) (unsigned long) efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e265..312250c6b2d 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -80,20 +80,3 @@ void __init efi_call_phys_epilog(void)
local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
- u32 type)
-{
- unsigned long last_map_pfn;
-
- if (type == EFI_MEMORY_MAPPED_IO)
- return ioremap(phys_addr, size);
-
- last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
- if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
- unsigned long top = last_map_pfn << PAGE_SHIFT;
- efi_ioremap(top, size - (top - phys_addr), type);
- }
-
- return (void __iomem *)__va(phys_addr);
-}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d8b3d89db04..919daa7cd5b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721b..b6907118283 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e..ee981737edf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e2609217..31670f8d6b6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (dev->written)
s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else {
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
- if (test_bit(R5_WriteError, &dev->flags)) {
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev->nr_pending);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2..1187a1169eb 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546..10451a15e82 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10c..691b1ab1a3d 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb140..822e54c394d 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f0..5b979d9cc33 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df541..8ba4510a951 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5c..acc88b4d286 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41b..0094c645ff0 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 21c70b2b831..182e9c87382 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271..8599545cdf9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
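Several hunks in iscsi_target.c above (and the matching one in iscsi_target_nego.c further down) fold a kzalloc() followed by memcpy() into a single kmemdup() call, which keeps the same NULL-on-failure behaviour. A minimal standalone sketch of the equivalence; HDR_LEN and the helper name are placeholders, not the driver's identifiers:

#include <linux/slab.h>
#include <linux/string.h>

#define HDR_LEN 48	/* stand-in for ISCSI_HDR_LEN */

static void *dup_pdu_header(const void *buf)
{
	/*
	 * Before: p = kzalloc(HDR_LEN, GFP_KERNEL); if (p) memcpy(p, buf, HDR_LEN);
	 * After:  one allocate-and-copy call, NULL on allocation failure.
	 */
	return kmemdup(buf, HDR_LEN, GFP_KERNEL);
}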
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f..1cd6ce373b8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae..f1a02dad05a 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e50..101b1beb3bc 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93c..d734bdec24f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
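The iscsi_target_login.c hunks above do two things: free the just-allocated session on every early error return instead of leaking it, and test transport_init_session() with IS_ERR() since it reports failure with an ERR_PTR() value rather than NULL. A minimal sketch of that shape, assuming a hypothetical init callback; none of these names are the driver's own:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct sess_sketch {
	void *se_sess;
};

static int alloc_sess_sketch(struct sess_sketch **out, void *(*init_fn)(void))
{
	struct sess_sketch *sess = kzalloc(sizeof(*sess), GFP_KERNEL);

	if (!sess)
		return -ENOMEM;

	sess->se_sess = init_fn();
	if (IS_ERR(sess->se_sess)) {
		kfree(sess);	/* every failure path undoes the allocation */
		return -ENOMEM;
	}

	*out = sess;
	return 0;
}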
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9..98936cb7c29 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6..81d5832fbbd 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8..1dcbef499d6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b824..831468b3163 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4..93d4f6a1b79 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f..9b863942547 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bf..b4864fba4ef 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
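The fd_do_readv()/fd_do_writev() hunks above stop indexing the scatterlist as a flat array and use the for_each_sg() iterator instead, which follows chained scatterlist tables via sg_next(). A minimal sketch of the iovec-filling pattern in isolation; the helper name is illustrative, and the real functions also allocate the iovec array and issue the vfs_readv()/vfs_writev():

#include <linux/scatterlist.h>
#include <linux/uio.h>

static void fill_iov_from_sg(struct iovec *iov, struct scatterlist *sgl,
			     unsigned int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);	/* kernel mapping, as in fd_do_readv() */
	}
}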
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe2926..4aa99220443 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54..95dee7074ae 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
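The conversion repeated throughout target_core_pr.c (and the other backends below) drops the PYX_TRANSPORT_* return codes in favour of a plain negative errno plus cmd->scsi_sense_reason, which the transport core later translates into a CHECK CONDITION with matching sense data. A minimal sketch of the new convention; check_parameter_list_sketch is a hypothetical helper, not a function added by this patch:

#include <linux/errno.h>
#include <target/target_core_base.h>

static int check_parameter_list_sketch(struct se_cmd *cmd)
{
	if (cmd->data_length < 24) {
		/* Record why the command failed ... */
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		/* ... and report the failure itself as an ordinary errno. */
		return -EINVAL;
	}
	return 0;
}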
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe42..8b15e56b038 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f1..02e51faa2f4 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
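
The hunk above replaces the hand-rolled walk over two scatterlists with the kernel's sg mapping iterator (sg_miter_start()/sg_miter_next()/sg_miter_stop() from <linux/scatterlist.h>): one side of the copy is now mapped a chunk at a time and m.consumed reports how much of the chunk was used. A minimal sketch of that pattern, assuming a hypothetical helper that copies a flat buffer into a scatterlist; it illustrates the API and is not code from the patch:

	#include <linux/scatterlist.h>
	#include <linux/string.h>
	#include <linux/kernel.h>

	/* Hypothetical helper (sketch only, not a loadable module): copy 'len'
	 * bytes from 'src' into the scatterlist, letting the sg mapping
	 * iterator map one sg chunk at a time. */
	static void copy_buf_to_sg(struct scatterlist *sgl, unsigned int nents,
				   const char *src, size_t len)
	{
		struct sg_mapping_iter m;

		/* SG_MITER_TO_SG: we are writing into the scatterlist pages */
		sg_miter_start(&m, sgl, nents, SG_MITER_TO_SG);
		while (len && sg_miter_next(&m)) {
			size_t chunk = min(m.length, len);

			memcpy(m.addr, src, chunk);	/* m.addr is the mapped chunk */
			m.consumed = chunk;		/* report how much we used */
			src += chunk;
			len -= chunk;
		}
		sg_miter_stop(&m);			/* unmap and flush the last chunk */
	}
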
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
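
For reference, the reworked rd_MEMCPY_do_task() above derives the starting ramdisk page and intra-page offset from the byte offset alone: tmp = LBA * block_size, then do_div(tmp, PAGE_SIZE) leaves the quotient (page index) in tmp and returns the remainder (offset). A plain userspace sketch of the same arithmetic; the 4 KiB page size and the sample values are assumptions for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define RD_PAGE_SIZE 4096u	/* assumed page size for the example */

	int main(void)
	{
		uint64_t lba = 1027, block_size = 512;
		uint64_t bytes = lba * block_size;

		/* do_div(bytes, PAGE_SIZE) in the kernel: the quotient stays in
		 * 'bytes', the remainder is returned.  Plain C equivalent: */
		uint32_t rd_offset = bytes % RD_PAGE_SIZE;
		uint64_t rd_page = bytes / RD_PAGE_SIZE;

		printf("page %llu, offset %u\n",
		       (unsigned long long)rd_page, rd_offset);
		return 0;
	}
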
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df629..684522805a1 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f..0257658e2e3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
* possible unsolicited write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
@@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
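
The SBC-3 wording quoted in the new comment is exactly what `cdb[4] ? : 256` implements: the six-byte READ/WRITE CDBs carry an 8-bit TRANSFER LENGTH where 0 means 256 logical blocks. A standalone illustration, with the GCC `?:` shorthand spelled out:

	#include <stdint.h>
	#include <stdio.h>

	/* TRANSFER LENGTH decoding for 6-byte READ/WRITE CDBs (SBC-3): 0 => 256 */
	static uint32_t sectors_from_cdb6(const uint8_t *cdb)
	{
		return cdb[4] ? cdb[4] : 256;
	}

	int main(void)
	{
		uint8_t cdb_a[6] = { 0x0a /* WRITE(6) */, 0, 0, 0, 0x10, 0 };
		uint8_t cdb_b[6] = { 0x0a /* WRITE(6) */, 0, 0, 0, 0x00, 0 };

		printf("%u %u\n", sectors_from_cdb6(cdb_a), sectors_from_cdb6(cdb_b));
		/* prints: 16 256 */
		return 0;
	}
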
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
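
The WRITE_10/12/16 and XDWRITEREAD_10 hunks above all reduce to the same test: FUA is bit 3 of the flags byte (byte 1 for the 10/12/16-byte CDBs, byte 10 for the 32-byte variable-length forms, which is why the removed XDWRITEREAD_32 line read `cdb[10] & 0x8`), and it now sets SCF_FUA instead of the removed t_tasks_fua field. A tiny userspace illustration of the bit test; the helper is hypothetical:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* FUA is bit 3 of the flags byte: byte 1 for 10/12/16-byte CDBs,
	 * byte 10 for the 32-byte variable-length forms. */
	static bool cdb_fua(const uint8_t *cdb, bool variable_length)
	{
		return (cdb[variable_length ? 10 : 1] & 0x08) != 0;
	}

	int main(void)
	{
		uint8_t write10[10] = { 0x2a /* WRITE(10) */, 0x08 };

		printf("FUA=%d\n", cdb_fua(write10, false));	/* FUA=1 */
		return 0;
	}
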
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
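
The common thread in the target_core_transport.c hunks is the error convention: a failing path no longer returns a PYX_TRANSPORT_* code for the core to translate, it records a TCM sense reason on the command and returns a plain negative errno, and transport_generic_request_failure() then switches directly on cmd->scsi_sense_reason. A minimal sketch of that convention with a made-up check; it is not code from the patch:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/dma-direction.h>
	#include <target/target_core_base.h>

	/* Sketch only (hypothetical check): record a TCM sense reason and
	 * return a plain errno; the core maps the reason to sense data. */
	static int example_reject_write(struct se_cmd *cmd, bool read_only)
	{
		if (read_only && cmd->data_direction == DMA_TO_DEVICE) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			return -EACCES;	/* errno only signals failure here */
		}
		return 0;
	}
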
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c61..71fc9cea5dc 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca4..9402b7387ca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f0d5718d258..2ad813674d7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5107,11 +5107,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root = orig_root->fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group_cache *used_block_group;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
- int last_ptr_loop = 0;
int loop = 0;
int index = 0;
int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5173,6 +5173,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
+ used_block_group = block_group;
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if its not cached.
@@ -5210,6 +5211,7 @@ search:
u64 offset;
int cached;
+ used_block_group = block_group;
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
@@ -5286,71 +5288,62 @@ alloc:
spin_unlock(&block_group->free_space_ctl->tree_lock);
/*
- * Ok we want to try and use the cluster allocator, so lets look
- * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
- * have tried the cluster allocator plenty of times at this
- * point and not have found anything, so we are likely way too
- * fragmented for the clustering stuff to find anything, so lets
- * just skip it and let the allocator find whatever block it can
- * find
+ * Ok we want to try and use the cluster allocator, so
+ * lets look there
*/
- if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+ if (last_ptr) {
/*
* the refill lock keeps out other
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
- if (!last_ptr->block_group ||
- last_ptr->block_group->ro ||
- !block_group_bits(last_ptr->block_group, data))
+ used_block_group = last_ptr->block_group;
+ if (used_block_group != block_group &&
+ (!used_block_group ||
+ used_block_group->ro ||
+ !block_group_bits(used_block_group, data))) {
+ used_block_group = block_group;
goto refill_cluster;
+ }
+
+ if (used_block_group != block_group)
+ btrfs_get_block_group(used_block_group);
- offset = btrfs_alloc_from_cluster(block_group, last_ptr,
- num_bytes, search_start);
+ offset = btrfs_alloc_from_cluster(used_block_group,
+ last_ptr, num_bytes, used_block_group->key.objectid);
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
- spin_lock(&last_ptr->lock);
- /*
- * whoops, this cluster doesn't actually point to
- * this block group. Get a ref on the block
- * group is does point to and try again
- */
- if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group &&
- index <=
- get_block_group_index(last_ptr->block_group)) {
-
- btrfs_put_block_group(block_group);
- block_group = last_ptr->block_group;
- btrfs_get_block_group(block_group);
- spin_unlock(&last_ptr->lock);
- spin_unlock(&last_ptr->refill_lock);
-
- last_ptr_loop = 1;
- search_start = block_group->key.objectid;
- /*
- * we know this block group is properly
- * in the list because
- * btrfs_remove_block_group, drops the
- * cluster before it removes the block
- * group from the list
- */
- goto have_block_group;
+ WARN_ON(last_ptr->block_group != used_block_group);
+ if (used_block_group != block_group) {
+ btrfs_put_block_group(used_block_group);
+ used_block_group = block_group;
}
- spin_unlock(&last_ptr->lock);
refill_cluster:
+ BUG_ON(used_block_group != block_group);
+ /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+ * set up a new cluster, so let's just skip it
+ * and let the allocator find whatever block
+ * it can find. If we reach this point, we
+ * will have tried the cluster allocator
+ * plenty of times and not have found
+ * anything, so we are likely way too
+ * fragmented for the clustering stuff to find
+ * anything. */
+ if (loop >= LOOP_NO_EMPTY_SIZE) {
+ spin_unlock(&last_ptr->refill_lock);
+ goto unclustered_alloc;
+ }
+
/*
* this cluster didn't work out, free it and
* start over
*/
btrfs_return_cluster_to_free_space(NULL, last_ptr);
- last_ptr_loop = 0;
-
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
@@ -5390,6 +5383,7 @@ refill_cluster:
goto loop;
}
+unclustered_alloc:
offset = btrfs_find_space_for_alloc(block_group, search_start,
num_bytes, empty_size);
/*
@@ -5416,14 +5410,14 @@ checks:
search_start = stripe_align(root, offset);
/* move on to the next group */
if (search_start + num_bytes >= search_end) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
/* move on to the next group */
if (search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ used_block_group->key.objectid + used_block_group->key.offset) {
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5431,14 +5425,14 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
- ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+ ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
alloc_type);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
@@ -5447,15 +5441,19 @@ checks:
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
break;
loop:
failed_cluster_refill = false;
failed_alloc = false;
BUG_ON(index != get_block_group_index(block_group));
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
}
up_read(&space_info->groups_sem);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index be1bf627a14..49f3c9dc09f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -935,8 +935,10 @@ again:
node = tree_search(tree, start);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
*/
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
this_end = last_start - 1;
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
/*
* Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
*/
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c37433d3cd8..0a8c8f8304b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1611,7 +1611,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+ bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d6a972df033..8cd4b52d421 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
+ try_to_freeze();
+
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index cf0b1539b32..4dd9283885e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
lock->type, lock->netfid, conf_lock);
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
mutex_unlock(&cinode->lock_mutex);
}
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if no locks prevent us but we need to send a request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
bool wait)
@@ -778,6 +791,13 @@ try_again:
return rc;
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
+ * send a request to the server, or 1 otherwise.
+ */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
return rc;
}
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if we need to send a request to the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 5de03ec2014..a090bbe6ee2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
rc);
return rc;
}
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
}
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
if (rc)
return -ENOENT;
}
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 7cacba12b8f..80d85088193 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
{
int rc;
int len;
- __u16 wpwd[129];
+ __le16 wpwd[129];
/* Password cannot be longer than 128 characters */
if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
*wpwd = 0; /* Ensure string is null terminated */
}
- rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
- memset(wpwd, 0, 129 * sizeof(__u16));
+ rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+ memset(wpwd, 0, 129 * sizeof(__le16));
return rc;
}
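
The __u16 -> __le16 change documents (and lets sparse check) that the NT hash is MD4 over the password encoded as UTF-16 little-endian regardless of host byte order. A userspace sketch of that encoding step for an ASCII-only password; the helper name and buffer size are illustrative:

	#define _DEFAULT_SOURCE
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Encode an ASCII password as UTF-16LE, as done before MD4-hashing it. */
	static size_t ascii_to_utf16le(const char *pw, uint16_t *out, size_t max_chars)
	{
		size_t i, len = strnlen(pw, max_chars);

		for (i = 0; i < len; i++)
			out[i] = htole16((uint8_t)pw[i]);	/* explicit LE on any host */
		return len * sizeof(uint16_t);			/* bytes to feed to MD4 */
	}

	int main(void)
	{
		uint16_t wpwd[128];
		size_t n = ascii_to_utf16le("secret", wpwd, 128);

		printf("%zu bytes\n", n);	/* 12 bytes */
		return 0;
	}
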
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 586174168e2..80e4645f799 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(i.freeswap),
K(global_page_state(NR_FILE_DIRTY)),
K(global_page_state(NR_WRITEBACK)),
- K(global_page_state(NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(global_page_state(NR_ANON_PAGES)
+ global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ K(global_page_state(NR_ANON_PAGES)),
#endif
- ),
K(global_page_state(NR_FILE_MAPPED)),
K(global_page_state(NR_SHMEM)),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 42b274da92c..2a30d67dd6b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
idle = kstat_cpu(cpu).cpustat.idle;
idle = cputime64_add(idle, arch_idle_time(cpu));
} else
- idle = usecs_to_cputime(idle_time);
+ idle = nsecs_to_jiffies64(1000 * idle_time);
return idle;
}
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
/* !NO_HZ so we can rely on cpustat.iowait */
iowait = kstat_cpu(cpu).cpustat.iowait;
else
- iowait = usecs_to_cputime(iowait_time);
+ iowait = nsecs_to_jiffies64(1000 * iowait_time);
return iowait;
}
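
The /proc/stat change stops converting the NO_HZ idle/iowait counters with usecs_to_cputime() and instead goes microseconds -> nanoseconds -> 64-bit jiffies, presumably to avoid truncating large values in the narrower intermediate type. The conversion itself is plain unit arithmetic, illustrated here with an assumed HZ of 100 (the real nsecs_to_jiffies64() uses a more careful fixed-point form):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ 100	/* assumed tick rate for the example */

	/* nsecs_to_jiffies64()-style conversion: ns -> jiffies at HZ ticks/sec */
	static uint64_t ns_to_jiffies64(uint64_t ns)
	{
		return ns / (1000000000ull / HZ);
	}

	int main(void)
	{
		uint64_t idle_us = 5000000;	/* 5 seconds of idle time, in usecs */

		printf("%llu jiffies\n",
		       (unsigned long long)ns_to_jiffies64(1000 * idle_us));
		/* prints: 500 (5 s * 100 Hz) */
		return 0;
	}
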
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index c68baeb0974..d0ab7883705 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
int tryagain;
int error;
+ ASSERT(ap->length);
+
mp = ap->ip->i_mount;
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
int error;
int rt;
+ ASSERT(bma->length > 0);
+
rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
/*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL);
+ ASSERT(len > 0);
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
- bma.length = len;
bma.offset = bno;
+ /*
+ * There's a 32/64 bit type mismatch between the
+ * allocation length request (which can be 64 bits in
+ * length) and the bma length request, which is
+ * xfs_extlen_t and therefore 32 bits. Hence we have to
+ * check for 32-bit overflows and handle them here.
+ */
+ if (len > (xfs_filblks_t)MAXEXTLEN)
+ bma.length = MAXEXTLEN;
+ else
+ bma.length = len;
+
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
error = xfs_bmapi_allocate(&bma, flags);
if (error)
goto error0;
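
The new comment in xfs_bmapi_write() is the whole story: the requested length is a 64-bit xfs_filblks_t while bma.length is a 32-bit xfs_extlen_t, so each allocation pass is clamped to MAXEXTLEN and the surrounding loop keeps going until the range is mapped. A minimal illustration of the clamp; the limit value used below is an assumption, not the real MAXEXTLEN constant:

	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_MAXEXTLEN ((uint32_t)0x001FFFFF)	/* assumed per-extent limit */

	/* Clamp a 64-bit block count to what a 32-bit extent length can carry. */
	static uint32_t clamp_alloc_len(uint64_t len)
	{
		return len > EXAMPLE_MAXEXTLEN ? EXAMPLE_MAXEXTLEN : (uint32_t)len;
	}

	int main(void)
	{
		printf("%u %u\n", clamp_alloc_len(100),
		       clamp_alloc_len(1ull << 32));	/* 100, then the clamp value */
		return 0;
	}
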
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index da108977b21..558910f5e3c 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
switch (fileid_type) {
case FILEID_INO32_GEN_PARENT:
spin_lock(&dentry->d_lock);
- fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN:
- fid->i32.ino = inode->i_ino;
+ fid->i32.ino = XFS_I(inode)->i_ino;
fid->i32.gen = inode->i_generation;
break;
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
spin_lock(&dentry->d_lock);
- fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- fid64->ino = inode->i_ino;
+ fid64->ino = XFS_I(inode)->i_ino;
fid64->gen = inode->i_generation;
break;
}
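
The xfs_export.c hunks build NFS file handles from the XFS inode's own i_ino rather than the VFS inode's: on a 32-bit kernel inode->i_ino is an unsigned long and silently truncates a 64-bit XFS inode number, while XFS_I(inode)->i_ino keeps the full value. A small userspace demonstration of the truncation, using plain integer types in place of the kernel ones:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t xfs_ino = 0x100000002ull;	/* a 64-bit inode number */
		uint32_t vfs_ino = (uint32_t)xfs_ino;	/* what a 32-bit 'unsigned long' keeps */

		printf("xfs: %" PRIx64 "  truncated: %" PRIx32 "\n", xfs_ino, vfs_ino);
		/* a file handle built from the truncated value names the wrong inode */
		return 0;
	}
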
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a14cd89fe46..34817adf4b9 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -150,6 +150,117 @@ xlog_grant_add_space(
} while (head_val != old);
}
+STATIC bool
+xlog_reserveq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+ need_bytes = tic->t_unit_res * tic->t_cnt;
+ else
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_grant_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_regrant_write_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_grant_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+ trace_xfs_log_grant_wake(log, tic);
+
+ spin_lock(&log->l_grant_reserve_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_writeq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+ trace_xfs_log_regrant_write_wake(log, tic);
+
+ spin_lock(&log->l_grant_write_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
@@ -350,8 +461,19 @@ xfs_log_reserve(
retval = xlog_grant_log_space(log, internal_ticket);
}
+ if (unlikely(retval)) {
+ /*
+ * If we are failing, make sure the ticket doesn't have any
+ * current reservations. We don't want to add this back
+ * when the ticket/transaction gets cancelled.
+ */
+ internal_ticket->t_curr_res = 0;
+ /* ungrant will give back unit_res * t_cnt. */
+ internal_ticket->t_cnt = 0;
+ }
+
return retval;
-} /* xfs_log_reserve */
+}
/*
@@ -2481,8 +2603,8 @@ restart:
/*
* Atomically get the log space required for a log ticket.
*
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
*
* This function is structured so that it has a lock free fast path. This is
* necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
* every pass.
*
* As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
*/
STATIC int
-xlog_grant_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_grant_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes;
- int need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("grant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_grant_enter(log, tic);
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters; if we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
+ */
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
-
- /* something is already sleeping; insert new transaction at end */
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
- /* recheck the queue now we are locked */
- if (list_empty(&log->l_reserveq)) {
- spin_unlock(&log->l_grant_reserve_lock);
- goto redo;
- }
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep1(log, tic);
-
- /*
- * Gotta check this before going to sleep, while we're
- * holding the grant lock.
- */
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- /*
- * If we got an error, and the filesystem is shutting down,
- * we'll catch it down below. So just continue...
- */
- trace_xfs_log_grant_wake1(log, tic);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_reserveq)) {
spin_lock(&log->l_grant_reserve_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep2(log, tic);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- trace_xfs_log_grant_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_reserveq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_reserveq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_reserve_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_reserve_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_reserveq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_reserve_lock);
}
+ if (error)
+ return error;
- /* we've got enough space */
xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-error_return_unlocked:
- spin_lock(&log->l_grant_reserve_lock);
-error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_reserve_lock);
- trace_xfs_log_grant_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_grant_log_space */
-
+}
/*
* Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
* free fast path.
*/
STATIC int
-xlog_regrant_write_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes, need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
tic->t_curr_res = tic->t_unit_res;
xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log,
if (tic->t_cnt > 0)
return 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("regrant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_regrant_write_enter(log, tic);
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
- /* If there are other waiters on the queue then give them a
- * chance at logspace before us. Wake up the first waiters,
- * if we do not wake up all the waiters then go to sleep waiting
- * for more free space, otherwise try to get some space for
- * this transaction.
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters; if we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- if (!list_empty_careful(&log->l_writeq)) {
- struct xlog_ticket *ntic;
-
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- list_for_each_entry(ntic, &log->l_writeq, t_queue) {
- ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
- if (free_bytes < ntic->t_unit_res)
- break;
- free_bytes -= ntic->t_unit_res;
- wake_up(&ntic->t_wait);
- }
-
- if (ntic != list_first_entry(&log->l_writeq,
- struct xlog_ticket, t_queue)) {
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
- trace_xfs_log_regrant_write_sleep1(log, tic);
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
- trace_xfs_log_regrant_write_wake1(log, tic);
- } else
- spin_unlock(&log->l_grant_write_lock);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_writeq)) {
spin_lock(&log->l_grant_write_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- trace_xfs_log_regrant_write_sleep2(log, tic);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
- trace_xfs_log_regrant_write_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_writeq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_writeq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_write_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_write_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_writeq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_write_lock);
}
- /* we've got enough space */
+ if (error)
+ return error;
+
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-
- error_return_unlocked:
- spin_lock(&log->l_grant_write_lock);
- error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_write_lock);
- trace_xfs_log_regrant_write_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_regrant_write_log_space */
-
+}
/* The first cnt-1 times through here we don't need to
* move the grant write head because the permanent
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index f1d2802b2f0..49403579887 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index f4c38d8c667..2292d1af9d7 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
#undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
/*
* All syscalls below here should go away really,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 154bf568301..66ed067fb72 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
extern void __user *compat_alloc_user_space(unsigned long len);
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
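
The unistd.h and compat.h hunks wire up the new process_vm_readv()/process_vm_writev() syscalls (vectored reads and writes of another process's memory) for asm-generic ports and compat callers. A minimal userspace use of the read side against the calling process itself; it assumes a glibc new enough to provide the wrapper (otherwise syscall(2) with the numbers defined above works):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		char src[] = "hello from the 'remote' process";
		char dst[sizeof(src)];
		struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
		struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

		/* Reading our own memory keeps the example self-contained. */
		ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);

		printf("%zd bytes: %s\n", n, dst);
		return n < 0;
	}
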
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 019dc558df1..e0bc4ffb8e7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
#include <linux/atomic.h>
+#include <linux/shrinker.h>
#include <asm/byteorder.h>
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c48..4baadd18f4a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a..07ceb97d53f 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
- long nr; /* objs pending delete */
+ atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7f5fed3c89e..6873c7dd914 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_SE_CMD_FAILED = 0x00000400,
+ SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
+ SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
- atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport specific error status */
- int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
- int check_release:1;
- int cmd_wait_set:1;
+ unsigned check_release:1;
+ unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
- struct se_device *se_obj_ptr;
- struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
struct work_struct work;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- u32 t_task_pt_sgl_num;
-
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
- int sess_tearing_down:1;
+ unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
} ____cacheline_aligned;
struct se_device {
- /* Set to 1 if thread is NOT sleeping on thread_sem */
- u8 thread_active;
- u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
- atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
- atomic_t dev_tur_active;
atomic_t execute_tasks;
- atomic_t dev_status_thr_count;
- atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
- spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
- spinlock_t state_task_lock;
- spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
- spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
- pid_t process_thread_pid;
- struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
- /* Linked list for struct se_global->g_se_dev_list */
- struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index c16e9431dd0..dac4f2d859f 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -10,29 +10,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
-#define PYX_TRANSPORT_WRITE_PENDING 1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
-#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
-#define PYX_TRANSPORT_USE_SENSE_REASON -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE 0
-#define TRANSPORT_PLUGIN_REGISTERED 1
-
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 600c1629b64..d3b9df5962c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2174,11 +2174,11 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
-
if (ctx->nr_events)
cpuctx->task_ctx = ctx;
+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
+
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e69434b070d..b2e08c932d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/kmemcheck.h>
#include <asm/sections.h>
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- memset(lock, 0, sizeof(*lock));
+ int i;
+
+ kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
diff --git a/kernel/printk.c b/kernel/printk.c
index 1455a0d4eed..7982a0a841e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1293,10 +1293,11 @@ again:
raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
if (retry && console_trylock())
goto again;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c436e790b21..8a46f5d6450 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
struct alarm *alarm;
ktime_t expired = next->expires;
- if (expired.tv64 >= now.tv64)
+ if (expired.tv64 > now.tv64)
break;
alarm = container_of(next, struct alarm, node);
diff --git a/kernel/timer.c b/kernel/timer.c
index dbaa62422b1..9c3c62b0c4b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
int pid;
rcu_read_lock();
- pid = task_tgid_vnr(current->real_parent);
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 74c6c7fce74..fea790a2b17 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
- return ((a->dev_addr == a->dev_addr) &&
+ return ((a->dev_addr == b->dev_addr) &&
(a->dev == b->dev)) ? true : false;
}
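The dma-debug hunk fixes a self-comparison typo: the old code compared a->dev_addr against itself, so any two entries on the same device matched "exactly". A standalone sketch of the corrected check, with dma_debug_entry reduced to the two fields the comparison uses (an assumed simplification, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct entry {
	const void *dev;
	uint64_t dev_addr;
};

static bool exact_match(const struct entry *a, const struct entry *b)
{
	/* compare a against b, not a against itself */
	return a->dev_addr == b->dev_addr && a->dev == b->dev;
}

int main(void)
{
	int dev;
	struct entry a = { &dev, 0x1000 }, b = { &dev, 0x2000 };

	assert(!exact_match(&a, &b));	/* old code wrongly returned true here */
	b.dev_addr = 0x1000;
	assert(exact_match(&a, &b));
	return 0;
}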
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4298abaae15..36b3d988b4e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
static void khugepaged_alloc_sleep(void)
{
- DEFINE_WAIT(wait);
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_alloc_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
- DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_scan_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb28a5f9db8..73f17c0293c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 578e29174fa..177aca424a0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
-out:
unlock_page(hpage);
+out:
if (rc != -EAGAIN) {
list_del(&hpage->lru);
put_page(hpage);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9dd443d89d8..2b8ba3aebf6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
-
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
unsigned long block_migratetype;
int reserve;
- /* Get the start pfn, end pfn and the number of blocks to reserve */
+ /*
+ * Get the start pfn, end pfn and the number of blocks to reserve
+ * We have to be careful to be aligned to pageblock_nr_pages to
+ * make sure that we always check pfn_valid for the first page in
+ * the block.
+ */
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3231bf33287..1d8b32f0713 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
goto fail;
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ if (!addr)
+ return NULL;
/*
* In this function, newly allocated vm_struct is not added
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1893c05079..f54a05b7a61 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
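The vmscan changes replace the open-coded cmpxchg loop on shrinker->nr with an atomic_long_t and a single exchange: one caller atomically takes the whole pending count, leaving zero behind, and adds back whatever it did not scan. A userspace C11 sketch of that grab-and-zero pattern, with illustrative names only:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch;

static long take_pending(void)
{
	/* same idea as atomic_long_xchg(&shrinker->nr_in_batch, 0) */
	return atomic_exchange(&nr_in_batch, 0);
}

static void give_back(long unscanned)
{
	if (unscanned > 0)
		atomic_fetch_add(&nr_in_batch, unscanned);
}

int main(void)
{
	atomic_store(&nr_in_batch, 128);
	long nr = take_pending();
	give_back(nr - 100);		/* pretend we scanned 100 objects */
	printf("taken %ld, left %ld\n", nr, atomic_load(&nr_in_batch));
	return 0;
}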
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 36fa7c9bedc..d9f3ced8756 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -293,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
pos = tomoyo_get_local_path(path->dentry, buf,
buf_len - 1);
/* Get absolute name for the rest. */
- else
+ else {
pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
encode:
if (IS_ERR(pos))
continue;
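The tomoyo hunk adds a fallback: if the absolute-path lookup reports -EINVAL, retry with the local-path helper instead of failing outright. A tiny userspace sketch of that fallback shape, where get_absolute() and get_local() are made-up stand-ins:

#include <errno.h>
#include <stdio.h>

static int get_absolute(char *buf, size_t len) { (void)buf; (void)len; return -EINVAL; }
static int get_local(char *buf, size_t len) { snprintf(buf, len, "local:/fallback"); return 0; }

int main(void)
{
	char buf[64];
	int ret = get_absolute(buf, sizeof(buf));
	if (ret == -EINVAL)		/* absolute name unavailable: fall back */
		ret = get_local(buf, sizeof(buf));
	if (ret == 0)
		printf("%s\n", buf);
	return ret ? 1 : 0;
}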
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index cbde019d3d5..1d07e8fa243 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -297,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
imux = &spec->input_mux[mux_idx];
if (!imux->num_items && mux_idx > 0)
imux = &spec->input_mux[0];
+ if (!imux->num_items)
+ return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
@@ -2629,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
case AUTO_PIN_SPEAKER_OUT:
if (cfg->line_outs == 1)
return "Speaker";
+ if (cfg->line_outs == 2)
+ return ch ? "Bass Speaker" : "Speaker";
break;
case AUTO_PIN_HP_OUT:
/* for multi-io case, only the primary out */
@@ -2902,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
if (!nid)
continue;
if (found_in_nid_list(nid, spec->multiout.dac_nids,
- spec->multiout.num_dacs))
+ ARRAY_SIZE(spec->private_dac_nids)))
continue;
if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2923,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
return 0;
}
+/* return 0 if no possible DAC is found, 1 if one or more found */
static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
const hda_nid_t *pins, hda_nid_t *dacs)
{
@@ -2940,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
if (!dacs[i])
dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
}
- return 0;
+ return 1;
}
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2950,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
static int alc_auto_fill_dac_nids(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- const struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
bool redone = false;
int i;
@@ -2961,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
spec->multiout.extra_out_nid[0] = 0;
memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
spec->multiout.dac_nids = spec->private_dac_nids;
+ spec->multi_ios = 0;
/* fill hard-wired DACs first */
if (!redone) {
@@ -2994,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
for (i = 0; i < cfg->line_outs; i++) {
if (spec->private_dac_nids[i])
spec->multiout.num_dacs++;
- else
+ else {
memmove(spec->private_dac_nids + i,
spec->private_dac_nids + i + 1,
sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+ spec->private_dac_nids[cfg->line_outs - 1] = 0;
+ }
}
if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3019,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
if (cfg->line_out_type != AUTO_PIN_HP_OUT)
alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
spec->multiout.hp_out_nid);
- if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
- alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
- spec->multiout.extra_out_nid);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid);
+ /* if no speaker volume is assigned, try again as the primary
+ * output
+ */
+ if (!err && cfg->speaker_outs > 0 &&
+ cfg->line_out_type == AUTO_PIN_HP_OUT) {
+ cfg->hp_outs = cfg->line_outs;
+ memcpy(cfg->hp_pins, cfg->line_out_pins,
+ sizeof(cfg->hp_pins));
+ cfg->line_outs = cfg->speaker_outs;
+ memcpy(cfg->line_out_pins, cfg->speaker_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->speaker_outs = 0;
+ memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+ cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+ redone = false;
+ goto again;
+ }
+ }
return 0;
}
@@ -3171,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
}
static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
- hda_nid_t dac, const char *pfx)
+ hda_nid_t dac, const char *pfx,
+ int cidx)
{
struct alc_spec *spec = codec->spec;
hda_nid_t sw, vol;
@@ -3187,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
if (is_ctl_used(spec->sw_ctls, val))
return 0; /* already created */
mark_ctl_usage(spec->sw_ctls, val);
- return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+ return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
}
sw = alc_look_for_out_mute_nid(codec, pin, dac);
vol = alc_look_for_out_vol_nid(codec, pin, dac);
- err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+ err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
if (err < 0)
return err;
- err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+ err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
if (err < 0)
return err;
return 0;
@@ -3236,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
hda_nid_t dac = *dacs;
if (!dac)
dac = spec->multiout.dac_nids[0];
- return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+ return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
}
if (dacs[num_pins - 1]) {
/* OK, we have a multi-output system with individual volumes */
for (i = 0; i < num_pins; i++) {
- snprintf(name, sizeof(name), "%s %s",
- pfx, channel_name[i]);
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- name);
+ if (num_pins >= 3) {
+ snprintf(name, sizeof(name), "%s %s",
+ pfx, channel_name[i]);
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ name, 0);
+ } else {
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ pfx, i);
+ }
if (err < 0)
return err;
}
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index d8d2f9dccd9..eeb25d529e3 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -215,6 +215,7 @@ struct sigmatel_spec {
unsigned int gpio_mute;
unsigned int gpio_led;
unsigned int gpio_led_polarity;
+ unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
unsigned int vref_led;
/* stream */
@@ -4318,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
spec->eapd_switch = val;
get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
- if (spec->gpio_led <= 8) {
- spec->gpio_mask |= spec->gpio_led;
- spec->gpio_dir |= spec->gpio_led;
- if (spec->gpio_led_polarity)
- spec->gpio_data |= spec->gpio_led;
- }
+ spec->gpio_mask |= spec->gpio_led;
+ spec->gpio_dir |= spec->gpio_led;
+ if (spec->gpio_led_polarity)
+ spec->gpio_data |= spec->gpio_led;
}
}
@@ -4443,7 +4442,7 @@ static int stac92xx_init(struct hda_codec *codec)
/* power on when no jack detection is available */
/* or when the VREF is used for controlling LED */
if (!spec->hp_detect ||
- (spec->gpio_led > 8 && spec->gpio_led == nid)) {
+ spec->vref_mute_led_nid == nid) {
stac_toggle_power_map(codec, nid, 1);
continue;
}
@@ -4915,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
&spec->gpio_led_polarity,
&spec->gpio_led) == 2) {
- if (spec->gpio_led < 4)
+ unsigned int max_gpio;
+ max_gpio = snd_hda_param_read(codec, codec->afg,
+ AC_PAR_GPIO_CAP);
+ max_gpio &= AC_GPIO_IO_COUNT;
+ if (spec->gpio_led < max_gpio)
spec->gpio_led = 1 << spec->gpio_led;
+ else
+ spec->vref_mute_led_nid = spec->gpio_led;
return 1;
}
if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -5045,15 +5050,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
struct sigmatel_spec *spec = codec->spec;
/* sync mute LED */
- if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data);
- } else {
- stac_vrefout_set(codec,
- spec->gpio_led, spec->vref_led);
- }
- }
+ if (spec->vref_mute_led_nid)
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
+ else if (spec->gpio_led)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data);
return 0;
}
@@ -5064,7 +5066,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
struct sigmatel_spec *spec = codec->spec;
if (power_state == AC_PWRST_D3) {
- if (spec->gpio_led > 8) {
+ if (spec->vref_mute_led_nid) {
/* with vref-out pin used for mute led control
* codec AFG is prevented from D3 state
*/
@@ -5117,7 +5119,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
}
}
/*polarity defines *not* muted state level*/
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
if (muted)
spec->gpio_data &= ~spec->gpio_led; /* orange */
else
@@ -5135,7 +5137,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
muted_lvl = spec->gpio_led_polarity ?
AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
spec->vref_led = muted ? muted_lvl : notmtd_lvl;
- stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
}
return 0;
}
@@ -5649,7 +5652,7 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
@@ -5962,7 +5965,7 @@ again:
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index a391e622a19..28dfafb56dd 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int enable = 1;
+static int codecs = 1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
module_param(enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Set bit to indicate that codec number is expected to be present (default 1)");
static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
dma_addr_t silence_dma_addr;
};
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
#define SIS_PRIMARY_CODEC_PRESENT 0x0001
#define SIS_SECONDARY_CODEC_PRESENT 0x0002
#define SIS_TERTIARY_CODEC_PRESENT 0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
{
unsigned long io = sis->ioport;
void __iomem *ioaddr = sis->ioaddr;
+ unsigned long timeout;
u16 status;
int count;
int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
udelay(1);
+ /* Command complete, we can let go of the semaphore now.
+ */
+ outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+ if (!count)
+ return -EIO;
+
/* Now that we've finished the reset, find out what's attached.
+ * There are some codec/board combinations that take an extremely
+ * long time to come up. 350+ ms has been observed in the field,
+ * so we'll give them up to 500ms.
*/
- status = inl(io + SIS_AC97_STATUS);
- if (status & SIS_AC97_STATUS_CODEC_READY)
- sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC2_READY)
- sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC3_READY)
- sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
- /* All done, let go of the semaphore, and check for errors
+ sis->codecs_present = 0;
+ timeout = msecs_to_jiffies(500) + jiffies;
+ while (time_before_eq(jiffies, timeout)) {
+ status = inl(io + SIS_AC97_STATUS);
+ if (status & SIS_AC97_STATUS_CODEC_READY)
+ sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC2_READY)
+ sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC3_READY)
+ sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+ if (sis->codecs_present == codecs)
+ break;
+
+ msleep(1);
+ }
+
+ /* All done, check for errors.
*/
- outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
- if (!sis->codecs_present || !count)
+ if (!sis->codecs_present) {
+ printk(KERN_ERR "sis7019: could not find any codecs\n");
return -EIO;
+ }
+
+ if (sis->codecs_present != codecs) {
+ printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+ sis->codecs_present, codecs);
+ }
/* Let the hardware know that the audio driver is alive,
* and enable PCM slots on the AC-link for L/R playback (3 & 4) and
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
if (!enable)
goto error_out;
+ /* The user can specify which codecs should be present so that we
+ * can wait for them to show up if they are slow to recover from
+ * the AC97 cold reset. We default to a single codec, the primary.
+ *
+ * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+ */
+ codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+ SIS_TERTIARY_CODEC_PRESENT;
+ if (!codecs)
+ codecs = SIS_PRIMARY_CODEC_PRESENT;
+
rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
if (rc < 0)
goto error_out;
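The sis7019 init now releases the AC97 semaphore right after the command completes and then polls for the codecs the user asked for, giving slow codec/board combinations up to 500 ms to report ready. A userspace sketch of that bounded polling loop; read_status() is a hypothetical stand-in for the AC97 status register:

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned read_status(void)
{
	return 0x1;			/* pretend only the primary codec is ready */
}

static bool wait_for_codecs(unsigned expected, unsigned *found)
{
	struct timespec start, now;
	clock_gettime(CLOCK_MONOTONIC, &start);

	for (;;) {
		*found = read_status() & expected;
		if (*found == expected)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long ms = (now.tv_sec - start.tv_sec) * 1000 +
			  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (ms > 500)
			return false;	/* give up, report what we saw */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	unsigned found;
	bool ok = wait_for_codecs(0x3, &found);
	printf("%s, found %#x\n", ok ? "all present" : "timed out", found);
	return 0;
}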
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index c5ca8cfea60..0441893e270 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
static int __init uda1380_modinit(void)
{
- int ret;
+ int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&uda1380_i2c_driver);
if (ret != 0)
pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
#endif
- return 0;
+ return ret;
}
module_init(uda1380_modinit);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 6c298854900..d0c545b73d7 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
};
static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
};
static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
};
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
index b133bfcc584..738391757f2 100644
--- a/sound/soc/imx/Kconfig
+++ b/sound/soc/imx/Kconfig
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
config SND_SOC_MX27VIS_AIC32X4
tristate "SoC audio support for Visstrim M10 boards"
- depends on MACH_IMX27_VISSTRIM_M10
+ depends on MACH_IMX27_VISSTRIM_M10 && I2C
select SND_SOC_TLV320AIC32X4
select SND_MXC_SOC_MX2
help
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 8f49e165f4d..c62d715235e 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
config SND_KIRKWOOD_SOC_OPENRD
tristate "SoC Audio support for Kirkwood Openrd Client"
depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+ depends on I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_CS42L51
help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
config SND_KIRKWOOD_SOC_T5325
tristate "SoC Audio support for HP t5325"
- depends on SND_KIRKWOOD_SOC && MACH_T5325
+ depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_ALC5623
help
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index ffd2242e305..a0f7d3cfa47 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
config SND_SOC_RAUMFELD
tristate "SoC Audio support Raumfeld audio adapter"
depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+ depends on I2C && SPI_MASTER
select SND_PXA_SOC_SSP
select SND_SOC_CS4270
select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
config SND_PXA2XX_SOC_HX4700
tristate "SoC Audio support for HP iPAQ hx4700"
- depends on SND_PXA2XX_SOC && MACH_H4700
+ depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
select SND_PXA2XX_SOC_I2S
select SND_SOC_AK4641
help
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 0c12b98484b..4220bb0f273 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
}
EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+ .formats = 0xffffffff,
+ .channels_min = 1,
+ .channels_max = UINT_MAX,
+
+ /* Random values to keep userspace happy when checking constraints */
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .buffer_bytes_max = 128*1024,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = PAGE_SIZE*2,
+ .periods_min = 2,
+ .periods_max = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+ return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+ .open = dummy_dma_open,
+ .ioctl = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+ .ops = &dummy_dma_ops,
+};
static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
{
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7d98676808d..955930e0a5c 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
- if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+ if (errno == EINVAL || errno == ENOSYS ||
+ errno == ENOENT || errno == EOPNOTSUPP) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bcd05d05b4f..33c17a2b2a8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, attr->name);
+ ret = do_write_string(fd, event_name(attr));
if (ret < 0)
return ret;
/*